/* Global, SSA-based optimizations using mathematical identities.
   Copyright (C) 2005-2020 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
/* Currently, the only mini-pass in this file tries to CSE reciprocal
   operations.  These are common in sequences such as this one:

	modulus = sqrt(x*x + y*y + z*z);
	x = x / modulus;
	y = y / modulus;
	z = z / modulus;

   that can be optimized to

	modulus = sqrt(x*x + y*y + z*z);
	rmodulus = 1.0 / modulus;
	x = x * rmodulus;
	y = y * rmodulus;
	z = z * rmodulus;

   We do this for loop invariant divisors, and with this pass whenever
   we notice that a division has the same divisor multiple times.

   Of course, like in PRE, we don't insert a division if a dominator
   already has one.  However, this cannot be done as an extension of
   PRE for several reasons.

   First of all, with some experiments it was found out that the
   transformation is not always useful if there are only two divisions
   by the same divisor.  This is probably because modern processors
   can pipeline the divisions; on older, in-order processors it should
   still be effective to optimize two divisions by the same number.
   We make this a param, and it shall be called N in the remainder of
   this comment.

   Second, if trapping math is active, we have less freedom on where
   to insert divisions: we can only do so in basic blocks that already
   contain one.  (If divisions don't trap, instead, we can insert
   divisions elsewhere, which will be in blocks that are common dominators
   of those that have the division).

   We really don't want to compute the reciprocal unless a division will
   be found.  To do this, we won't insert the division in a basic block
   that has less than N divisions *post-dominating* it.

   The algorithm constructs a subset of the dominator tree, holding the
   blocks containing the divisions and the common dominators to them,
   and walks it twice.  The first walk is in post-order, and it annotates
   each block with the number of divisions that post-dominate it: this
   gives information on where divisions can be inserted profitably.
   The second walk is in pre-order, and it inserts divisions as explained
   above, and replaces divisions by multiplications.
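
   As an editor's illustration (not part of the original comment):
   with N = 2 and a body such as

	x = ...;
	a = b / x;
	c = d / x;

   both divisions live in one basic block, which therefore has two
   divisions post-dominating it.  The pass inserts reciptmp = 1.0 / x
   immediately before the first division and rewrites the divisions
   into a = b * reciptmp and c = d * reciptmp.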
   In the best case, the cost of the pass is O(n_statements).  In the
   worst-case, the cost is due to creating the dominator tree subset,
   with a cost of O(n_basic_blocks ^ 2); however this can only happen
   for n_statements / n_basic_blocks statements.  So, the amortized cost
   of creating the dominator tree subset is O(n_basic_blocks) and the
   worst-case cost of the pass is O(n_statements * n_basic_blocks).

   More practically, the cost will be small because there are few
   divisions, and they tend to be in the same basic block, so insert_bb
   is called very few times.

   If we did this using domwalk.c, an efficient implementation would have
   to work on all the variables in a single pass, because we could not
   work on just a subset of the dominator tree, as we do now, and the
   cost would also be something like O(n_statements * n_basic_blocks).
   The data structures would be more complex in order to work on all the
   variables in a single pass.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "alloc-pool.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "gimple-pretty-print.h"
#include "alias.h"
#include "fold-const.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimplify.h"
#include "gimplify-me.h"
#include "stor-layout.h"
#include "tree-cfg.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "builtins.h"
#include "internal-fn.h"
#include "case-cfn-macros.h"
#include "optabs-libfuncs.h"
#include "tree-eh.h"
#include "targhooks.h"
#include "domwalk.h"
#include "tree-ssa-math-opts.h"
/* This structure represents one basic block that either computes a
   division, or is a common dominator for basic blocks that compute a
   division.  */
struct occurrence
{
  /* The basic block represented by this structure.  */
  basic_block bb = basic_block ();

  /* If non-NULL, the SSA_NAME holding the definition for a reciprocal
     inserted in BB.  */
  tree recip_def = tree ();

  /* If non-NULL, the SSA_NAME holding the definition for a squared
     reciprocal inserted in BB.  */
  tree square_recip_def = tree ();

  /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that
     was inserted in BB.  */
  gimple *recip_def_stmt = nullptr;

  /* Pointer to a list of "struct occurrence"s for blocks dominated
     by BB.  */
  struct occurrence *children = nullptr;

  /* Pointer to the next "struct occurrence"s in the list of blocks
     sharing a common dominator.  */
  struct occurrence *next = nullptr;

  /* The number of divisions that are in BB before compute_merit.  The
     number of divisions that are in BB or post-dominate it after
     compute_merit.  */
  int num_divisions = 0;

  /* True if the basic block has a division, false if it is a common
     dominator for basic blocks that do.  If it is false and trapping
     math is active, BB is not a candidate for inserting a reciprocal.  */
  bool bb_has_division = false;

  /* Construct a struct occurrence for basic block BB, and whose
     children list is headed by CHILDREN.  */
  occurrence (basic_block bb, struct occurrence *children)
  : bb (bb), children (children)
  {
    bb->aux = this;
  }

  /* Destroy a struct occurrence and remove it from its basic block.  */
  ~occurrence ()
  {
    bb->aux = NULL;
  }

  /* Allocate memory for a struct occurrence from OCC_POOL.  */
  static void* operator new (size_t);

  /* Return memory for a struct occurrence to OCC_POOL.  */
  static void operator delete (void*, size_t);
};
static struct
{
  /* Number of 1.0/X ops inserted.  */
  int rdivs_inserted;

  /* Number of 1.0/FUNC ops inserted.  */
  int rfuncs_inserted;
} reciprocal_stats;

static struct
{
  /* Number of cexpi calls inserted.  */
  int inserted;

  /* Number of conversions removed.  */
  int conv_removed;
} sincos_stats;

static struct
{
  /* Number of widening multiplication ops inserted.  */
  int widen_mults_inserted;

  /* Number of integer multiply-and-accumulate ops inserted.  */
  int maccs_inserted;

  /* Number of fp fused multiply-add ops inserted.  */
  int fmas_inserted;

  /* Number of divmod calls inserted.  */
  int divmod_calls_inserted;
} widen_mul_stats;

/* The instance of "struct occurrence" representing the highest
   interesting block in the dominator tree.  */
static struct occurrence *occ_head;

/* Allocation pool for getting instances of "struct occurrence".  */
static object_allocator<occurrence> *occ_pool;
void*
occurrence::operator new (size_t n)
{
  gcc_assert (n == sizeof(occurrence));
  return occ_pool->allocate_raw ();
}

void
occurrence::operator delete (void *occ, size_t n)
{
  gcc_assert (n == sizeof(occurrence));
  occ_pool->remove_raw (occ);
}
/* Insert NEW_OCC into our subset of the dominator tree.  P_HEAD points to a
   list of "struct occurrence"s, one per basic block, having IDOM as
   their common dominator.

   We try to insert NEW_OCC as deep as possible in the tree, and we also
   insert any other block that is a common dominator for BB and one
   block already in the tree.  */

static void
insert_bb (struct occurrence *new_occ, basic_block idom,
	   struct occurrence **p_head)
{
  struct occurrence *occ, **p_occ;

  for (p_occ = p_head; (occ = *p_occ) != NULL; )
    {
      basic_block bb = new_occ->bb, occ_bb = occ->bb;
      basic_block dom = nearest_common_dominator (CDI_DOMINATORS,
						  occ_bb, bb);
      if (dom == bb)
	{
	  /* BB dominates OCC_BB.  OCC becomes NEW_OCC's child: remove OCC
	     from the list.  */
	  *p_occ = occ->next;
	  occ->next = new_occ->children;
	  new_occ->children = occ;

	  /* Try the next block (it may as well be dominated by BB).  */
	}

      else if (dom == occ_bb)
	{
	  /* OCC_BB dominates BB.  Tail recurse to look deeper.  */
	  insert_bb (new_occ, dom, &occ->children);
	  return;
	}

      else if (dom != idom)
	{
	  gcc_assert (!dom->aux);

	  /* There is a dominator between IDOM and BB, add it and make
	     two children out of NEW_OCC and OCC.  First, remove OCC from
	     the list.  */
	  *p_occ = occ->next;
	  new_occ->next = occ;
	  occ->next = NULL;

	  /* None of the previous blocks has DOM as a dominator: if we tail
	     recursed, we would reexamine them uselessly.  Just switch BB with
	     DOM, and go on looking for blocks dominated by DOM.  */
	  new_occ = new occurrence (dom, new_occ);
	}

      else
	{
	  /* Nothing special, go on with the next element.  */
	  p_occ = &occ->next;
	}
    }

  /* No place was found as a child of IDOM.  Make BB a sibling of IDOM.  */
  new_occ->next = *p_head;
  *p_head = new_occ;
}
/* Register that we found a division in BB.
   IMPORTANCE is a measure of how much weighting to give
   that division.  Use IMPORTANCE = 2 to register a single
   division.  If the division is going to be found multiple
   times use 1 (as it is with squares).  */

static inline void
register_division_in (basic_block bb, int importance)
{
  struct occurrence *occ;

  occ = (struct occurrence *) bb->aux;
  if (!occ)
    {
      occ = new occurrence (bb, NULL);
      insert_bb (occ, ENTRY_BLOCK_PTR_FOR_FN (cfun), &occ_head);
    }

  occ->bb_has_division = true;
  occ->num_divisions += importance;
}
/* Compute the number of divisions that postdominate each block in OCC and
   its children.  */

static void
compute_merit (struct occurrence *occ)
{
  struct occurrence *occ_child;
  basic_block dom = occ->bb;

  for (occ_child = occ->children; occ_child;
       occ_child = occ_child->next)
    {
      basic_block bb;
      if (occ_child->children)
	compute_merit (occ_child);

      if (flag_exceptions)
	/* Don't clobber the EH landing pad!  */
	bb = single_noncomplex_succ (dom);
      else
	bb = dom;

      if (dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb))
	occ->num_divisions += occ_child->num_divisions;
    }
}
/* Return whether USE_STMT is a floating-point division by DEF.  */

static inline bool
is_division_by (gimple *use_stmt, tree def)
{
  return is_gimple_assign (use_stmt)
	 && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
	 && gimple_assign_rhs2 (use_stmt) == def
	 /* Do not recognize x / x as valid division, as we are getting
	    confused later by replacing all immediate uses x in such
	    a stmt.  */
	 && gimple_assign_rhs1 (use_stmt) != def
	 && !stmt_can_throw_internal (cfun, use_stmt);
}
/* Return TRUE if USE_STMT is a multiplication of DEF by A.  */

static inline bool
is_mult_by (gimple *use_stmt, tree def, tree a)
{
  if (gimple_code (use_stmt) == GIMPLE_ASSIGN
      && gimple_assign_rhs_code (use_stmt) == MULT_EXPR)
    {
      tree op0 = gimple_assign_rhs1 (use_stmt);
      tree op1 = gimple_assign_rhs2 (use_stmt);

      return (op0 == def && op1 == a)
	      || (op0 == a && op1 == def);
    }
  return 0;
}
/* Return whether USE_STMT is DEF * DEF.  */

static inline bool
is_square_of (gimple *use_stmt, tree def)
{
  return is_mult_by (use_stmt, def, def);
}
/* Return whether USE_STMT is a floating-point division by
   DEF * DEF.  */

static inline bool
is_division_by_square (gimple *use_stmt, tree def)
{
  if (gimple_code (use_stmt) == GIMPLE_ASSIGN
      && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
      && gimple_assign_rhs1 (use_stmt) != gimple_assign_rhs2 (use_stmt)
      && !stmt_can_throw_internal (cfun, use_stmt))
    {
      tree denominator = gimple_assign_rhs2 (use_stmt);
      if (TREE_CODE (denominator) == SSA_NAME)
	return is_square_of (SSA_NAME_DEF_STMT (denominator), def);
    }
  return false;
}
/* Walk the subset of the dominator tree rooted at OCC, setting the
   RECIP_DEF field to a definition of 1.0 / DEF that can be used in
   the given basic block.  The field may be left NULL, of course,
   if it is not possible or profitable to do the optimization.

   DEF_GSI is an iterator pointing at the statement defining DEF.
   If RECIP_DEF is set, a dominator already has a computation that can
   be used.

   If SHOULD_INSERT_SQUARE_RECIP is set, then this also inserts
   the square of the reciprocal immediately after the definition
   of the reciprocal.  */

static void
insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
		    tree def, tree recip_def, tree square_recip_def,
		    int should_insert_square_recip, int threshold)
{
  tree type;
  gassign *new_stmt, *new_square_stmt;
  gimple_stmt_iterator gsi;
  struct occurrence *occ_child;

  if (!recip_def
      && (occ->bb_has_division || !flag_trapping_math)
      /* Divide by two as all divisions are counted twice in
	 the costs.  */
      && occ->num_divisions / 2 >= threshold)
    {
      /* Make a variable with the replacement and substitute it.  */
      type = TREE_TYPE (def);
      recip_def = create_tmp_reg (type, "reciptmp");
      new_stmt = gimple_build_assign (recip_def, RDIV_EXPR,
				      build_one_cst (type), def);

      if (should_insert_square_recip)
	{
	  square_recip_def = create_tmp_reg (type, "powmult_reciptmp");
	  new_square_stmt = gimple_build_assign (square_recip_def, MULT_EXPR,
						 recip_def, recip_def);
	}

      if (occ->bb_has_division)
	{
	  /* Case 1: insert before an existing division.  */
	  gsi = gsi_after_labels (occ->bb);
	  while (!gsi_end_p (gsi)
		 && (!is_division_by (gsi_stmt (gsi), def))
		 && (!is_division_by_square (gsi_stmt (gsi), def)))
	    gsi_next (&gsi);

	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	  if (should_insert_square_recip)
	    gsi_insert_before (&gsi, new_square_stmt, GSI_SAME_STMT);
	}
      else if (def_gsi && occ->bb == gsi_bb (*def_gsi))
	{
	  /* Case 2: insert right after the definition.  Note that this will
	     never happen if the definition statement can throw, because in
	     that case the sole successor of the statement's basic block will
	     dominate all the uses as well.  */
	  gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
	  if (should_insert_square_recip)
	    gsi_insert_after (def_gsi, new_square_stmt, GSI_NEW_STMT);
	}
      else
	{
	  /* Case 3: insert in a basic block not containing defs/uses.  */
	  gsi = gsi_after_labels (occ->bb);
	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	  if (should_insert_square_recip)
	    gsi_insert_before (&gsi, new_square_stmt, GSI_SAME_STMT);
	}

      reciprocal_stats.rdivs_inserted++;

      occ->recip_def_stmt = new_stmt;
    }

  occ->recip_def = recip_def;
  occ->square_recip_def = square_recip_def;
  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    insert_reciprocals (def_gsi, occ_child, def, recip_def,
			square_recip_def, should_insert_square_recip,
			threshold);
}
/* Replace occurrences of expr / (x * x) with expr * ((1 / x) * (1 / x)).
   Take as argument the use for (x * x).  */

static inline void
replace_reciprocal_squares (use_operand_p use_p)
{
  gimple *use_stmt = USE_STMT (use_p);
  basic_block bb = gimple_bb (use_stmt);
  struct occurrence *occ = (struct occurrence *) bb->aux;

  if (optimize_bb_for_speed_p (bb) && occ->square_recip_def
      && occ->recip_def)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
      gimple_assign_set_rhs2 (use_stmt, occ->square_recip_def);
      SET_USE (use_p, occ->square_recip_def);
      fold_stmt_inplace (&gsi);
      update_stmt (use_stmt);
    }
}
/* Replace the division at USE_P with a multiplication by the reciprocal, if
   possible.  */

static inline void
replace_reciprocal (use_operand_p use_p)
{
  gimple *use_stmt = USE_STMT (use_p);
  basic_block bb = gimple_bb (use_stmt);
  struct occurrence *occ = (struct occurrence *) bb->aux;

  if (optimize_bb_for_speed_p (bb)
      && occ->recip_def && use_stmt != occ->recip_def_stmt)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
      SET_USE (use_p, occ->recip_def);
      fold_stmt_inplace (&gsi);
      update_stmt (use_stmt);
    }
}
/* Free OCC and return one more "struct occurrence" to be freed.  */

static struct occurrence *
free_bb (struct occurrence *occ)
{
  struct occurrence *child, *next;

  /* First get the two pointers hanging off OCC.  */
  next = occ->next;
  child = occ->children;
  delete occ;

  /* Now ensure that we don't recurse unless it is necessary.  */
  if (!child)
    return next;
  else
    {
      while (next)
	next = free_bb (next);

      return child;
    }
}
/* Transform sequences like
   t = sqrt (a)
   x = 1.0 / t;
   r1 = x * x;
   r2 = a * x;
   into:
   t = sqrt (a)
   r1 = 1.0 / a;
   r2 = t;
   x = r1 * r2;
   depending on the uses of x, r1, r2.  This removes one multiplication and
   allows the sqrt and division operations to execute in parallel.
   DEF_GSI is the gsi of the initial division by sqrt that defines
   DEF (x in the example above).  */

static void
optimize_recip_sqrt (gimple_stmt_iterator *def_gsi, tree def)
{
  gimple *use_stmt;
  imm_use_iterator use_iter;
  gimple *stmt = gsi_stmt (*def_gsi);
  tree x = def;
  tree orig_sqrt_ssa_name = gimple_assign_rhs2 (stmt);
  tree div_rhs1 = gimple_assign_rhs1 (stmt);

  if (TREE_CODE (orig_sqrt_ssa_name) != SSA_NAME
      || TREE_CODE (div_rhs1) != REAL_CST
      || !real_equal (&TREE_REAL_CST (div_rhs1), &dconst1))
    return;

  gcall *sqrt_stmt
    = dyn_cast <gcall *> (SSA_NAME_DEF_STMT (orig_sqrt_ssa_name));

  if (!sqrt_stmt || !gimple_call_lhs (sqrt_stmt))
    return;

  switch (gimple_call_combined_fn (sqrt_stmt))
    {
    CASE_CFN_SQRT:
    CASE_CFN_SQRT_FN:
      break;

    default:
      return;
    }
  tree a = gimple_call_arg (sqrt_stmt, 0);

  /* We have 'a' and 'x'.  Now analyze the uses of 'x'.  */

  /* Statements that use x in x * x.  */
  auto_vec<gimple *> sqr_stmts;
  /* Statements that use x in a * x.  */
  auto_vec<gimple *> mult_stmts;
  bool has_other_use = false;
  bool mult_on_main_path = false;

  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, x)
    {
      if (is_gimple_debug (use_stmt))
	continue;
      if (is_square_of (use_stmt, x))
	{
	  sqr_stmts.safe_push (use_stmt);
	  if (gimple_bb (use_stmt) == gimple_bb (stmt))
	    mult_on_main_path = true;
	}
      else if (is_mult_by (use_stmt, x, a))
	{
	  mult_stmts.safe_push (use_stmt);
	  if (gimple_bb (use_stmt) == gimple_bb (stmt))
	    mult_on_main_path = true;
	}
      else
	has_other_use = true;
    }

  /* In the x * x and a * x cases we just rewire stmt operands or
     remove multiplications.  In the has_other_use case we introduce
     a multiplication so make sure we don't introduce a multiplication
     on a path where there was none.  */
  if (has_other_use && !mult_on_main_path)
    return;

  if (sqr_stmts.is_empty () && mult_stmts.is_empty ())
    return;

  /* If x = 1.0 / sqrt (a) has uses other than those optimized here we want
     to be able to compose it from the sqr and mult cases.  */
  if (has_other_use && (sqr_stmts.is_empty () || mult_stmts.is_empty ()))
    return;

  if (dump_file)
    {
      fprintf (dump_file, "Optimizing reciprocal sqrt multiplications of\n");
      print_gimple_stmt (dump_file, sqrt_stmt, 0, TDF_NONE);
      print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);
      fprintf (dump_file, "\n");
    }

  bool delete_div = !has_other_use;
  tree sqr_ssa_name = NULL_TREE;
  if (!sqr_stmts.is_empty ())
    {
      /* r1 = x * x.  Transform the original
	 x = 1.0 / t
	 into
	 tmp1 = 1.0 / a
	 r1 = tmp1.  */

      sqr_ssa_name
	= make_temp_ssa_name (TREE_TYPE (a), NULL, "recip_sqrt_sqr");

      if (dump_file)
	{
	  fprintf (dump_file, "Replacing original division\n");
	  print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);
	  fprintf (dump_file, "with new division\n");
	}
      stmt
	= gimple_build_assign (sqr_ssa_name, gimple_assign_rhs_code (stmt),
			       gimple_assign_rhs1 (stmt), a);
      gsi_insert_before (def_gsi, stmt, GSI_SAME_STMT);
      gsi_remove (def_gsi, true);
      *def_gsi = gsi_for_stmt (stmt);
      fold_stmt_inplace (def_gsi);
      update_stmt (stmt);

      if (dump_file)
	print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);

      delete_div = false;
      gimple *sqr_stmt;
      unsigned int i;
      FOR_EACH_VEC_ELT (sqr_stmts, i, sqr_stmt)
	{
	  gimple_stmt_iterator gsi2 = gsi_for_stmt (sqr_stmt);
	  gimple_assign_set_rhs_from_tree (&gsi2, sqr_ssa_name);
	  update_stmt (sqr_stmt);
	}
    }
  if (!mult_stmts.is_empty ())
    {
      /* r2 = a * x.  Transform this into:
	 r2 = t (The original sqrt (a)).  */
      unsigned int i;
      gimple *mult_stmt = NULL;
      FOR_EACH_VEC_ELT (mult_stmts, i, mult_stmt)
	{
	  gimple_stmt_iterator gsi2 = gsi_for_stmt (mult_stmt);

	  if (dump_file)
	    {
	      fprintf (dump_file, "Replacing squaring multiplication\n");
	      print_gimple_stmt (dump_file, mult_stmt, 0, TDF_NONE);
	      fprintf (dump_file, "with assignment\n");
	    }
	  gimple_assign_set_rhs_from_tree (&gsi2, orig_sqrt_ssa_name);
	  fold_stmt_inplace (&gsi2);
	  update_stmt (mult_stmt);
	  if (dump_file)
	    print_gimple_stmt (dump_file, mult_stmt, 0, TDF_NONE);
	}
    }

  if (has_other_use)
    {
      /* Using the two temporaries tmp1, tmp2 from above
	 the original x is now:
	 x = tmp1 * tmp2.  */
      gcc_assert (orig_sqrt_ssa_name);
      gcc_assert (sqr_ssa_name);

      gimple *new_stmt
	= gimple_build_assign (x, MULT_EXPR,
			       orig_sqrt_ssa_name, sqr_ssa_name);
      gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
      update_stmt (stmt);
    }
  else if (delete_div)
    {
      /* Remove the original division.  */
      gimple_stmt_iterator gsi2 = gsi_for_stmt (stmt);
      gsi_remove (&gsi2, true);
      release_defs (stmt);
    }
  else
    release_ssa_name (x);
}
/* Look for floating-point divisions among DEF's uses, and try to
   replace them by multiplications with the reciprocal.  Add
   as many statements computing the reciprocal as needed.

   DEF must be a GIMPLE register of a floating-point type.  */

static void
execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
{
  use_operand_p use_p, square_use_p;
  imm_use_iterator use_iter, square_use_iter;
  tree square_def;
  struct occurrence *occ;
  int count = 0;
  int threshold;
  int square_recip_count = 0;
  int sqrt_recip_count = 0;

  gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && TREE_CODE (def) == SSA_NAME);
  threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));

  /* If DEF is a square (x * x), count the number of divisions by x.
     If there are more divisions by x than by (DEF * DEF), prefer to optimize
     the reciprocal of x instead of DEF.  This improves cases like:
       x = ...;
       y = x * x;
       a = 1.0 / x;
       b = 1.0 / y;
     Reciprocal optimization of x results in 1 division rather than 2 or 3.  */
  gimple *def_stmt = SSA_NAME_DEF_STMT (def);

  if (is_gimple_assign (def_stmt)
      && gimple_assign_rhs_code (def_stmt) == MULT_EXPR
      && TREE_CODE (gimple_assign_rhs1 (def_stmt)) == SSA_NAME
      && gimple_assign_rhs1 (def_stmt) == gimple_assign_rhs2 (def_stmt))
    {
      tree op0 = gimple_assign_rhs1 (def_stmt);

      FOR_EACH_IMM_USE_FAST (use_p, use_iter, op0)
	{
	  gimple *use_stmt = USE_STMT (use_p);
	  if (is_division_by (use_stmt, op0))
	    sqrt_recip_count++;
	}
    }

  FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
    {
      gimple *use_stmt = USE_STMT (use_p);
      if (is_division_by (use_stmt, def))
	{
	  register_division_in (gimple_bb (use_stmt), 2);
	  count++;
	}

      if (is_square_of (use_stmt, def))
	{
	  square_def = gimple_assign_lhs (use_stmt);
	  FOR_EACH_IMM_USE_FAST (square_use_p, square_use_iter, square_def)
	    {
	      gimple *square_use_stmt = USE_STMT (square_use_p);
	      if (is_division_by (square_use_stmt, square_def))
		{
		  /* This is executed twice for each division by a square.  */
		  register_division_in (gimple_bb (square_use_stmt), 1);
		  square_recip_count++;
		}
	    }
	}
    }

  /* Square reciprocals were counted twice above.  */
  square_recip_count /= 2;

  /* If it is more profitable to optimize 1 / x, don't optimize 1 / (x * x).  */
  if (sqrt_recip_count > square_recip_count)
    goto out;

  /* Do the expensive part only if we can hope to optimize something.  */
  if (count + square_recip_count >= threshold && count >= 1)
    {
      gimple *use_stmt;
      for (occ = occ_head; occ; occ = occ->next)
	{
	  compute_merit (occ);
	  insert_reciprocals (def_gsi, occ, def, NULL, NULL,
			      square_recip_count, threshold);
	}

      FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
	{
	  if (is_division_by (use_stmt, def))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
		replace_reciprocal (use_p);
	    }
	  else if (square_recip_count > 0 && is_square_of (use_stmt, def))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
		{
		  /* Find all uses of the square that are divisions and
		     replace them by multiplications with the inverse.  */
		  imm_use_iterator square_iterator;
		  gimple *powmult_use_stmt = USE_STMT (use_p);
		  tree powmult_def_name = gimple_assign_lhs (powmult_use_stmt);

		  FOR_EACH_IMM_USE_STMT (powmult_use_stmt,
					 square_iterator, powmult_def_name)
		    FOR_EACH_IMM_USE_ON_STMT (square_use_p, square_iterator)
		      {
			gimple *powmult_use_stmt = USE_STMT (square_use_p);
			if (is_division_by (powmult_use_stmt,
					    powmult_def_name))
			  replace_reciprocal_squares (square_use_p);
		      }
		}
	    }
	}
    }

out:
  for (occ = occ_head; occ; )
    occ = free_bb (occ);

  occ_head = NULL;
}
/* Return an internal function that implements the reciprocal of CALL,
   or IFN_LAST if there is no such function that the target supports.  */

static internal_fn
internal_fn_reciprocal (gcall *call)
{
  internal_fn ifn;

  switch (gimple_call_combined_fn (call))
    {
    CASE_CFN_SQRT:
    CASE_CFN_SQRT_FN:
      ifn = IFN_RSQRT;
      break;

    default:
      return IFN_LAST;
    }

  tree_pair types = direct_internal_fn_types (ifn, call);
  if (!direct_internal_fn_supported_p (ifn, types, OPTIMIZE_FOR_SPEED))
    return IFN_LAST;

  return ifn;
}
/* Go through all the floating-point SSA_NAMEs, and call
   execute_cse_reciprocals_1 on each of them.  */
namespace {

const pass_data pass_data_cse_reciprocals =
{
  GIMPLE_PASS, /* type */
  "recip", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_RECIP, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_cse_reciprocals : public gimple_opt_pass
{
public:
  pass_cse_reciprocals (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cse_reciprocals, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return optimize && flag_reciprocal_math; }
  virtual unsigned int execute (function *);

}; // class pass_cse_reciprocals
unsigned int
pass_cse_reciprocals::execute (function *fun)
{
  basic_block bb;
  tree arg;

  occ_pool = new object_allocator<occurrence> ("dominators for recip");

  memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
  calculate_dominance_info (CDI_DOMINATORS);
  calculate_dominance_info (CDI_POST_DOMINATORS);

  if (flag_checking)
    FOR_EACH_BB_FN (bb, fun)
      gcc_assert (!bb->aux);

  for (arg = DECL_ARGUMENTS (fun->decl); arg; arg = DECL_CHAIN (arg))
    if (FLOAT_TYPE_P (TREE_TYPE (arg))
	&& is_gimple_reg (arg))
      {
	tree name = ssa_default_def (fun, arg);
	if (name)
	  execute_cse_reciprocals_1 (NULL, name);
      }

  FOR_EACH_BB_FN (bb, fun)
    {
      tree def;

      for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gphi *phi = gsi.phi ();
	  def = PHI_RESULT (phi);
	  if (! virtual_operand_p (def)
	      && FLOAT_TYPE_P (TREE_TYPE (def)))
	    execute_cse_reciprocals_1 (NULL, def);
	}

      for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  if (gimple_has_lhs (stmt)
	      && (def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF)) != NULL
	      && FLOAT_TYPE_P (TREE_TYPE (def))
	      && TREE_CODE (def) == SSA_NAME)
	    {
	      execute_cse_reciprocals_1 (&gsi, def);
	      stmt = gsi_stmt (gsi);
	      if (flag_unsafe_math_optimizations
		  && is_gimple_assign (stmt)
		  && gimple_assign_lhs (stmt) == def
		  && !stmt_can_throw_internal (cfun, stmt)
		  && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
		optimize_recip_sqrt (&gsi, def);
	    }
	}

      if (optimize_bb_for_size_p (bb))
	continue;

      /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b).  */
      for (gimple_stmt_iterator gsi = gsi_after_labels (bb); !gsi_end_p (gsi);
	   gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  if (is_gimple_assign (stmt)
	      && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
	    {
	      tree arg1 = gimple_assign_rhs2 (stmt);
	      gimple *stmt1;

	      if (TREE_CODE (arg1) != SSA_NAME)
		continue;

	      stmt1 = SSA_NAME_DEF_STMT (arg1);

	      if (is_gimple_call (stmt1)
		  && gimple_call_lhs (stmt1))
		{
		  bool fail;
		  imm_use_iterator ui;
		  use_operand_p use_p;
		  tree fndecl = NULL_TREE;

		  gcall *call = as_a <gcall *> (stmt1);
		  internal_fn ifn = internal_fn_reciprocal (call);
		  if (ifn == IFN_LAST)
		    {
		      fndecl = gimple_call_fndecl (call);
		      if (!fndecl
			  || !fndecl_built_in_p (fndecl, BUILT_IN_MD))
			continue;
		      fndecl = targetm.builtin_reciprocal (fndecl);
		      if (!fndecl)
			continue;
		    }

		  /* Check that all uses of the SSA name are divisions,
		     otherwise replacing the defining statement will do
		     the wrong thing.  */
		  fail = false;
		  FOR_EACH_IMM_USE_FAST (use_p, ui, arg1)
		    {
		      gimple *stmt2 = USE_STMT (use_p);
		      if (is_gimple_debug (stmt2))
			continue;
		      if (!is_gimple_assign (stmt2)
			  || gimple_assign_rhs_code (stmt2) != RDIV_EXPR
			  || gimple_assign_rhs1 (stmt2) == arg1
			  || gimple_assign_rhs2 (stmt2) != arg1)
			{
			  fail = true;
			  break;
			}
		    }
		  if (fail)
		    continue;

		  gimple_replace_ssa_lhs (call, arg1);
		  if (gimple_call_internal_p (call) != (ifn != IFN_LAST))
		    {
		      auto_vec<tree, 4> args;
		      for (unsigned int i = 0;
			   i < gimple_call_num_args (call); i++)
			args.safe_push (gimple_call_arg (call, i));
		      gcall *stmt2;
		      if (ifn == IFN_LAST)
			stmt2 = gimple_build_call_vec (fndecl, args);
		      else
			stmt2 = gimple_build_call_internal_vec (ifn, args);
		      gimple_call_set_lhs (stmt2, arg1);
		      gimple_move_vops (stmt2, call);
		      gimple_call_set_nothrow (stmt2,
					       gimple_call_nothrow_p (call));
		      gimple_stmt_iterator gsi2 = gsi_for_stmt (call);
		      gsi_replace (&gsi2, stmt2, true);
		    }
		  else
		    {
		      if (ifn == IFN_LAST)
			gimple_call_set_fndecl (call, fndecl);
		      else
			gimple_call_set_internal_fn (call, ifn);
		    }
		  reciprocal_stats.rfuncs_inserted++;

		  FOR_EACH_IMM_USE_STMT (stmt, ui, arg1)
		    {
		      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
		      gimple_assign_set_rhs_code (stmt, MULT_EXPR);
		      fold_stmt_inplace (&gsi);
		      update_stmt (stmt);
		    }
		}
	    }
	}
    }

  statistics_counter_event (fun, "reciprocal divs inserted",
			    reciprocal_stats.rdivs_inserted);
  statistics_counter_event (fun, "reciprocal functions inserted",
			    reciprocal_stats.rfuncs_inserted);

  free_dominance_info (CDI_DOMINATORS);
  free_dominance_info (CDI_POST_DOMINATORS);
  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_cse_reciprocals (gcc::context *ctxt)
{
  return new pass_cse_reciprocals (ctxt);
}
/* If NAME is the result of a type conversion, look for other
   equivalent dominating or dominated conversions, and replace all
   uses with the earliest dominating name, removing the redundant
   conversions.  Return the prevailing name.  */
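
/* For example (editor's sketch with hypothetical SSA names):

     _1 = (float) s_2;    <-- earliest dominating conversion, prevails
     ...
     _3 = (float) s_2;    <-- equivalent dominated conversion: all uses
     use (_3);                of _3 are replaced by _1 and the cast
			      defining _3 is removed.  */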
1112 execute_cse_conv_1 (tree name
)
1114 if (SSA_NAME_IS_DEFAULT_DEF (name
)
1115 || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name
))
1118 gimple
*def_stmt
= SSA_NAME_DEF_STMT (name
);
1120 if (!gimple_assign_cast_p (def_stmt
))
1123 tree src
= gimple_assign_rhs1 (def_stmt
);
1125 if (TREE_CODE (src
) != SSA_NAME
)
1128 imm_use_iterator use_iter
;
1131 /* Find the earliest dominating def. */
1132 FOR_EACH_IMM_USE_STMT (use_stmt
, use_iter
, src
)
1134 if (use_stmt
== def_stmt
1135 || !gimple_assign_cast_p (use_stmt
))
1138 tree lhs
= gimple_assign_lhs (use_stmt
);
1140 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs
)
1141 || (gimple_assign_rhs1 (use_stmt
)
1142 != gimple_assign_rhs1 (def_stmt
))
1143 || !types_compatible_p (TREE_TYPE (name
), TREE_TYPE (lhs
)))
1147 if (gimple_bb (def_stmt
) == gimple_bb (use_stmt
))
1149 gimple_stmt_iterator gsi
= gsi_for_stmt (use_stmt
);
1150 while (!gsi_end_p (gsi
) && gsi_stmt (gsi
) != def_stmt
)
1152 use_dominates
= !gsi_end_p (gsi
);
1154 else if (dominated_by_p (CDI_DOMINATORS
, gimple_bb (use_stmt
),
1155 gimple_bb (def_stmt
)))
1156 use_dominates
= false;
1157 else if (dominated_by_p (CDI_DOMINATORS
, gimple_bb (def_stmt
),
1158 gimple_bb (use_stmt
)))
1159 use_dominates
= true;
1165 std::swap (name
, lhs
);
1166 std::swap (def_stmt
, use_stmt
);
1170 /* Now go through all uses of SRC again, replacing the equivalent
1171 dominated conversions. We may replace defs that were not
1172 dominated by the then-prevailing defs when we first visited
1174 FOR_EACH_IMM_USE_STMT (use_stmt
, use_iter
, src
)
1176 if (use_stmt
== def_stmt
1177 || !gimple_assign_cast_p (use_stmt
))
1180 tree lhs
= gimple_assign_lhs (use_stmt
);
1182 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs
)
1183 || (gimple_assign_rhs1 (use_stmt
)
1184 != gimple_assign_rhs1 (def_stmt
))
1185 || !types_compatible_p (TREE_TYPE (name
), TREE_TYPE (lhs
)))
1188 if (gimple_bb (def_stmt
) == gimple_bb (use_stmt
)
1189 || dominated_by_p (CDI_DOMINATORS
, gimple_bb (use_stmt
),
1190 gimple_bb (def_stmt
)))
1192 sincos_stats
.conv_removed
++;
1194 gimple_stmt_iterator gsi
= gsi_for_stmt (use_stmt
);
1195 replace_uses_by (lhs
, name
);
1196 gsi_remove (&gsi
, true);
/* Records an occurrence at statement USE_STMT in the vector of trees
   STMTS if it is dominated by *TOP_BB or dominates it or this basic block
   is not yet initialized.  Returns true if the occurrence was pushed on
   the vector.  Adjusts *TOP_BB to be the basic block dominating all
   statements in the vector.  */

static bool
maybe_record_sincos (vec<gimple *> *stmts,
		     basic_block *top_bb, gimple *use_stmt)
{
  basic_block use_bb = gimple_bb (use_stmt);
  if (*top_bb
      && (*top_bb == use_bb
	  || dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb)))
    stmts->safe_push (use_stmt);
  else if (!*top_bb
	   || dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb))
    {
      stmts->safe_push (use_stmt);
      *top_bb = use_bb;
    }
  else
    return false;

  return true;
}
/* Look for sin, cos and cexpi calls with the same argument NAME and
   create a single call to cexpi CSEing the result in this case.
   We first walk over all immediate uses of the argument collecting
   statements that we can CSE in a vector and in a second pass replace
   the statement rhs with a REALPART or IMAGPART expression on the
   result of the cexpi call we insert before the use statement that
   dominates all other candidates.  */
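
/* For example (editor's sketch, hypothetical SSA names; recall that
   cexpi (x) == cos (x) + i * sin (x)):

     s_1 = sinf (x_5);
     c_2 = cosf (x_5);

   becomes

     sincostmp_6 = __builtin_cexpif (x_5);
     s_1 = IMAGPART_EXPR <sincostmp_6>;
     c_2 = REALPART_EXPR <sincostmp_6>;  */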
1239 execute_cse_sincos_1 (tree name
)
1241 gimple_stmt_iterator gsi
;
1242 imm_use_iterator use_iter
;
1243 tree fndecl
, res
, type
= NULL_TREE
;
1244 gimple
*def_stmt
, *use_stmt
, *stmt
;
1245 int seen_cos
= 0, seen_sin
= 0, seen_cexpi
= 0;
1246 auto_vec
<gimple
*> stmts
;
1247 basic_block top_bb
= NULL
;
1249 bool cfg_changed
= false;
1251 name
= execute_cse_conv_1 (name
);
1253 FOR_EACH_IMM_USE_STMT (use_stmt
, use_iter
, name
)
1255 if (gimple_code (use_stmt
) != GIMPLE_CALL
1256 || !gimple_call_lhs (use_stmt
))
1259 switch (gimple_call_combined_fn (use_stmt
))
1262 seen_cos
|= maybe_record_sincos (&stmts
, &top_bb
, use_stmt
) ? 1 : 0;
1266 seen_sin
|= maybe_record_sincos (&stmts
, &top_bb
, use_stmt
) ? 1 : 0;
1270 seen_cexpi
|= maybe_record_sincos (&stmts
, &top_bb
, use_stmt
) ? 1 : 0;
1277 tree t
= mathfn_built_in_type (gimple_call_combined_fn (use_stmt
));
1281 t
= TREE_TYPE (name
);
1283 /* This checks that NAME has the right type in the first round,
1284 and, in subsequent rounds, that the built_in type is the same
1285 type, or a compatible type. */
1286 if (type
!= t
&& !types_compatible_p (type
, t
))
1287 RETURN_FROM_IMM_USE_STMT (use_iter
, false);
1289 if (seen_cos
+ seen_sin
+ seen_cexpi
<= 1)
1292 /* Simply insert cexpi at the beginning of top_bb but not earlier than
1293 the name def statement. */
1294 fndecl
= mathfn_built_in (type
, BUILT_IN_CEXPI
);
1297 stmt
= gimple_build_call (fndecl
, 1, name
);
1298 res
= make_temp_ssa_name (TREE_TYPE (TREE_TYPE (fndecl
)), stmt
, "sincostmp");
1299 gimple_call_set_lhs (stmt
, res
);
1301 def_stmt
= SSA_NAME_DEF_STMT (name
);
1302 if (!SSA_NAME_IS_DEFAULT_DEF (name
)
1303 && gimple_code (def_stmt
) != GIMPLE_PHI
1304 && gimple_bb (def_stmt
) == top_bb
)
1306 gsi
= gsi_for_stmt (def_stmt
);
1307 gsi_insert_after (&gsi
, stmt
, GSI_SAME_STMT
);
1311 gsi
= gsi_after_labels (top_bb
);
1312 gsi_insert_before (&gsi
, stmt
, GSI_SAME_STMT
);
1314 sincos_stats
.inserted
++;
1316 /* And adjust the recorded old call sites. */
1317 for (i
= 0; stmts
.iterate (i
, &use_stmt
); ++i
)
1321 switch (gimple_call_combined_fn (use_stmt
))
1324 rhs
= fold_build1 (REALPART_EXPR
, type
, res
);
1328 rhs
= fold_build1 (IMAGPART_EXPR
, type
, res
);
1339 /* Replace call with a copy. */
1340 stmt
= gimple_build_assign (gimple_call_lhs (use_stmt
), rhs
);
1342 gsi
= gsi_for_stmt (use_stmt
);
1343 gsi_replace (&gsi
, stmt
, true);
1344 if (gimple_purge_dead_eh_edges (gimple_bb (stmt
)))
/* To evaluate powi(x,n), the floating point value x raised to the
   constant integer exponent n, we use a hybrid algorithm that
   combines the "window method" with look-up tables.  For an
   introduction to exponentiation algorithms and "addition chains",
   see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth,
   "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming",
   3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation
   Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998.  */
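
/* Worked example (editor's addition): to evaluate powi (x, 1000),
   note 1000 = 250 * 4, so the window method first strips the two
   trailing zero bits, squaring twice starting from powi (x, 250), and
   the remaining exponent 250 < POWI_TABLE_SIZE is then evaluated via
   the "optimal power tree" lookup table defined below.  */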
/* Provide a default value for POWI_MAX_MULTS, the maximum number of
   multiplications to inline before calling the system library's pow
   function.  powi(x,n) requires at worst 2*bits(n)-2 multiplications,
   so this default never requires calling pow, powf or powl.  */

#ifndef POWI_MAX_MULTS
#define POWI_MAX_MULTS  (2*HOST_BITS_PER_WIDE_INT-2)
#endif

/* The size of the "optimal power tree" lookup table.  All
   exponents less than this value are simply looked up in the
   powi_table below.  This threshold is also used to size the
   cache of pseudo registers that hold intermediate results.  */
#define POWI_TABLE_SIZE 256

/* The size, in bits of the window, used in the "window method"
   exponentiation algorithm.  This is equivalent to a radix of
   (1<<POWI_WINDOW_SIZE) in the corresponding "m-ary method".  */
#define POWI_WINDOW_SIZE 3
/* The following table is an efficient representation of an
   "optimal power tree".  For each value, i, the corresponding
   value, j, in the table states that an optimal evaluation
   sequence for calculating pow(x,i) can be found by evaluating
   pow(x,j)*pow(x,i-j).  An optimal power tree for the first
   100 integers is given in Knuth's "Seminumerical algorithms".  */

static const unsigned char powi_table[POWI_TABLE_SIZE] =
  {
      0,   1,   1,   2,   2,   3,   3,   4,  /*   0 -   7 */
      4,   6,   5,   6,   6,  10,   7,   9,  /*   8 -  15 */
      8,  16,   9,  16,  10,  12,  11,  13,  /*  16 -  23 */
     12,  17,  13,  18,  14,  24,  15,  26,  /*  24 -  31 */
     16,  17,  17,  19,  18,  33,  19,  26,  /*  32 -  39 */
     20,  25,  21,  40,  22,  27,  23,  44,  /*  40 -  47 */
     24,  32,  25,  34,  26,  29,  27,  44,  /*  48 -  55 */
     28,  31,  29,  34,  30,  60,  31,  36,  /*  56 -  63 */
     32,  64,  33,  34,  34,  46,  35,  37,  /*  64 -  71 */
     36,  65,  37,  50,  38,  48,  39,  69,  /*  72 -  79 */
     40,  49,  41,  43,  42,  51,  43,  58,  /*  80 -  87 */
     44,  64,  45,  47,  46,  59,  47,  76,  /*  88 -  95 */
     48,  65,  49,  66,  50,  67,  51,  66,  /*  96 - 103 */
     52,  70,  53,  74,  54, 104,  55,  74,  /* 104 - 111 */
     56,  64,  57,  69,  58,  78,  59,  68,  /* 112 - 119 */
     60,  61,  61,  80,  62,  75,  63,  68,  /* 120 - 127 */
     64,  65,  65, 128,  66, 129,  67,  90,  /* 128 - 135 */
     68,  73,  69, 131,  70,  94,  71,  88,  /* 136 - 143 */
     72, 128,  73,  98,  74, 132,  75, 121,  /* 144 - 151 */
     76, 102,  77, 124,  78, 132,  79, 106,  /* 152 - 159 */
     80,  97,  81, 160,  82,  99,  83, 134,  /* 160 - 167 */
     84,  86,  85,  95,  86, 160,  87, 100,  /* 168 - 175 */
     88, 113,  89,  98,  90, 107,  91, 122,  /* 176 - 183 */
     92, 111,  93, 102,  94, 126,  95, 150,  /* 184 - 191 */
     96, 128,  97, 130,  98, 133,  99, 195,  /* 192 - 199 */
    100, 128, 101, 123, 102, 164, 103, 138,  /* 200 - 207 */
    104, 145, 105, 146, 106, 109, 107, 149,  /* 208 - 215 */
    108, 200, 109, 146, 110, 170, 111, 157,  /* 216 - 223 */
    112, 128, 113, 130, 114, 182, 115, 132,  /* 224 - 231 */
    116, 200, 117, 132, 118, 158, 119, 206,  /* 232 - 239 */
    120, 240, 121, 162, 122, 147, 123, 152,  /* 240 - 247 */
    124, 166, 125, 214, 126, 138, 127, 153,  /* 248 - 255 */
  };
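
/* Reading the table (editor's note): powi_table[5] == 3, so an optimal
   sequence for x**5 is x**3 * x**2; recursively, powi_table[3] == 2 and
   powi_table[2] == 1, which yields the chain

     t1 = x * x;    t2 = t1 * x;    t3 = t2 * t1;

   i.e. three multiplications for powi (x, 5).  */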
/* Return the number of multiplications required to calculate
   powi(x,n) where n is less than POWI_TABLE_SIZE.  This is a
   subroutine of powi_cost.  CACHE is an array indicating
   which exponents have already been calculated.  */

static int
powi_lookup_cost (unsigned HOST_WIDE_INT n, bool *cache)
{
  /* If we've already calculated this exponent, then this evaluation
     doesn't require any additional multiplications.  */
  if (cache[n])
    return 0;

  cache[n] = true;
  return powi_lookup_cost (n - powi_table[n], cache)
	 + powi_lookup_cost (powi_table[n], cache) + 1;
}
/* Return the number of multiplications required to calculate
   powi(x,n) for an arbitrary x, given the exponent N.  This
   function needs to be kept in sync with powi_as_mults below.  */

static int
powi_cost (HOST_WIDE_INT n)
{
  bool cache[POWI_TABLE_SIZE];
  unsigned HOST_WIDE_INT digit;
  unsigned HOST_WIDE_INT val;
  int result;

  if (n == 0)
    return 0;

  /* Ignore the reciprocal when calculating the cost.  */
  val = (n < 0) ? -n : n;

  /* Initialize the exponent cache.  */
  memset (cache, 0, POWI_TABLE_SIZE * sizeof (bool));
  cache[1] = true;

  result = 0;

  while (val >= POWI_TABLE_SIZE)
    {
      if (val & 1)
	{
	  digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
	  result += powi_lookup_cost (digit, cache)
		    + POWI_WINDOW_SIZE + 1;
	  val >>= POWI_WINDOW_SIZE;
	}
      else
	{
	  val >>= 1;
	  result++;
	}
    }

  return result + powi_lookup_cost (val, cache);
}
/* Recursive subroutine of powi_as_mults.  This function takes the
   array, CACHE, of already calculated exponents and an exponent N and
   returns a tree that corresponds to CACHE[1]**N, with type TYPE.  */

static tree
powi_as_mults_1 (gimple_stmt_iterator *gsi, location_t loc, tree type,
		 HOST_WIDE_INT n, tree *cache)
{
  tree op0, op1, ssa_target;
  unsigned HOST_WIDE_INT digit;
  gassign *mult_stmt;

  if (n < POWI_TABLE_SIZE && cache[n])
    return cache[n];

  ssa_target = make_temp_ssa_name (type, NULL, "powmult");

  if (n < POWI_TABLE_SIZE)
    {
      cache[n] = ssa_target;
      op0 = powi_as_mults_1 (gsi, loc, type, n - powi_table[n], cache);
      op1 = powi_as_mults_1 (gsi, loc, type, powi_table[n], cache);
    }
  else if (n & 1)
    {
      digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
      op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache);
      op1 = powi_as_mults_1 (gsi, loc, type, digit, cache);
    }
  else
    {
      op0 = powi_as_mults_1 (gsi, loc, type, n >> 1, cache);
      op1 = op0;
    }

  mult_stmt = gimple_build_assign (ssa_target, MULT_EXPR, op0, op1);
  gimple_set_location (mult_stmt, loc);
  gsi_insert_before (gsi, mult_stmt, GSI_SAME_STMT);

  return ssa_target;
}
/* Convert ARG0**N to a tree of multiplications of ARG0 with itself.
   This function needs to be kept in sync with powi_cost above.  */

tree
powi_as_mults (gimple_stmt_iterator *gsi, location_t loc,
	       tree arg0, HOST_WIDE_INT n)
{
  tree cache[POWI_TABLE_SIZE], result, type = TREE_TYPE (arg0);
  gassign *div_stmt;
  tree target;

  if (n == 0)
    return build_real (type, dconst1);

  memset (cache, 0, sizeof (cache));
  cache[1] = arg0;

  result = powi_as_mults_1 (gsi, loc, type, (n < 0) ? -n : n, cache);
  if (n >= 0)
    return result;

  /* If the original exponent was negative, reciprocate the result.  */
  target = make_temp_ssa_name (type, NULL, "powmult");
  div_stmt = gimple_build_assign (target, RDIV_EXPR,
				  build_real (type, dconst1), result);
  gimple_set_location (div_stmt, loc);
  gsi_insert_before (gsi, div_stmt, GSI_SAME_STMT);

  return target;
}
/* ARG0 and N are the two arguments to a powi builtin in GSI with
   location info LOC.  If the arguments are appropriate, create an
   equivalent sequence of statements prior to GSI using an optimal
   number of multiplications, and return an expression holding the
   result.  */

static tree
gimple_expand_builtin_powi (gimple_stmt_iterator *gsi, location_t loc,
			    tree arg0, HOST_WIDE_INT n)
{
  /* Avoid largest negative number.  */
  if (n != -n
      && ((n >= -1 && n <= 2)
	  || (optimize_function_for_speed_p (cfun)
	      && powi_cost (n) <= POWI_MAX_MULTS)))
    return powi_as_mults (gsi, loc, arg0, n);

  return NULL_TREE;
}
/* Build a gimple call statement that calls FN with argument ARG.
   Set the lhs of the call statement to a fresh SSA name.  Insert the
   statement prior to GSI's current position, and return the fresh
   SSA name.  */

static tree
build_and_insert_call (gimple_stmt_iterator *gsi, location_t loc,
		       tree fn, tree arg)
{
  gcall *call_stmt;
  tree ssa_target;

  call_stmt = gimple_build_call (fn, 1, arg);
  ssa_target = make_temp_ssa_name (TREE_TYPE (arg), NULL, "powroot");
  gimple_set_lhs (call_stmt, ssa_target);
  gimple_set_location (call_stmt, loc);
  gsi_insert_before (gsi, call_stmt, GSI_SAME_STMT);

  return ssa_target;
}
/* Build a gimple binary operation with the given CODE and arguments
   ARG0, ARG1, assigning the result to a new SSA name whose base name
   is NAME.  Insert the statement prior to GSI's current position, and
   return the fresh SSA name.  */

static tree
build_and_insert_binop (gimple_stmt_iterator *gsi, location_t loc,
			const char *name, enum tree_code code,
			tree arg0, tree arg1)
{
  tree result = make_temp_ssa_name (TREE_TYPE (arg0), NULL, name);
  gassign *stmt = gimple_build_assign (result, code, arg0, arg1);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);

  return result;
}
/* Build a gimple reference operation with the given CODE and argument
   ARG0, assigning the result to a new SSA name of TYPE with NAME.
   Insert the statement prior to GSI's current position, and return
   the fresh SSA name.  */

static inline tree
build_and_insert_ref (gimple_stmt_iterator *gsi, location_t loc, tree type,
		      const char *name, enum tree_code code, tree arg0)
{
  tree result = make_temp_ssa_name (type, NULL, name);
  gimple *stmt = gimple_build_assign (result, build1 (code, type, arg0));
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return result;
}
/* Build a gimple assignment to cast VAL to TYPE.  Insert the statement
   prior to GSI's current position, and return the fresh SSA name.  */

static tree
build_and_insert_cast (gimple_stmt_iterator *gsi, location_t loc,
		       tree type, tree val)
{
  tree result = make_ssa_name (type);
  gassign *stmt = gimple_build_assign (result, NOP_EXPR, val);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return result;
}
struct pow_synth_sqrt_info
{
  bool *factors;
  unsigned int deepest;
  unsigned int num_mults;
};
/* Return true iff the real value C can be represented as a
   sum of powers of 0.5 up to N.  That is:
   C == SUM<i from 1..N> (a[i]*(0.5**i)) where a[i] is either 0 or 1.
   Record in INFO the various parameters of the synthesis algorithm such
   as the factors a[i], the maximum 0.5 power and the number of
   multiplications that will be required.  */

static bool
representable_as_half_series_p (REAL_VALUE_TYPE c, unsigned n,
				struct pow_synth_sqrt_info *info)
{
  REAL_VALUE_TYPE factor = dconsthalf;
  REAL_VALUE_TYPE remainder = c;

  info->deepest = 0;
  info->num_mults = 0;
  memset (info->factors, 0, n * sizeof (bool));

  for (unsigned i = 0; i < n; i++)
    {
      REAL_VALUE_TYPE res;

      /* If something inexact happened bail out now.  */
      if (real_arithmetic (&res, MINUS_EXPR, &remainder, &factor))
	return false;

      /* We have hit zero.  The number is representable as a sum
	 of powers of 0.5.  */
      if (real_equal (&res, &dconst0))
	{
	  info->factors[i] = true;
	  info->deepest = i + 1;
	  return true;
	}
      else if (!REAL_VALUE_NEGATIVE (res))
	{
	  remainder = res;
	  info->factors[i] = true;
	  info->num_mults++;
	}
      else
	info->factors[i] = false;

      real_arithmetic (&factor, MULT_EXPR, &factor, &dconsthalf);
    }
  return false;
}
/* Return the tree corresponding to FN being applied
   to ARG N times at GSI and LOC.
   Look up previous results from CACHE if need be.
   cache[0] should contain just plain ARG i.e. FN applied to ARG 0 times.  */

static tree
get_fn_chain (tree arg, unsigned int n, gimple_stmt_iterator *gsi,
	      tree fn, location_t loc, tree *cache)
{
  tree res = cache[n];
  if (!res)
    {
      tree prev = get_fn_chain (arg, n - 1, gsi, fn, loc, cache);
      res = build_and_insert_call (gsi, loc, fn, prev);
      cache[n] = res;
    }

  return res;
}
/* Print to STREAM the repeated application of function FNAME to ARG
   N times.  So, for FNAME = "foo", ARG = "x", N = 2 it would print:
   foo (foo (x)).  */

static void
print_nested_fn (FILE* stream, const char *fname, const char* arg,
		 unsigned int n)
{
  if (n == 0)
    fprintf (stream, "%s", arg);
  else
    {
      fprintf (stream, "%s (", fname);
      print_nested_fn (stream, fname, arg, n - 1);
      fprintf (stream, ")");
    }
}
/* Print to STREAM the fractional sequence of sqrt chains
   applied to ARG, described by INFO.  Used for the dump file.  */

static void
dump_fractional_sqrt_sequence (FILE *stream, const char *arg,
			       struct pow_synth_sqrt_info *info)
{
  for (unsigned int i = 0; i < info->deepest; i++)
    {
      bool is_set = info->factors[i];
      if (is_set)
	{
	  print_nested_fn (stream, "sqrt", arg, i + 1);
	  if (i != info->deepest - 1)
	    fprintf (stream, " * ");
	}
    }
}
/* Print to STREAM a representation of raising ARG to an integer
   power N.  Used for the dump file.  */

static void
dump_integer_part (FILE *stream, const char* arg, HOST_WIDE_INT n)
{
  if (n > 1)
    fprintf (stream, "powi (%s, " HOST_WIDE_INT_PRINT_DEC ")", arg, n);
  else if (n == 1)
    fprintf (stream, "%s", arg);
}
/* Attempt to synthesize a POW[F] (ARG0, ARG1) call using chains of
   square roots.  Place at GSI and LOC.  Limit the maximum depth
   of the sqrt chains to MAX_DEPTH.  Return the tree holding the
   result of the expanded sequence or NULL_TREE if the expansion failed.

   This routine assumes that ARG1 is a real number with a fractional part
   (the integer exponent case will have been handled earlier in
   gimple_expand_builtin_pow).

   For ARG1 > 0.0:
   * For ARG1 composed of a whole part WHOLE_PART and a fractional part
     FRAC_PART i.e. WHOLE_PART == floor (ARG1) and
     FRAC_PART == ARG1 - WHOLE_PART:
     Produce POWI (ARG0, WHOLE_PART) * POW (ARG0, FRAC_PART) where
     POW (ARG0, FRAC_PART) is expanded as a product of square root chains
     if it can be expressed as such, that is if FRAC_PART satisfies:
     FRAC_PART == <SUM from i = 1 until MAX_DEPTH> (a[i] * (0.5**i))
     where integer a[i] is either 0 or 1.

     Example:
     POW (x, 3.625) == POWI (x, 3) * POW (x, 0.625)
       --> POWI (x, 3) * SQRT (x) * SQRT (SQRT (SQRT (x)))

   For ARG1 < 0.0 there are two approaches:
   * (A) Expand to 1.0 / POW (ARG0, -ARG1) where POW (ARG0, -ARG1)
	 is calculated as above.

     Example:
     POW (x, -5.625) == 1.0 / POW (x, 5.625)
       --> 1.0 / (POWI (x, 5) * SQRT (x) * SQRT (SQRT (SQRT (x))))

   * (B) : WHOLE_PART := - ceil (abs (ARG1))
	   FRAC_PART  := ARG1 - WHOLE_PART
	   and expand to POW (x, FRAC_PART) / POWI (x, WHOLE_PART).
     Example:
     POW (x, -5.875) == POW (x, 0.125) / POWI (X, 6)
       --> SQRT (SQRT (SQRT (x))) / (POWI (x, 6))

   For ARG1 < 0.0 we choose between (A) and (B) depending on
   how many multiplications we'd have to do.
   So, for the example in (B): POW (x, -5.875), if we were to
   follow algorithm (A) we would produce:
   1.0 / POWI (X, 5) * SQRT (X) * SQRT (SQRT (X)) * SQRT (SQRT (SQRT (X)))
   which contains more multiplications than approach (B).

   Hopefully, this approach will eliminate potentially expensive POW library
   calls when unsafe floating point math is enabled and allow the compiler to
   further optimise the multiplies, square roots and divides produced by this
   transformation.  */
*gsi
, location_t loc
,
1822 tree arg0
, tree arg1
, HOST_WIDE_INT max_depth
)
1824 tree type
= TREE_TYPE (arg0
);
1825 machine_mode mode
= TYPE_MODE (type
);
1826 tree sqrtfn
= mathfn_built_in (type
, BUILT_IN_SQRT
);
1827 bool one_over
= true;
1832 if (TREE_CODE (arg1
) != REAL_CST
)
1835 REAL_VALUE_TYPE exp_init
= TREE_REAL_CST (arg1
);
1837 gcc_assert (max_depth
> 0);
1838 tree
*cache
= XALLOCAVEC (tree
, max_depth
+ 1);
1840 struct pow_synth_sqrt_info synth_info
;
1841 synth_info
.factors
= XALLOCAVEC (bool, max_depth
+ 1);
1842 synth_info
.deepest
= 0;
1843 synth_info
.num_mults
= 0;
1845 bool neg_exp
= REAL_VALUE_NEGATIVE (exp_init
);
1846 REAL_VALUE_TYPE exp
= real_value_abs (&exp_init
);
1848 /* The whole and fractional parts of exp. */
1849 REAL_VALUE_TYPE whole_part
;
1850 REAL_VALUE_TYPE frac_part
;
1852 real_floor (&whole_part
, mode
, &exp
);
1853 real_arithmetic (&frac_part
, MINUS_EXPR
, &exp
, &whole_part
);
1856 REAL_VALUE_TYPE ceil_whole
= dconst0
;
1857 REAL_VALUE_TYPE ceil_fract
= dconst0
;
1861 real_ceil (&ceil_whole
, mode
, &exp
);
1862 real_arithmetic (&ceil_fract
, MINUS_EXPR
, &ceil_whole
, &exp
);
1865 if (!representable_as_half_series_p (frac_part
, max_depth
, &synth_info
))
1868 /* Check whether it's more profitable to not use 1.0 / ... */
1871 struct pow_synth_sqrt_info alt_synth_info
;
1872 alt_synth_info
.factors
= XALLOCAVEC (bool, max_depth
+ 1);
1873 alt_synth_info
.deepest
= 0;
1874 alt_synth_info
.num_mults
= 0;
1876 if (representable_as_half_series_p (ceil_fract
, max_depth
,
1878 && alt_synth_info
.deepest
<= synth_info
.deepest
1879 && alt_synth_info
.num_mults
< synth_info
.num_mults
)
1881 whole_part
= ceil_whole
;
1882 frac_part
= ceil_fract
;
1883 synth_info
.deepest
= alt_synth_info
.deepest
;
1884 synth_info
.num_mults
= alt_synth_info
.num_mults
;
1885 memcpy (synth_info
.factors
, alt_synth_info
.factors
,
1886 (max_depth
+ 1) * sizeof (bool));
1891 HOST_WIDE_INT n
= real_to_integer (&whole_part
);
1892 REAL_VALUE_TYPE cint
;
1893 real_from_integer (&cint
, VOIDmode
, n
, SIGNED
);
1895 if (!real_identical (&whole_part
, &cint
))
1898 if (powi_cost (n
) + synth_info
.num_mults
> POWI_MAX_MULTS
)
1901 memset (cache
, 0, (max_depth
+ 1) * sizeof (tree
));
1903 tree integer_res
= n
== 0 ? build_real (type
, dconst1
) : arg0
;
1905 /* Calculate the integer part of the exponent. */
1908 integer_res
= gimple_expand_builtin_powi (gsi
, loc
, arg0
, n
);
1917 real_to_decimal (string
, &exp_init
, sizeof (string
), 0, 1);
1918 fprintf (dump_file
, "synthesizing pow (x, %s) as:\n", string
);
1924 fprintf (dump_file
, "1.0 / (");
1925 dump_integer_part (dump_file
, "x", n
);
1927 fprintf (dump_file
, " * ");
1928 dump_fractional_sqrt_sequence (dump_file
, "x", &synth_info
);
1929 fprintf (dump_file
, ")");
1933 dump_fractional_sqrt_sequence (dump_file
, "x", &synth_info
);
1934 fprintf (dump_file
, " / (");
1935 dump_integer_part (dump_file
, "x", n
);
1936 fprintf (dump_file
, ")");
1941 dump_fractional_sqrt_sequence (dump_file
, "x", &synth_info
);
1943 fprintf (dump_file
, " * ");
1944 dump_integer_part (dump_file
, "x", n
);
1947 fprintf (dump_file
, "\ndeepest sqrt chain: %d\n", synth_info
.deepest
);
1951 tree fract_res
= NULL_TREE
;
1954 /* Calculate the fractional part of the exponent. */
1955 for (unsigned i
= 0; i
< synth_info
.deepest
; i
++)
1957 if (synth_info
.factors
[i
])
1959 tree sqrt_chain
= get_fn_chain (arg0
, i
+ 1, gsi
, sqrtfn
, loc
, cache
);
1962 fract_res
= sqrt_chain
;
1965 fract_res
= build_and_insert_binop (gsi
, loc
, "powroot", MULT_EXPR
,
1966 fract_res
, sqrt_chain
);
1970 tree res
= NULL_TREE
;
1977 res
= build_and_insert_binop (gsi
, loc
, "powroot", MULT_EXPR
,
1978 fract_res
, integer_res
);
1982 res
= build_and_insert_binop (gsi
, loc
, "powrootrecip", RDIV_EXPR
,
1983 build_real (type
, dconst1
), res
);
1987 res
= build_and_insert_binop (gsi
, loc
, "powroot", RDIV_EXPR
,
1988 fract_res
, integer_res
);
1992 res
= build_and_insert_binop (gsi
, loc
, "powroot", MULT_EXPR
,
1993 fract_res
, integer_res
);
1997 /* ARG0 and ARG1 are the two arguments to a pow builtin call in GSI
1998 with location info LOC. If possible, create an equivalent and
1999 less expensive sequence of statements prior to GSI, and return an
2000 expession holding the result. */
2003 gimple_expand_builtin_pow (gimple_stmt_iterator
*gsi
, location_t loc
,
2004 tree arg0
, tree arg1
)
2006 REAL_VALUE_TYPE c
, cint
, dconst1_3
, dconst1_4
, dconst1_6
;
2007 REAL_VALUE_TYPE c2
, dconst3
;
2009 tree type
, sqrtfn
, cbrtfn
, sqrt_arg0
, result
, cbrt_x
, powi_cbrt_x
;
2011 bool speed_p
= optimize_bb_for_speed_p (gsi_bb (*gsi
));
2012 bool hw_sqrt_exists
, c_is_int
, c2_is_int
;
2014 dconst1_4
= dconst1
;
2015 SET_REAL_EXP (&dconst1_4
, REAL_EXP (&dconst1_4
) - 2);
2017 /* If the exponent isn't a constant, there's nothing of interest
2019 if (TREE_CODE (arg1
) != REAL_CST
)
2022 /* Don't perform the operation if flag_signaling_nans is on
2023 and the operand is a signaling NaN. */
2024 if (HONOR_SNANS (TYPE_MODE (TREE_TYPE (arg1
)))
2025 && ((TREE_CODE (arg0
) == REAL_CST
2026 && REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg0
)))
2027 || REAL_VALUE_ISSIGNALING_NAN (TREE_REAL_CST (arg1
))))
2030 /* If the exponent is equivalent to an integer, expand to an optimal
2031 multiplication sequence when profitable. */
2032 c
= TREE_REAL_CST (arg1
);
2033 n
= real_to_integer (&c
);
2034 real_from_integer (&cint
, VOIDmode
, n
, SIGNED
);
2035 c_is_int
= real_identical (&c
, &cint
);
2038 && ((n
>= -1 && n
<= 2)
2039 || (flag_unsafe_math_optimizations
2041 && powi_cost (n
) <= POWI_MAX_MULTS
)))
2042 return gimple_expand_builtin_powi (gsi
, loc
, arg0
, n
);
  /* Attempt various optimizations using sqrt and cbrt.  */
  type = TREE_TYPE (arg0);
  mode = TYPE_MODE (type);
  sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);

  /* Optimize pow(x,0.5) = sqrt(x).  This replacement is always safe
     unless signed zeros must be maintained.  pow(-0,0.5) = +0, while
     sqrt(-0) = -0.  */
  if (sqrtfn
      && real_equal (&c, &dconsthalf)
      && !HONOR_SIGNED_ZEROS (mode))
    return build_and_insert_call (gsi, loc, sqrtfn, arg0);

  hw_sqrt_exists = optab_handler (sqrt_optab, mode) != CODE_FOR_nothing;

  /* Optimize pow(x,1./3.) = cbrt(x).  This requires unsafe math
     optimizations since 1./3. is not exactly representable.  If x
     is negative and finite, the correct value of pow(x,1./3.) is
     a NaN with the "invalid" exception raised, because the value
     of 1./3. actually has an even denominator.  The correct value
     of cbrt(x) is a negative real value.  */
  cbrtfn = mathfn_built_in (type, BUILT_IN_CBRT);
  dconst1_3 = real_value_truncate (mode, dconst_third ());

  if (flag_unsafe_math_optimizations
      && cbrtfn
      && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
      && real_equal (&c, &dconst1_3))
    return build_and_insert_call (gsi, loc, cbrtfn, arg0);

  /* Optimize pow(x,1./6.) = cbrt(sqrt(x)).  Don't do this optimization
     if we don't have a hardware sqrt insn.  */
  dconst1_6 = dconst1_3;
  SET_REAL_EXP (&dconst1_6, REAL_EXP (&dconst1_6) - 1);

  if (flag_unsafe_math_optimizations
      && sqrtfn
      && cbrtfn
      && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
      && speed_p
      && hw_sqrt_exists
      && real_equal (&c, &dconst1_6))
    {
      /* sqrt(x)  */
      sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);

      /* cbrt(sqrt(x))  */
      return build_and_insert_call (gsi, loc, cbrtfn, sqrt_arg0);
    }
  /* Attempt to expand the POW as a product of square root chains.
     Expand the 0.25 case even when optimising for size.  */
  if (flag_unsafe_math_optimizations
      && sqrtfn
      && hw_sqrt_exists
      && (speed_p || real_equal (&c, &dconst1_4))
      && !HONOR_SIGNED_ZEROS (mode))
    {
      unsigned int max_depth = speed_p
			       ? param_max_pow_sqrt_depth
			       : 2;

      tree expand_with_sqrts
	= expand_pow_as_sqrts (gsi, loc, arg0, arg1, max_depth);

      if (expand_with_sqrts)
	return expand_with_sqrts;
    }
  real_arithmetic (&c2, MULT_EXPR, &c, &dconst2);
  n = real_to_integer (&c2);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  c2_is_int = real_identical (&c2, &cint);

  /* Optimize pow(x,c), where 3c = n for some nonzero integer n, into

     powi(x, n/3) * powi(cbrt(x), n%3),                    n > 0;
     1.0 / (powi(x, abs(n)/3) * powi(cbrt(x), abs(n)%3)),  n < 0.

     Do not calculate the first factor when n/3 = 0.  As cbrt(x) is
     different from pow(x, 1./3.) due to rounding and behavior with
     negative x, we need to constrain this transformation to unsafe
     math and positive x or finite math.  */
  real_from_integer (&dconst3, VOIDmode, 3, SIGNED);
  real_arithmetic (&c2, MULT_EXPR, &c, &dconst3);
  real_round (&c2, mode, &c2);
  n = real_to_integer (&c2);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3);
  real_convert (&c2, mode, &c2);

  if (flag_unsafe_math_optimizations
      && cbrtfn
      && (!HONOR_NANS (mode) || tree_expr_nonnegative_p (arg0))
      && real_identical (&c2, &c)
      && !c2_is_int
      && optimize_function_for_speed_p (cfun)
      && powi_cost (n / 3) <= POWI_MAX_MULTS)
    {
      tree powi_x_ndiv3 = NULL_TREE;

      /* Attempt to fold powi(arg0, abs(n/3)) into multiplies.  If not
	 possible or profitable, give up.  Skip the degenerate case when
	 abs(n) < 3, where the result is always 1.  */
      if (absu_hwi (n) >= 3)
	{
	  powi_x_ndiv3 = gimple_expand_builtin_powi (gsi, loc, arg0,
						     abs_hwi (n / 3));
	  if (!powi_x_ndiv3)
	    return NULL_TREE;
	}

      /* Calculate powi(cbrt(x), n%3).  Don't use gimple_expand_builtin_powi
	 as that creates an unnecessary variable.  Instead, just produce
	 either cbrt(x) or cbrt(x) * cbrt(x).  */
      cbrt_x = build_and_insert_call (gsi, loc, cbrtfn, arg0);

      if (absu_hwi (n) % 3 == 1)
	powi_cbrt_x = cbrt_x;
      else
	powi_cbrt_x = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					      cbrt_x, cbrt_x);

      /* Multiply the two subexpressions, unless powi(x,abs(n)/3) = 1.  */
      if (absu_hwi (n) < 3)
	result = powi_cbrt_x;
      else
	result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					 powi_x_ndiv3, powi_cbrt_x);

      /* If n is negative, reciprocate the result.  */
      if (n < 0)
	result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
					 build_real (type, dconst1), result);

      return result;
    }

  /* No optimizations succeeded.  */
  return NULL_TREE;
}
/* ARG is the argument to a cabs builtin call in GSI with location info
   LOC.  Create a sequence of statements prior to GSI that calculates
   sqrt(R*R + I*I), where R and I are the real and imaginary components
   of ARG, respectively.  Return an expression holding the result.  */
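/* Source-level sketch (illustrative only, assuming
   -funsafe-math-optimizations and a hardware sqrt):

     double m = cabs (z);

   is expanded to the equivalent of

     double r = __real__ z, i = __imag__ z;
     double m = sqrt (r * r + i * i);

   trading the library call for inline code, at the cost of the
   overflow protection a real cabs implementation provides.  */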
static tree
gimple_expand_builtin_cabs (gimple_stmt_iterator *gsi, location_t loc, tree arg)
{
  tree real_part, imag_part, addend1, addend2, sum, result;
  tree type = TREE_TYPE (TREE_TYPE (arg));
  tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
  machine_mode mode = TYPE_MODE (type);

  if (!flag_unsafe_math_optimizations
      || !optimize_bb_for_speed_p (gimple_bb (gsi_stmt (*gsi)))
      || !sqrtfn
      || optab_handler (sqrt_optab, mode) == CODE_FOR_nothing)
    return NULL_TREE;

  real_part = build_and_insert_ref (gsi, loc, type, "cabs",
				    REALPART_EXPR, arg);
  addend1 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
				    real_part, real_part);
  imag_part = build_and_insert_ref (gsi, loc, type, "cabs",
				    IMAGPART_EXPR, arg);
  addend2 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
				    imag_part, imag_part);
  sum = build_and_insert_binop (gsi, loc, "cabs", PLUS_EXPR, addend1, addend2);
  result = build_and_insert_call (gsi, loc, sqrtfn, sum);

  return result;
}
/* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1
   on the SSA_NAME argument of each of them.  Also expand powi(x,n) into
   an optimal number of multiplies, when n is a constant.  */
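/* For example (a sketch of the powi expansion, not code from this
   file): powi (x, 8) needs only three multiplies by repeated squaring,

     double t1 = x * x;    // x^2
     double t2 = t1 * t1;  // x^4
     double t3 = t2 * t2;  // x^8

   instead of the seven multiplies of the naive product.  */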
const pass_data pass_data_cse_sincos =
{
  GIMPLE_PASS, /* type */
  "sincos", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SINCOS, /* tv_id */
  PROP_ssa, /* properties_required */
  PROP_gimple_opt_math, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_cse_sincos : public gimple_opt_pass
{
public:
  pass_cse_sincos (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cse_sincos, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      /* We no longer require either sincos or cexp, since powi expansion
	 piggybacks on this pass.  */
      return optimize;
    }

  virtual unsigned int execute (function *);

}; // class pass_cse_sincos
unsigned int
pass_cse_sincos::execute (function *fun)
{
  basic_block bb;
  bool cfg_changed = false;

  calculate_dominance_info (CDI_DOMINATORS);
  memset (&sincos_stats, 0, sizeof (sincos_stats));

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;
      bool cleanup_eh = false;

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);

	  /* Only the last stmt in a bb could throw, no need to call
	     gimple_purge_dead_eh_edges if we change something in the middle
	     of a basic block.  */
	  cleanup_eh = false;

	  if (is_gimple_call (stmt)
	      && gimple_call_lhs (stmt))
	    {
	      tree arg, arg0, arg1, result;
	      HOST_WIDE_INT n;
	      location_t loc;

	      switch (gimple_call_combined_fn (stmt))
		{
		CASE_CFN_COS:
		CASE_CFN_SIN:
		CASE_CFN_CEXPI:
		  arg = gimple_call_arg (stmt, 0);
		  /* Make sure we have either sincos or cexp.  */
		  if (!targetm.libc_has_function (function_c99_math_complex,
						  TREE_TYPE (arg))
		      && !targetm.libc_has_function (function_sincos,
						     TREE_TYPE (arg)))
		    break;

		  if (TREE_CODE (arg) == SSA_NAME)
		    cfg_changed |= execute_cse_sincos_1 (arg);
		  break;

		CASE_CFN_POW:
		  arg0 = gimple_call_arg (stmt, 0);
		  arg1 = gimple_call_arg (stmt, 1);

		  loc = gimple_location (stmt);
		  result = gimple_expand_builtin_pow (&gsi, loc, arg0, arg1);

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gassign *new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		CASE_CFN_POWI:
		  arg0 = gimple_call_arg (stmt, 0);
		  arg1 = gimple_call_arg (stmt, 1);
		  loc = gimple_location (stmt);

		  if (real_minus_onep (arg0))
		    {
		      tree t0, t1, cond, one, minus_one;
		      gassign *stmt;

		      t0 = TREE_TYPE (arg0);
		      t1 = TREE_TYPE (arg1);
		      one = build_real (t0, dconst1);
		      minus_one = build_real (t0, dconstm1);

		      cond = make_temp_ssa_name (t1, NULL, "powi_cond");
		      stmt = gimple_build_assign (cond, BIT_AND_EXPR,
						  arg1, build_int_cst (t1, 1));
		      gimple_set_location (stmt, loc);
		      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

		      result = make_temp_ssa_name (t0, NULL, "powi");
		      stmt = gimple_build_assign (result, COND_EXPR, cond,
						  minus_one, one);
		      gimple_set_location (stmt, loc);
		      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
		    }
		  else
		    {
		      if (!tree_fits_shwi_p (arg1))
			break;

		      n = tree_to_shwi (arg1);
		      result = gimple_expand_builtin_powi (&gsi, loc, arg0, n);
		    }

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gassign *new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		CASE_CFN_CABS:
		  arg0 = gimple_call_arg (stmt, 0);
		  loc = gimple_location (stmt);
		  result = gimple_expand_builtin_cabs (&gsi, loc, arg0);

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gassign *new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		default:;
		}
	    }
	}
      if (cleanup_eh)
	cfg_changed |= gimple_purge_dead_eh_edges (bb);
    }

  statistics_counter_event (fun, "sincos statements inserted",
			    sincos_stats.inserted);
  statistics_counter_event (fun, "conv statements removed",
			    sincos_stats.conv_removed);

  return cfg_changed ? TODO_cleanup_cfg : 0;
}
gimple_opt_pass *
make_pass_cse_sincos (gcc::context *ctxt)
{
  return new pass_cse_sincos (ctxt);
}
/* Return true if stmt is a type conversion operation that can be stripped
   when used in a widening multiply operation.  */
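/* For instance (an illustrative C fragment, not part of this file):

     short a, b;
     int prod = (int) a * (int) b;

   Both conversions can be stripped and the multiplication done as a
   signed widening multiply (WIDEN_MULT_EXPR), which many targets
   implement as one instruction yielding the full 32-bit product.  */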
static bool
widening_mult_conversion_strippable_p (tree result_type, gimple *stmt)
{
  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);

  if (TREE_CODE (result_type) == INTEGER_TYPE)
    {
      tree op_type;
      tree inner_op_type;

      if (!CONVERT_EXPR_CODE_P (rhs_code))
	return false;

      op_type = TREE_TYPE (gimple_assign_lhs (stmt));

      /* If the type of OP has the same precision as the result, then
	 we can strip this conversion.  The multiply operation will be
	 selected to create the correct extension as a by-product.  */
      if (TYPE_PRECISION (result_type) == TYPE_PRECISION (op_type))
	return true;

      /* We can also strip a conversion if it preserves the signed-ness of
	 the operation and doesn't narrow the range.  */
      inner_op_type = TREE_TYPE (gimple_assign_rhs1 (stmt));

      /* If the inner-most type is unsigned, then we can strip any
	 intermediate widening operation.  If it's signed, then the
	 intermediate widening operation must also be signed.  */
      if ((TYPE_UNSIGNED (inner_op_type)
	   || TYPE_UNSIGNED (op_type) == TYPE_UNSIGNED (inner_op_type))
	  && TYPE_PRECISION (op_type) > TYPE_PRECISION (inner_op_type))
	return true;

      return false;
    }

  return rhs_code == FIXED_CONVERT_EXPR;
}
/* Return true if RHS is a suitable operand for a widening multiplication,
   assuming a target type of TYPE.
   There are two cases:

     - RHS makes some value at least twice as wide.  Store that value
       in *NEW_RHS_OUT if so, and store its type in *TYPE_OUT.

     - RHS is an integer constant.  Store that value in *NEW_RHS_OUT if so,
       but leave *TYPE_OUT untouched.  */

static bool
is_widening_mult_rhs_p (tree type, tree rhs, tree *type_out,
			tree *new_rhs_out)
{
  gimple *stmt;
  tree type1, rhs1;

  if (TREE_CODE (rhs) == SSA_NAME)
    {
      stmt = SSA_NAME_DEF_STMT (rhs);
      if (is_gimple_assign (stmt))
	{
	  if (! widening_mult_conversion_strippable_p (type, stmt))
	    rhs1 = rhs;
	  else
	    {
	      rhs1 = gimple_assign_rhs1 (stmt);

	      if (TREE_CODE (rhs1) == INTEGER_CST)
		{
		  *new_rhs_out = rhs1;
		  *type_out = NULL;
		  return true;
		}
	    }
	}
      else
	rhs1 = rhs;

      type1 = TREE_TYPE (rhs1);

      if (TREE_CODE (type1) != TREE_CODE (type)
	  || TYPE_PRECISION (type1) * 2 > TYPE_PRECISION (type))
	return false;

      *new_rhs_out = rhs1;
      *type_out = type1;
      return true;
    }

  if (TREE_CODE (rhs) == INTEGER_CST)
    {
      *new_rhs_out = rhs;
      *type_out = NULL;
      return true;
    }

  return false;
}
/* Return true if STMT performs a widening multiplication, assuming the
   output type is TYPE.  If so, store the unwidened types of the operands
   in *TYPE1_OUT and *TYPE2_OUT respectively.  Also fill *RHS1_OUT and
   *RHS2_OUT such that converting those operands to types *TYPE1_OUT
   and *TYPE2_OUT would give the operands of the multiplication.  */

static bool
is_widening_mult_p (gimple *stmt,
		    tree *type1_out, tree *rhs1_out,
		    tree *type2_out, tree *rhs2_out)
{
  tree type = TREE_TYPE (gimple_assign_lhs (stmt));

  if (TREE_CODE (type) == INTEGER_TYPE)
    {
      if (TYPE_OVERFLOW_TRAPS (type))
	return false;
    }
  else if (TREE_CODE (type) != FIXED_POINT_TYPE)
    return false;

  if (!is_widening_mult_rhs_p (type, gimple_assign_rhs1 (stmt), type1_out,
			       rhs1_out))
    return false;

  if (!is_widening_mult_rhs_p (type, gimple_assign_rhs2 (stmt), type2_out,
			       rhs2_out))
    return false;

  if (*type1_out == NULL)
    {
      if (*type2_out == NULL || !int_fits_type_p (*rhs1_out, *type2_out))
	return false;
      *type1_out = *type2_out;
    }

  if (*type2_out == NULL)
    {
      if (!int_fits_type_p (*rhs2_out, *type1_out))
	return false;
      *type2_out = *type1_out;
    }

  /* Ensure that the larger of the two operands comes first.  */
  if (TYPE_PRECISION (*type1_out) < TYPE_PRECISION (*type2_out))
    {
      std::swap (*type1_out, *type2_out);
      std::swap (*rhs1_out, *rhs2_out);
    }

  return true;
}
/* Check to see if the CALL statement is an invocation of copysign
   with 1. being the first argument.  */

static bool
is_copysign_call_with_1 (gimple *call)
{
  gcall *c = dyn_cast <gcall *> (call);
  if (! c)
    return false;

  enum combined_fn code = gimple_call_combined_fn (c);

  if (code == CFN_LAST)
    return false;

  if (builtin_fn_p (code))
    {
      switch (as_builtin_fn (code))
	{
	CASE_FLT_FN (BUILT_IN_COPYSIGN):
	CASE_FLT_FN_FLOATN_NX (BUILT_IN_COPYSIGN):
	  return real_onep (gimple_call_arg (c, 0));
	default:
	  return false;
	}
    }

  if (internal_fn_p (code))
    {
      switch (as_internal_fn (code))
	{
	case IFN_COPYSIGN:
	  return real_onep (gimple_call_arg (c, 0));
	default:
	  return false;
	}
    }

  return false;
}
/* Try to expand the pattern x * copysign (1, y) into xorsign (x, y).
   This is done only when the xorsign optab is defined; if the pattern
   is not a xorsign pattern or if expansion fails, FALSE is returned,
   otherwise TRUE is returned.  */
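/* Source-level sketch of the pattern (illustrative only):

     double r = x * copysign (1.0, y);

   The result has the magnitude of x and the XOR of the signs of x and
   y, so on targets providing the xorsign optab it can be implemented
   as a single sign-bit XOR instead of a call plus a multiply.  */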
static bool
convert_expand_mult_copysign (gimple *stmt, gimple_stmt_iterator *gsi)
{
  tree treeop0, treeop1, lhs, type;
  location_t loc = gimple_location (stmt);
  lhs = gimple_assign_lhs (stmt);
  treeop0 = gimple_assign_rhs1 (stmt);
  treeop1 = gimple_assign_rhs2 (stmt);
  type = TREE_TYPE (lhs);
  machine_mode mode = TYPE_MODE (type);

  if (HONOR_SNANS (type))
    return false;

  if (TREE_CODE (treeop0) == SSA_NAME && TREE_CODE (treeop1) == SSA_NAME)
    {
      gimple *call0 = SSA_NAME_DEF_STMT (treeop0);
      if (!has_single_use (treeop0) || !is_copysign_call_with_1 (call0))
	{
	  call0 = SSA_NAME_DEF_STMT (treeop1);
	  if (!has_single_use (treeop1) || !is_copysign_call_with_1 (call0))
	    return false;

	  treeop1 = treeop0;
	}

      if (optab_handler (xorsign_optab, mode) == CODE_FOR_nothing)
	return false;

      gcall *c = as_a <gcall *> (call0);
      treeop0 = gimple_call_arg (c, 1);

      gcall *call_stmt
	= gimple_build_call_internal (IFN_XORSIGN, 2, treeop1, treeop0);
      gimple_set_lhs (call_stmt, lhs);
      gimple_set_location (call_stmt, loc);
      gsi_replace (gsi, call_stmt, true);
      return true;
    }

  return false;
}
/* Process a single gimple statement STMT, which has a MULT_EXPR as
   its rhs, and try to convert it into a WIDEN_MULT_EXPR.  The return
   value is true iff we converted the statement.  */

static bool
convert_mult_to_widen (gimple *stmt, gimple_stmt_iterator *gsi)
{
  tree lhs, rhs1, rhs2, type, type1, type2;
  enum insn_code handler;
  scalar_int_mode to_mode, from_mode, actual_mode;
  optab op;
  int actual_precision;
  location_t loc = gimple_location (stmt);
  bool from_unsigned1, from_unsigned2;

  lhs = gimple_assign_lhs (stmt);
  type = TREE_TYPE (lhs);
  if (TREE_CODE (type) != INTEGER_TYPE)
    return false;

  if (!is_widening_mult_p (stmt, &type1, &rhs1, &type2, &rhs2))
    return false;

  to_mode = SCALAR_INT_TYPE_MODE (type);
  from_mode = SCALAR_INT_TYPE_MODE (type1);
  if (to_mode == from_mode)
    return false;

  from_unsigned1 = TYPE_UNSIGNED (type1);
  from_unsigned2 = TYPE_UNSIGNED (type2);

  if (from_unsigned1 && from_unsigned2)
    op = umul_widen_optab;
  else if (!from_unsigned1 && !from_unsigned2)
    op = smul_widen_optab;
  else
    op = usmul_widen_optab;

  handler = find_widening_optab_handler_and_mode (op, to_mode, from_mode,
						  &actual_mode);

  if (handler == CODE_FOR_nothing)
    {
      if (op != smul_widen_optab)
	{
	  /* We can use a signed multiply with unsigned types as long as
	     there is a wider mode to use, or it is the smaller of the two
	     types that is unsigned.  Note that type1 >= type2, always.  */
	  if ((TYPE_UNSIGNED (type1)
	       && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
	      || (TYPE_UNSIGNED (type2)
		  && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
	    {
	      if (!GET_MODE_WIDER_MODE (from_mode).exists (&from_mode)
		  || GET_MODE_SIZE (to_mode) <= GET_MODE_SIZE (from_mode))
		return false;
	    }

	  op = smul_widen_optab;
	  handler = find_widening_optab_handler_and_mode (op, to_mode,
							  from_mode,
							  &actual_mode);

	  if (handler == CODE_FOR_nothing)
	    return false;

	  from_unsigned1 = from_unsigned2 = false;
	}
      else
	return false;
    }

  /* Ensure that the inputs to the handler are in the correct precision
     for the opcode.  This will be the full mode size.  */
  actual_precision = GET_MODE_PRECISION (actual_mode);
  if (2 * actual_precision > TYPE_PRECISION (type))
    return false;
  if (actual_precision != TYPE_PRECISION (type1)
      || from_unsigned1 != TYPE_UNSIGNED (type1))
    rhs1 = build_and_insert_cast (gsi, loc,
				  build_nonstandard_integer_type
				    (actual_precision, from_unsigned1), rhs1);
  if (actual_precision != TYPE_PRECISION (type2)
      || from_unsigned2 != TYPE_UNSIGNED (type2))
    rhs2 = build_and_insert_cast (gsi, loc,
				  build_nonstandard_integer_type
				    (actual_precision, from_unsigned2), rhs2);

  /* Handle constants.  */
  if (TREE_CODE (rhs1) == INTEGER_CST)
    rhs1 = fold_convert (type1, rhs1);
  if (TREE_CODE (rhs2) == INTEGER_CST)
    rhs2 = fold_convert (type2, rhs2);

  gimple_assign_set_rhs1 (stmt, rhs1);
  gimple_assign_set_rhs2 (stmt, rhs2);
  gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
  update_stmt (stmt);
  widen_mul_stats.widen_mults_inserted++;
  return true;
}
/* Process a single gimple statement STMT, which is found at the
   iterator GSI and has either a PLUS_EXPR or a MINUS_EXPR as its
   rhs (given by CODE), and try to convert it into a
   WIDEN_MULT_PLUS_EXPR or a WIDEN_MULT_MINUS_EXPR.  The return value
   is true iff we converted the statement.  */
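/* An illustrative source fragment (not part of this file):

     short a, b;
     long acc;
     acc = acc + (long) a * (long) b;

   can become a single WIDEN_MULT_PLUS_EXPR, mapping to a widening
   multiply-and-accumulate instruction on targets that have one.  */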
static bool
convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple *stmt,
			    enum tree_code code)
{
  gimple *rhs1_stmt = NULL, *rhs2_stmt = NULL;
  gimple *conv1_stmt = NULL, *conv2_stmt = NULL, *conv_stmt;
  tree type, type1, type2, optype;
  tree lhs, rhs1, rhs2, mult_rhs1, mult_rhs2, add_rhs;
  enum tree_code rhs1_code = ERROR_MARK, rhs2_code = ERROR_MARK;
  optab this_optab;
  enum tree_code wmult_code;
  enum insn_code handler;
  scalar_mode to_mode, from_mode, actual_mode;
  location_t loc = gimple_location (stmt);
  int actual_precision;
  bool from_unsigned1, from_unsigned2;

  lhs = gimple_assign_lhs (stmt);
  type = TREE_TYPE (lhs);
  if (TREE_CODE (type) != INTEGER_TYPE
      && TREE_CODE (type) != FIXED_POINT_TYPE)
    return false;

  if (code == MINUS_EXPR)
    wmult_code = WIDEN_MULT_MINUS_EXPR;
  else
    wmult_code = WIDEN_MULT_PLUS_EXPR;

  rhs1 = gimple_assign_rhs1 (stmt);
  rhs2 = gimple_assign_rhs2 (stmt);

  if (TREE_CODE (rhs1) == SSA_NAME)
    {
      rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
      if (is_gimple_assign (rhs1_stmt))
	rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
    }

  if (TREE_CODE (rhs2) == SSA_NAME)
    {
      rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
      if (is_gimple_assign (rhs2_stmt))
	rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
    }

  /* Allow for one conversion statement between the multiply
     and addition/subtraction statement.  If there is more than
     one conversion then we assume they would invalidate this
     transformation.  If that's not the case then they should have
     been folded before now.  */
  if (CONVERT_EXPR_CODE_P (rhs1_code))
    {
      conv1_stmt = rhs1_stmt;
      rhs1 = gimple_assign_rhs1 (rhs1_stmt);
      if (TREE_CODE (rhs1) == SSA_NAME)
	{
	  rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
	  if (is_gimple_assign (rhs1_stmt))
	    rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
	}
      else
	return false;
    }
  if (CONVERT_EXPR_CODE_P (rhs2_code))
    {
      conv2_stmt = rhs2_stmt;
      rhs2 = gimple_assign_rhs1 (rhs2_stmt);
      if (TREE_CODE (rhs2) == SSA_NAME)
	{
	  rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
	  if (is_gimple_assign (rhs2_stmt))
	    rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
	}
      else
	return false;
    }

  /* If code is WIDEN_MULT_EXPR then it would seem unnecessary to call
     is_widening_mult_p, but we still need the rhs returns.

     It might also appear that it would be sufficient to use the existing
     operands of the widening multiply, but that would limit the choice of
     multiply-and-accumulate instructions.

     If the widened-multiplication result has more than one use, it is
     probably wiser not to do the conversion.  Also restrict this operation
     to a single basic block to avoid moving the multiply to a different
     block with a higher execution frequency.  */
  if (code == PLUS_EXPR
      && (rhs1_code == MULT_EXPR || rhs1_code == WIDEN_MULT_EXPR))
    {
      if (!has_single_use (rhs1)
	  || gimple_bb (rhs1_stmt) != gimple_bb (stmt)
	  || !is_widening_mult_p (rhs1_stmt, &type1, &mult_rhs1,
				  &type2, &mult_rhs2))
	return false;
      add_rhs = rhs2;
      conv_stmt = conv1_stmt;
    }
  else if (rhs2_code == MULT_EXPR || rhs2_code == WIDEN_MULT_EXPR)
    {
      if (!has_single_use (rhs2)
	  || gimple_bb (rhs2_stmt) != gimple_bb (stmt)
	  || !is_widening_mult_p (rhs2_stmt, &type1, &mult_rhs1,
				  &type2, &mult_rhs2))
	return false;
      add_rhs = rhs1;
      conv_stmt = conv2_stmt;
    }
  else
    return false;

  to_mode = SCALAR_TYPE_MODE (type);
  from_mode = SCALAR_TYPE_MODE (type1);
  if (to_mode == from_mode)
    return false;

  from_unsigned1 = TYPE_UNSIGNED (type1);
  from_unsigned2 = TYPE_UNSIGNED (type2);
  optype = type1;

  /* There's no such thing as a mixed sign madd yet, so use a wider mode.  */
  if (from_unsigned1 != from_unsigned2)
    {
      if (!INTEGRAL_TYPE_P (type))
	return false;
      /* We can use a signed multiply with unsigned types as long as
	 there is a wider mode to use, or it is the smaller of the two
	 types that is unsigned.  Note that type1 >= type2, always.  */
      if ((from_unsigned1
	   && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
	  || (from_unsigned2
	      && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
	{
	  if (!GET_MODE_WIDER_MODE (from_mode).exists (&from_mode)
	      || GET_MODE_SIZE (from_mode) >= GET_MODE_SIZE (to_mode))
	    return false;
	}

      from_unsigned1 = from_unsigned2 = false;
      optype = build_nonstandard_integer_type (GET_MODE_PRECISION (from_mode),
					       false);
    }

  /* If there was a conversion between the multiply and addition
     then we need to make sure it fits a multiply-and-accumulate.
     There should be a single mode change which does not change the
     value.  */
  if (conv_stmt)
    {
      /* We use the original, unmodified data types for this.  */
      tree from_type = TREE_TYPE (gimple_assign_rhs1 (conv_stmt));
      tree to_type = TREE_TYPE (gimple_assign_lhs (conv_stmt));
      int data_size = TYPE_PRECISION (type1) + TYPE_PRECISION (type2);
      bool is_unsigned = TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2);

      if (TYPE_PRECISION (from_type) > TYPE_PRECISION (to_type))
	{
	  /* Conversion is a truncate.  */
	  if (TYPE_PRECISION (to_type) < data_size)
	    return false;
	}
      else if (TYPE_PRECISION (from_type) < TYPE_PRECISION (to_type))
	{
	  /* Conversion is an extend.  Check it's the right sort.  */
	  if (TYPE_UNSIGNED (from_type) != is_unsigned
	      && !(is_unsigned && TYPE_PRECISION (from_type) > data_size))
	    return false;
	}
      /* else convert is a no-op for our purposes.  */
    }

  /* Verify that the machine can perform a widening multiply
     accumulate in this mode/signedness combination, otherwise
     this transformation is likely to pessimize code.  */
  this_optab = optab_for_tree_code (wmult_code, optype, optab_default);
  handler = find_widening_optab_handler_and_mode (this_optab, to_mode,
						  from_mode, &actual_mode);

  if (handler == CODE_FOR_nothing)
    return false;

  /* Ensure that the inputs to the handler are in the correct precision
     for the opcode.  This will be the full mode size.  */
  actual_precision = GET_MODE_PRECISION (actual_mode);
  if (actual_precision != TYPE_PRECISION (type1)
      || from_unsigned1 != TYPE_UNSIGNED (type1))
    mult_rhs1 = build_and_insert_cast (gsi, loc,
				       build_nonstandard_integer_type
					 (actual_precision, from_unsigned1),
				       mult_rhs1);
  if (actual_precision != TYPE_PRECISION (type2)
      || from_unsigned2 != TYPE_UNSIGNED (type2))
    mult_rhs2 = build_and_insert_cast (gsi, loc,
				       build_nonstandard_integer_type
					 (actual_precision, from_unsigned2),
				       mult_rhs2);

  if (!useless_type_conversion_p (type, TREE_TYPE (add_rhs)))
    add_rhs = build_and_insert_cast (gsi, loc, type, add_rhs);

  /* Handle constants.  */
  if (TREE_CODE (mult_rhs1) == INTEGER_CST)
    mult_rhs1 = fold_convert (type1, mult_rhs1);
  if (TREE_CODE (mult_rhs2) == INTEGER_CST)
    mult_rhs2 = fold_convert (type2, mult_rhs2);

  gimple_assign_set_rhs_with_ops (gsi, wmult_code, mult_rhs1, mult_rhs2,
				  add_rhs);
  update_stmt (gsi_stmt (*gsi));
  widen_mul_stats.maccs_inserted++;
  return true;
}
/* Given a result MUL_RESULT which is a result of a multiplication of OP1 and
   OP2 and which we know is used in statements that can be, together with the
   multiplication, converted to FMAs, perform the transformation.  */
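/* Illustrative example (a sketch, not code from this pass): given

     tmp = a * b;
     x = tmp + c;
     y = d - tmp;

   both uses can be rewritten so that tmp becomes dead:

     x = IFN_FMA (a, b, c);
     y = IFN_FMA (-a, b, d);   // folded to FNMA where supported

   The negate handling below is what yields the FMS/FNMA/FNMS forms.  */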
static void
convert_mult_to_fma_1 (tree mul_result, tree op1, tree op2)
{
  gimple *use_stmt;
  tree type = TREE_TYPE (mul_result);
  imm_use_iterator imm_iter;
  gcall *fma_stmt;

  FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, mul_result)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      tree addop, mulop1 = op1, result = mul_result;
      bool negate_p = false;
      gimple_seq seq = NULL;

      if (is_gimple_debug (use_stmt))
	continue;

      if (is_gimple_assign (use_stmt)
	  && gimple_assign_rhs_code (use_stmt) == NEGATE_EXPR)
	{
	  result = gimple_assign_lhs (use_stmt);
	  use_operand_p use_p;
	  gimple *neguse_stmt;
	  single_imm_use (gimple_assign_lhs (use_stmt), &use_p, &neguse_stmt);
	  gsi_remove (&gsi, true);
	  release_defs (use_stmt);

	  use_stmt = neguse_stmt;
	  gsi = gsi_for_stmt (use_stmt);
	  negate_p = true;
	}

      tree cond, else_value, ops[3];
      enum tree_code code;
      if (!can_interpret_as_conditional_op_p (use_stmt, &cond, &code,
					      ops, &else_value))
	gcc_unreachable ();
      addop = ops[0] == result ? ops[1] : ops[0];

      if (code == MINUS_EXPR)
	{
	  if (ops[0] == result)
	    /* a * b - c -> a * b + (-c)  */
	    addop = gimple_build (&seq, NEGATE_EXPR, type, addop);
	  else
	    /* a - b * c -> (-b) * c + a */
	    negate_p = !negate_p;
	}

      if (negate_p)
	mulop1 = gimple_build (&seq, NEGATE_EXPR, type, mulop1);

      if (seq)
	gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);

      if (cond)
	fma_stmt = gimple_build_call_internal (IFN_COND_FMA, 5, cond, mulop1,
					       op2, addop, else_value);
      else
	fma_stmt = gimple_build_call_internal (IFN_FMA, 3, mulop1, op2, addop);
      gimple_set_lhs (fma_stmt, gimple_get_lhs (use_stmt));
      gimple_call_set_nothrow (fma_stmt, !stmt_can_throw_internal (cfun,
								   use_stmt));
      gsi_replace (&gsi, fma_stmt, true);
      /* Follow all SSA edges so that we generate FMS, FNMA and FNMS
	 regardless of where the negation occurs.  */
      gimple *orig_stmt = gsi_stmt (gsi);
      if (fold_stmt (&gsi, follow_all_ssa_edges))
	{
	  if (maybe_clean_or_replace_eh_stmt (orig_stmt, gsi_stmt (gsi)))
	    gcc_unreachable ();
	  update_stmt (gsi_stmt (gsi));
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Generated FMA ");
	  print_gimple_stmt (dump_file, gsi_stmt (gsi), 0, TDF_NONE);
	  fprintf (dump_file, "\n");
	}

      /* If the FMA result is negated in a single use, fold the negation
	 too.  */
      orig_stmt = gsi_stmt (gsi);
      use_operand_p use_p;
      gimple *neg_stmt;
      if (is_gimple_call (orig_stmt)
	  && gimple_call_internal_p (orig_stmt)
	  && gimple_call_lhs (orig_stmt)
	  && TREE_CODE (gimple_call_lhs (orig_stmt)) == SSA_NAME
	  && single_imm_use (gimple_call_lhs (orig_stmt), &use_p, &neg_stmt)
	  && is_gimple_assign (neg_stmt)
	  && gimple_assign_rhs_code (neg_stmt) == NEGATE_EXPR
	  && !stmt_could_throw_p (cfun, neg_stmt))
	{
	  gsi = gsi_for_stmt (neg_stmt);
	  if (fold_stmt (&gsi, follow_all_ssa_edges))
	    {
	      if (maybe_clean_or_replace_eh_stmt (neg_stmt, gsi_stmt (gsi)))
		gcc_unreachable ();
	      update_stmt (gsi_stmt (gsi));
	      if (dump_file && (dump_flags & TDF_DETAILS))
		{
		  fprintf (dump_file, "Folded FMA negation ");
		  print_gimple_stmt (dump_file, gsi_stmt (gsi), 0, TDF_NONE);
		  fprintf (dump_file, "\n");
		}
	    }
	}

      widen_mul_stats.fmas_inserted++;
    }
}
/* Data necessary to perform the actual transformation from a multiplication
   and an addition to an FMA after the decision is taken that it should be
   done, and to then delete the multiplication statement from the function
   IL.  */

struct fma_transformation_info
{
  gimple *mul_stmt;
  tree mul_result;
  tree op1;
  tree op2;
};

/* Structure containing the current state of FMA deferring, i.e. whether we
   are deferring, whether to continue deferring, and all data necessary to
   come back and perform all deferred transformations.  */

class fma_deferring_state
{
public:
  /* Class constructor.  Pass true as PERFORM_DEFERRING in order to actually
     do any deferring.  */

  fma_deferring_state (bool perform_deferring)
    : m_candidates (), m_mul_result_set (), m_initial_phi (NULL),
      m_last_result (NULL_TREE), m_deferring_p (perform_deferring) {}

  /* List of FMA candidates for which the transformation has been determined
     possible but which, at this point in the BB analysis, we do not yet
     consider beneficial.  */
  auto_vec<fma_transformation_info, 8> m_candidates;

  /* Set of results of multiplications that are part of already deferred FMA
     candidates.  */
  hash_set<tree> m_mul_result_set;

  /* The PHI that supposedly feeds back result of a FMA to another over loop
     boundary.  */
  gphi *m_initial_phi;

  /* Result of the last produced FMA candidate or NULL if there has not been
     one.  */
  tree m_last_result;

  /* If true, deferring might still be profitable.  If false, transform all
     candidates and no longer defer.  */
  bool m_deferring_p;
};

/* Transform all deferred FMA candidates and mark STATE as no longer
   deferring.  */

static void
cancel_fma_deferring (fma_deferring_state *state)
{
  if (!state->m_deferring_p)
    return;

  for (unsigned i = 0; i < state->m_candidates.length (); i++)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Generating deferred FMA\n");

      const fma_transformation_info &fti = state->m_candidates[i];
      convert_mult_to_fma_1 (fti.mul_result, fti.op1, fti.op2);

      gimple_stmt_iterator gsi = gsi_for_stmt (fti.mul_stmt);
      gsi_remove (&gsi, true);
      release_defs (fti.mul_stmt);
    }
  state->m_deferring_p = false;
}
/* If OP is an SSA name defined by a PHI node, return the PHI statement.
   Otherwise return NULL.  */

static gphi *
result_of_phi (tree op)
{
  if (TREE_CODE (op) != SSA_NAME)
    return NULL;

  return dyn_cast <gphi *> (SSA_NAME_DEF_STMT (op));
}
/* After processing statements of a BB and recording STATE, return true if the
   initial phi is fed by the last FMA candidate result or one such result from
   previously processed BBs marked in LAST_RESULT_SET.  */

static bool
last_fma_candidate_feeds_initial_phi (fma_deferring_state *state,
				      hash_set<tree> *last_result_set)
{
  ssa_op_iter iter;
  use_operand_p use;
  FOR_EACH_PHI_ARG (use, state->m_initial_phi, iter, SSA_OP_USE)
    {
      tree t = USE_FROM_PTR (use);
      if (t == state->m_last_result
	  || last_result_set->contains (t))
	return true;
    }

  return false;
}
/* Combine the multiplication at MUL_STMT with operands MULOP1 and MULOP2
   with uses in additions and subtractions to form fused multiply-add
   operations.  Returns true if successful and MUL_STMT should be removed.
   If MUL_COND is nonnull, the multiplication in MUL_STMT is conditional
   on MUL_COND, otherwise it is unconditional.

   If STATE indicates that we are deferring FMA transformation, that means
   that we do not produce FMAs for basic blocks which look like:

    <bb 6>
    # accumulator_111 = PHI <0.0(5), accumulator_66(6)>
    _65 = _14 * _16;
    accumulator_66 = _65 + accumulator_111;

   or its unrolled version, i.e. with several FMA candidates that feed result
   of one into the addend of another.  Instead, we add them to a list in STATE
   and if we later discover an FMA candidate that is not part of such a chain,
   we go back and perform all deferred past candidates.  */
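/* A typical source-level shape of the deferred case (illustrative):

     double acc = 0.0;
     for (int i = 0; i < n; i++)
       acc = acc + a[i] * b[i];

   Fusing the body into one FMA chains every iteration through the
   FMA's full latency; on cores where that is slow (see
   param_avoid_fma_max_bits), keeping the multiply separate can let
   the multiplications run ahead of the dependent additions.  */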
static bool
convert_mult_to_fma (gimple *mul_stmt, tree op1, tree op2,
		     fma_deferring_state *state, tree mul_cond = NULL_TREE)
{
  tree mul_result = gimple_get_lhs (mul_stmt);
  tree type = TREE_TYPE (mul_result);
  gimple *use_stmt, *neguse_stmt;
  use_operand_p use_p;
  imm_use_iterator imm_iter;

  if (FLOAT_TYPE_P (type)
      && flag_fp_contract_mode == FP_CONTRACT_OFF)
    return false;

  /* We don't want to do bitfield reduction ops.  */
  if (INTEGRAL_TYPE_P (type)
      && (!type_has_mode_precision_p (type) || TYPE_OVERFLOW_TRAPS (type)))
    return false;

  /* If the target doesn't support it, don't generate it.  We assume that
     if fma isn't available then fms, fnma or fnms are not either.  */
  optimization_type opt_type = bb_optimization_type (gimple_bb (mul_stmt));
  if (!direct_internal_fn_supported_p (IFN_FMA, type, opt_type))
    return false;

  /* If the multiplication has zero uses, it is kept around probably because
     of -fnon-call-exceptions.  Don't optimize it away in that case,
     that is DCE's job.  */
  if (has_zero_uses (mul_result))
    return false;

  bool check_defer
    = (state->m_deferring_p
       && (tree_to_shwi (TYPE_SIZE (type))
	   <= param_avoid_fma_max_bits));
  bool defer = check_defer;
  bool seen_negate_p = false;
  /* Make sure that the multiplication statement becomes dead after
     the transformation, thus that all uses are transformed to FMAs.
     This means we assume that an FMA operation has the same cost
     as an addition.  */
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, mul_result)
    {
      tree result = mul_result;
      bool negate_p = false;

      use_stmt = USE_STMT (use_p);

      if (is_gimple_debug (use_stmt))
	continue;

      /* For now restrict this operation to single basic blocks.  In theory
	 we would want to support sinking the multiplication in
	 m = a*b;
	 if ()
	   ma = m + c;
	 else
	   d = m;
	 to form a fma in the then block and sink the multiplication to the
	 else block.  */
      if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
	return false;

      /* A negate on the multiplication leads to FNMA.  */
      if (is_gimple_assign (use_stmt)
	  && gimple_assign_rhs_code (use_stmt) == NEGATE_EXPR)
	{
	  ssa_op_iter iter;
	  use_operand_p usep;

	  /* If (due to earlier missed optimizations) we have two
	     negates of the same value, treat them as equivalent
	     to a single negate with multiple uses.  */
	  if (seen_negate_p)
	    return false;

	  result = gimple_assign_lhs (use_stmt);

	  /* Make sure the negate statement becomes dead with this
	     single transformation.  */
	  if (!single_imm_use (gimple_assign_lhs (use_stmt),
			       &use_p, &neguse_stmt))
	    return false;

	  /* Make sure the multiplication isn't also used on that stmt.  */
	  FOR_EACH_PHI_OR_STMT_USE (usep, neguse_stmt, iter, SSA_OP_USE)
	    if (USE_FROM_PTR (usep) == mul_result)
	      return false;

	  /* Re-validate.  */
	  use_stmt = neguse_stmt;
	  if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
	    return false;

	  negate_p = seen_negate_p = true;
	}

      tree cond, else_value, ops[3];
      enum tree_code code;
      if (!can_interpret_as_conditional_op_p (use_stmt, &cond, &code, ops,
					      &else_value))
	return false;

      switch (code)
	{
	case MINUS_EXPR:
	  if (ops[1] == result)
	    negate_p = !negate_p;
	  break;
	case PLUS_EXPR:
	  break;
	default:
	  /* FMA can only be formed from PLUS and MINUS.  */
	  return false;
	}

      if (mul_cond && cond != mul_cond)
	return false;

      if (cond)
	{
	  if (cond == result || else_value == result)
	    return false;
	  if (!direct_internal_fn_supported_p (IFN_COND_FMA, type, opt_type))
	    return false;
	}

      /* If the subtrahend (OPS[1]) is computed by a MULT_EXPR that
	 we'll visit later, we might be able to get a more profitable
	 match with fnma.
	 OTOH, if we don't, a negate / fma pair has likely lower latency
	 than a mult / subtract pair.  */
      if (code == MINUS_EXPR
	  && !negate_p
	  && ops[0] == result
	  && !direct_internal_fn_supported_p (IFN_FMS, type, opt_type)
	  && direct_internal_fn_supported_p (IFN_FNMA, type, opt_type)
	  && TREE_CODE (ops[1]) == SSA_NAME
	  && has_single_use (ops[1]))
	{
	  gimple *stmt2 = SSA_NAME_DEF_STMT (ops[1]);
	  if (is_gimple_assign (stmt2)
	      && gimple_assign_rhs_code (stmt2) == MULT_EXPR)
	    return false;
	}

      /* We can't handle a * b + a * b.  */
      if (ops[0] == ops[1])
	return false;
      /* If deferring, make sure we are not looking at an instruction that
	 wouldn't have existed if we were not.  */
      if (state->m_deferring_p
	  && (state->m_mul_result_set.contains (ops[0])
	      || state->m_mul_result_set.contains (ops[1])))
	return false;

      if (check_defer)
	{
	  tree use_lhs = gimple_get_lhs (use_stmt);
	  if (state->m_last_result)
	    {
	      if (ops[1] == state->m_last_result
		  || ops[0] == state->m_last_result)
		defer = true;
	      else
		defer = false;
	    }
	  else
	    {
	      gcc_checking_assert (!state->m_initial_phi);
	      gphi *phi;
	      if (ops[0] == result)
		phi = result_of_phi (ops[1]);
	      else
		{
		  gcc_assert (ops[1] == result);
		  phi = result_of_phi (ops[0]);
		}

	      if (phi)
		{
		  state->m_initial_phi = phi;
		  defer = true;
		}
	      else
		defer = false;
	    }

	  state->m_last_result = use_lhs;
	  check_defer = false;
	}
      else
	defer = false;

      /* While it is possible to validate whether or not the exact form that
	 we've recognized is available in the backend, the assumption is that
	 if the deferring logic above did not trigger, the transformation is
	 never a loss.  For instance, suppose the target only has the plain FMA
	 pattern available.  Consider a*b-c -> fma(a,b,-c): we've exchanged
	 MUL+SUB for FMA+NEG, which is still two operations.  Consider
	 -(a*b)-c -> fma(-a,b,-c): we still have 3 operations, but in the FMA
	 form the two NEGs are independent and could be run in parallel.  */
    }

  if (defer)
    {
      fma_transformation_info fti;
      fti.mul_stmt = mul_stmt;
      fti.mul_result = mul_result;
      fti.op1 = op1;
      fti.op2 = op2;
      state->m_candidates.safe_push (fti);
      state->m_mul_result_set.add (mul_result);

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Deferred generating FMA for multiplication ");
	  print_gimple_stmt (dump_file, mul_stmt, 0, TDF_NONE);
	  fprintf (dump_file, "\n");
	}

      return false;
    }
  else
    {
      if (state->m_deferring_p)
	cancel_fma_deferring (state);
      convert_mult_to_fma_1 (mul_result, op1, op2);
      return true;
    }
}
/* Helper function of match_uaddsub_overflow.  Return 1
   if USE_STMT is unsigned overflow check ovf != 0 for
   STMT, -1 if USE_STMT is unsigned overflow check ovf == 0
   for STMT and 0 otherwise.  */
static int
uaddsub_overflow_check_p (gimple *stmt, gimple *use_stmt, tree maxval)
{
  enum tree_code ccode = ERROR_MARK;
  tree crhs1 = NULL_TREE, crhs2 = NULL_TREE;
  if (gimple_code (use_stmt) == GIMPLE_COND)
    {
      ccode = gimple_cond_code (use_stmt);
      crhs1 = gimple_cond_lhs (use_stmt);
      crhs2 = gimple_cond_rhs (use_stmt);
    }
  else if (is_gimple_assign (use_stmt))
    {
      if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
	{
	  ccode = gimple_assign_rhs_code (use_stmt);
	  crhs1 = gimple_assign_rhs1 (use_stmt);
	  crhs2 = gimple_assign_rhs2 (use_stmt);
	}
      else if (gimple_assign_rhs_code (use_stmt) == COND_EXPR)
	{
	  tree cond = gimple_assign_rhs1 (use_stmt);
	  if (COMPARISON_CLASS_P (cond))
	    {
	      ccode = TREE_CODE (cond);
	      crhs1 = TREE_OPERAND (cond, 0);
	      crhs2 = TREE_OPERAND (cond, 1);
	    }
	  else
	    return 0;
	}
      else
	return 0;
    }
  else
    return 0;

  if (TREE_CODE_CLASS (ccode) != tcc_comparison)
    return 0;

  enum tree_code code = gimple_assign_rhs_code (stmt);
  tree lhs = gimple_assign_lhs (stmt);
  tree rhs1 = gimple_assign_rhs1 (stmt);
  tree rhs2 = gimple_assign_rhs2 (stmt);

  switch (ccode)
    {
    case GT_EXPR:
    case LE_EXPR:
      if (maxval)
	{
	  /* r = a + b; r > maxval or r <= maxval  */
	  if (crhs1 == lhs
	      && TREE_CODE (crhs2) == INTEGER_CST
	      && tree_int_cst_equal (crhs2, maxval))
	    return ccode == GT_EXPR ? 1 : -1;
	  break;
	}
      /* r = a - b; r > a or r <= a
	 r = a + b; a > r or a <= r or b > r or b <= r.  */
      if ((code == MINUS_EXPR && crhs1 == lhs && crhs2 == rhs1)
	  || (code == PLUS_EXPR && (crhs1 == rhs1 || crhs1 == rhs2)
	      && crhs2 == lhs))
	return ccode == GT_EXPR ? 1 : -1;
      break;
    case LT_EXPR:
    case GE_EXPR:
      if (maxval)
	break;
      /* r = a - b; a < r or a >= r
	 r = a + b; r < a or r >= a or r < b or r >= b.  */
      if ((code == MINUS_EXPR && crhs1 == rhs1 && crhs2 == lhs)
	  || (code == PLUS_EXPR && crhs1 == lhs
	      && (crhs2 == rhs1 || crhs2 == rhs2)))
	return ccode == LT_EXPR ? 1 : -1;
      break;
    default:
      break;
    }
  return 0;
}
/* Recognize for unsigned x
   x = y - z;
   if (x > y)
   where there are other uses of x and replace it with
   _7 = SUB_OVERFLOW (y, z);
   x = REALPART_EXPR <_7>;
   _8 = IMAGPART_EXPR <_7>;
   if (_8)
   and similarly for addition.

   Also recognize:
   yc = (type) y;
   zc = (type) z;
   x = yc + zc;
   if (x > max)
   where y and z have unsigned types with maximum max
   and there are other uses of x and all of those cast x
   back to that unsigned type and again replace it with
   _7 = ADD_OVERFLOW (y, z);
   _9 = REALPART_EXPR <_7>;
   _8 = IMAGPART_EXPR <_7>;
   if (_8)
   and replace (utype) x with _9.  */
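/* Source-level sketch of the first pattern (illustrative only;
   handle_overflow and use are hypothetical placeholders):

     unsigned x = y - z;
     if (x > y)           // borrow occurred
       handle_overflow ();
     use (x);

   becomes a single SUB_OVERFLOW whose IMAGPART_EXPR feeds the branch,
   so a subtract-with-overflow-flag instruction replaces the separate
   comparison.  */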
static bool
match_uaddsub_overflow (gimple_stmt_iterator *gsi, gimple *stmt,
			enum tree_code code)
{
  tree lhs = gimple_assign_lhs (stmt);
  tree type = TREE_TYPE (lhs);
  use_operand_p use_p;
  imm_use_iterator iter;
  bool use_seen = false;
  bool ovf_use_seen = false;
  gimple *use_stmt;
  gimple *add_stmt = NULL;
  bool add_first = false;

  gcc_checking_assert (code == PLUS_EXPR || code == MINUS_EXPR);
  if (!INTEGRAL_TYPE_P (type)
      || !TYPE_UNSIGNED (type)
      || has_zero_uses (lhs)
      || (code == MINUS_EXPR
	  && optab_handler (usubv4_optab,
			    TYPE_MODE (type)) == CODE_FOR_nothing))
    return false;

  FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
    {
      use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
	continue;

      if (uaddsub_overflow_check_p (stmt, use_stmt, NULL_TREE))
	ovf_use_seen = true;
      else
	use_seen = true;
      if (ovf_use_seen && use_seen)
	break;
    }

  tree rhs1 = gimple_assign_rhs1 (stmt);
  tree rhs2 = gimple_assign_rhs2 (stmt);
  tree maxval = NULL_TREE;
  if (!ovf_use_seen
      || !use_seen
      || (code == PLUS_EXPR
	  && optab_handler (uaddv4_optab,
			    TYPE_MODE (type)) == CODE_FOR_nothing))
    {
      if (code != PLUS_EXPR)
	return false;
      if (TREE_CODE (rhs1) != SSA_NAME
	  || !gimple_assign_cast_p (SSA_NAME_DEF_STMT (rhs1)))
	return false;
      rhs1 = gimple_assign_rhs1 (SSA_NAME_DEF_STMT (rhs1));
      tree type1 = TREE_TYPE (rhs1);
      if (!INTEGRAL_TYPE_P (type1)
	  || !TYPE_UNSIGNED (type1)
	  || TYPE_PRECISION (type1) >= TYPE_PRECISION (type)
	  || (TYPE_PRECISION (type1)
	      != GET_MODE_BITSIZE (SCALAR_INT_TYPE_MODE (type1))))
	return false;
      if (TREE_CODE (rhs2) == INTEGER_CST)
	{
	  if (wi::ne_p (wi::rshift (wi::to_wide (rhs2),
				    TYPE_PRECISION (type1),
				    UNSIGNED), 0))
	    return false;
	  rhs2 = fold_convert (type1, rhs2);
	}
      else
	{
	  if (TREE_CODE (rhs2) != SSA_NAME
	      || !gimple_assign_cast_p (SSA_NAME_DEF_STMT (rhs2)))
	    return false;
	  rhs2 = gimple_assign_rhs1 (SSA_NAME_DEF_STMT (rhs2));
	  tree type2 = TREE_TYPE (rhs2);
	  if (!INTEGRAL_TYPE_P (type2)
	      || !TYPE_UNSIGNED (type2)
	      || TYPE_PRECISION (type2) >= TYPE_PRECISION (type)
	      || (TYPE_PRECISION (type2)
		  != GET_MODE_BITSIZE (SCALAR_INT_TYPE_MODE (type2))))
	    return false;
	}
      if (TYPE_PRECISION (type1) >= TYPE_PRECISION (TREE_TYPE (rhs2)))
	type = type1;
      else
	type = TREE_TYPE (rhs2);

      if (TREE_CODE (type) != INTEGER_TYPE
	  || optab_handler (uaddv4_optab,
			    TYPE_MODE (type)) == CODE_FOR_nothing)
	return false;

      maxval = wide_int_to_tree (type, wi::max_value (TYPE_PRECISION (type),
						      UNSIGNED));
      ovf_use_seen = false;
      use_seen = false;
      basic_block use_bb = NULL;
      FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
	{
	  use_stmt = USE_STMT (use_p);
	  if (is_gimple_debug (use_stmt))
	    continue;

	  if (uaddsub_overflow_check_p (stmt, use_stmt, maxval))
	    {
	      ovf_use_seen = true;
	      use_bb = gimple_bb (use_stmt);
	    }
	  else
	    {
	      if (!gimple_assign_cast_p (use_stmt)
		  || gimple_assign_rhs_code (use_stmt) == VIEW_CONVERT_EXPR)
		return false;
	      tree use_lhs = gimple_assign_lhs (use_stmt);
	      if (!INTEGRAL_TYPE_P (TREE_TYPE (use_lhs))
		  || (TYPE_PRECISION (TREE_TYPE (use_lhs))
		      > TYPE_PRECISION (type)))
		return false;
	      use_seen = true;
	    }
	}
      if (!ovf_use_seen)
	return false;

      if (!useless_type_conversion_p (type, TREE_TYPE (rhs1)))
	{
	  if (!use_seen)
	    return false;
	  tree new_rhs1 = make_ssa_name (type);
	  gimple *g = gimple_build_assign (new_rhs1, NOP_EXPR, rhs1);
	  gsi_insert_before (gsi, g, GSI_SAME_STMT);
	  rhs1 = new_rhs1;
	}
      else if (!useless_type_conversion_p (type, TREE_TYPE (rhs2)))
	{
	  if (!use_seen)
	    return false;
	  tree new_rhs2 = make_ssa_name (type);
	  gimple *g = gimple_build_assign (new_rhs2, NOP_EXPR, rhs2);
	  gsi_insert_before (gsi, g, GSI_SAME_STMT);
	  rhs2 = new_rhs2;
	}
      else if (!use_seen)
	{
	  /* If there are no uses of the wider addition, check if
	     forwprop has not created a narrower addition.
	     Require it to be in the same bb as the overflow check.  */
	  FOR_EACH_IMM_USE_FAST (use_p, iter, rhs1)
	    {
	      use_stmt = USE_STMT (use_p);
	      if (is_gimple_debug (use_stmt))
		continue;

	      if (use_stmt == stmt)
		continue;

	      if (!is_gimple_assign (use_stmt)
		  || gimple_bb (use_stmt) != use_bb
		  || gimple_assign_rhs_code (use_stmt) != PLUS_EXPR)
		continue;

	      if (gimple_assign_rhs1 (use_stmt) == rhs1)
		{
		  if (!operand_equal_p (gimple_assign_rhs2 (use_stmt),
					rhs2, 0))
		    continue;
		}
	      else if (gimple_assign_rhs2 (use_stmt) == rhs1)
		{
		  if (gimple_assign_rhs1 (use_stmt) != rhs2)
		    continue;
		}
	      else
		continue;

	      add_stmt = use_stmt;
	      break;
	    }
	  if (add_stmt == NULL)
	    return false;

	  /* If stmt and add_stmt are in the same bb, we need to find out
	     which one is earlier.  If they are in different bbs, we've
	     checked add_stmt is in the same bb as one of the uses of the
	     stmt lhs, so stmt needs to dominate add_stmt too.  */
	  if (gimple_bb (stmt) == gimple_bb (add_stmt))
	    {
	      gimple_stmt_iterator gsif = *gsi;
	      gimple_stmt_iterator gsib = *gsi;
	      int i;
	      /* Search both forward and backward from stmt and have a small
		 upper bound.  */
	      for (i = 0; i < 128; i++)
		{
		  if (!gsi_end_p (gsib))
		    {
		      gsi_prev_nondebug (&gsib);
		      if (gsi_stmt (gsib) == add_stmt)
			{
			  add_first = true;
			  break;
			}
		    }
		  else if (gsi_end_p (gsif))
		    break;
		  if (!gsi_end_p (gsif))
		    {
		      gsi_next_nondebug (&gsif);
		      if (gsi_stmt (gsif) == add_stmt)
			break;
		    }
		}
	      if (i == 128)
		return false;
	      if (add_first)
		*gsi = gsi_for_stmt (add_stmt);
	    }
	}
    }

  tree ctype = build_complex_type (type);
  gcall *g = gimple_build_call_internal (code == PLUS_EXPR
					 ? IFN_ADD_OVERFLOW : IFN_SUB_OVERFLOW,
					 2, rhs1, rhs2);
  tree ctmp = make_ssa_name (ctype);
  gimple_call_set_lhs (g, ctmp);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);
  tree new_lhs = maxval ? make_ssa_name (type) : lhs;
  gassign *g2 = gimple_build_assign (new_lhs, REALPART_EXPR,
				     build1 (REALPART_EXPR, type, ctmp));
  if (maxval || add_first)
    {
      gsi_insert_before (gsi, g2, GSI_SAME_STMT);
      if (add_first)
	*gsi = gsi_for_stmt (stmt);
    }
  else
    gsi_replace (gsi, g2, true);
  tree ovf = make_ssa_name (type);
  g2 = gimple_build_assign (ovf, IMAGPART_EXPR,
			    build1 (IMAGPART_EXPR, type, ctmp));
  gsi_insert_after (gsi, g2, GSI_NEW_STMT);

  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
    {
      if (is_gimple_debug (use_stmt))
	continue;

      int ovf_use = uaddsub_overflow_check_p (stmt, use_stmt, maxval);
      if (ovf_use == 0)
	{
	  gcc_checking_assert (maxval);
	  tree use_lhs = gimple_assign_lhs (use_stmt);
	  gimple_assign_set_rhs1 (use_stmt, new_lhs);
	  if (useless_type_conversion_p (TREE_TYPE (use_lhs),
					 TREE_TYPE (new_lhs)))
	    gimple_assign_set_rhs_code (use_stmt, SSA_NAME);
	  update_stmt (use_stmt);
	  continue;
	}
      if (gimple_code (use_stmt) == GIMPLE_COND)
	{
	  gcond *cond_stmt = as_a <gcond *> (use_stmt);
	  gimple_cond_set_lhs (cond_stmt, ovf);
	  gimple_cond_set_rhs (cond_stmt, build_int_cst (type, 0));
	  gimple_cond_set_code (cond_stmt, ovf_use == 1 ? NE_EXPR : EQ_EXPR);
	}
      else
	{
	  gcc_checking_assert (is_gimple_assign (use_stmt));
	  if (gimple_assign_rhs_class (use_stmt) == GIMPLE_BINARY_RHS)
	    {
	      gimple_assign_set_rhs1 (use_stmt, ovf);
	      gimple_assign_set_rhs2 (use_stmt, build_int_cst (type, 0));
	      gimple_assign_set_rhs_code (use_stmt,
					  ovf_use == 1 ? NE_EXPR : EQ_EXPR);
	    }
	  else
	    {
	      gcc_checking_assert (gimple_assign_rhs_code (use_stmt)
				   == COND_EXPR);
	      tree cond = build2 (ovf_use == 1 ? NE_EXPR : EQ_EXPR,
				  boolean_type_node, ovf,
				  build_int_cst (type, 0));
	      gimple_assign_set_rhs1 (use_stmt, cond);
	    }
	}
      update_stmt (use_stmt);
    }
  if (maxval)
    {
      gimple_stmt_iterator gsi2 = gsi_for_stmt (stmt);
      gsi_remove (&gsi2, true);
      if (add_stmt)
	{
	  gimple *g = gimple_build_assign (gimple_assign_lhs (add_stmt),
					   new_lhs);
	  gsi2 = gsi_for_stmt (add_stmt);
	  gsi_replace (&gsi2, g, true);
	}
    }
  return true;
}
/* Return true if target has support for divmod.  */

static bool
target_supports_divmod_p (optab divmod_optab, optab div_optab, machine_mode mode)
{
  /* If target supports hardware divmod insn, use it for divmod.  */
  if (optab_handler (divmod_optab, mode) != CODE_FOR_nothing)
    return true;

  /* Check if libfunc for divmod is available.  */
  rtx libfunc = optab_libfunc (divmod_optab, mode);
  if (libfunc != NULL_RTX)
    {
      /* If optab_handler exists for div_optab, perhaps in a wider mode,
	 we don't want to use the libfunc even if it exists for given mode.  */
      machine_mode div_mode;
      FOR_EACH_MODE_FROM (div_mode, mode)
	if (optab_handler (div_optab, div_mode) != CODE_FOR_nothing)
	  return false;

      return targetm.expand_divmod_libfunc != NULL;
    }

  return false;
}
/* Check if stmt is candidate for divmod transform.  */

static bool
divmod_candidate_p (gassign *stmt)
{
  tree type = TREE_TYPE (gimple_assign_lhs (stmt));
  machine_mode mode = TYPE_MODE (type);
  optab divmod_optab, div_optab;

  if (TYPE_UNSIGNED (type))
    {
      divmod_optab = udivmod_optab;
      div_optab = udiv_optab;
    }
  else
    {
      divmod_optab = sdivmod_optab;
      div_optab = sdiv_optab;
    }

  tree op1 = gimple_assign_rhs1 (stmt);
  tree op2 = gimple_assign_rhs2 (stmt);

  /* Disable the transform if either is a constant, since division-by-constant
     may have specialized expansion.  */
  if (CONSTANT_CLASS_P (op1))
    return false;

  if (CONSTANT_CLASS_P (op2))
    {
      if (integer_pow2p (op2))
	return false;

      if (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT
	  && TYPE_PRECISION (type) <= BITS_PER_WORD)
	return false;

      /* If the divisor is not power of 2 and the precision wider than
	 HWI, expand_divmod punts on that, so in that case it is better
	 to use divmod optab or libfunc.  Similarly if choose_multiplier
	 might need pre/post shifts of BITS_PER_WORD or more.  */
    }

  /* Exclude the case where TYPE_OVERFLOW_TRAPS (type) as that should
     expand using the [su]divv optabs.  */
  if (TYPE_OVERFLOW_TRAPS (type))
    return false;

  if (!target_supports_divmod_p (divmod_optab, div_optab, mode))
    return false;

  return true;
}
/* This function looks for:
   t1 = a TRUNC_DIV_EXPR b;
   t2 = a TRUNC_MOD_EXPR b;
   and transforms it to the following sequence:
   complex_tmp = DIVMOD (a, b);
   t1 = REALPART_EXPR(complex_tmp);
   t2 = IMAGPART_EXPR(complex_tmp);
   For conditions enabling the transform see divmod_candidate_p().

   The pass has three parts:
   1) Find top_stmt which is trunc_div or trunc_mod stmt and dominates all
      other trunc_div_expr and trunc_mod_expr stmts.
   2) Add top_stmt and all trunc_div and trunc_mod stmts dominated by top_stmt
      to stmts vector.
   3) Insert DIVMOD call just before top_stmt and update entries in
      stmts vector to use return value of DIVMOD (REALPART_EXPR for div,
      IMAGPART_EXPR for mod).  */
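/* Illustrative source fragment (not part of this file):

     q = n / d;
     r = n % d;

   with a non-constant d becomes

     complex_tmp = DIVMOD (n, d);
     q = REALPART_EXPR <complex_tmp>;
     r = IMAGPART_EXPR <complex_tmp>;

   so one divmod libcall (e.g. __udivmoddi4) or instruction computes
   both results.  */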
static bool
convert_to_divmod (gassign *stmt)
{
  if (stmt_can_throw_internal (cfun, stmt)
      || !divmod_candidate_p (stmt))
    return false;

  tree op1 = gimple_assign_rhs1 (stmt);
  tree op2 = gimple_assign_rhs2 (stmt);

  imm_use_iterator use_iter;
  gimple *use_stmt;
  auto_vec<gimple *> stmts;

  gimple *top_stmt = stmt;
  basic_block top_bb = gimple_bb (stmt);

  /* Part 1: Try to set top_stmt to "topmost" stmt that dominates
     at-least stmt and possibly other trunc_div/trunc_mod stmts
     having same operands as stmt.  */

  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, op1)
    {
      if (is_gimple_assign (use_stmt)
	  && (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR
	      || gimple_assign_rhs_code (use_stmt) == TRUNC_MOD_EXPR)
	  && operand_equal_p (op1, gimple_assign_rhs1 (use_stmt), 0)
	  && operand_equal_p (op2, gimple_assign_rhs2 (use_stmt), 0))
	{
	  if (stmt_can_throw_internal (cfun, use_stmt))
	    continue;

	  basic_block bb = gimple_bb (use_stmt);

	  if (bb == top_bb)
	    {
	      if (gimple_uid (use_stmt) < gimple_uid (top_stmt))
		top_stmt = use_stmt;
	    }
	  else if (dominated_by_p (CDI_DOMINATORS, top_bb, bb))
	    {
	      top_bb = bb;
	      top_stmt = use_stmt;
	    }
	}
    }

  tree top_op1 = gimple_assign_rhs1 (top_stmt);
  tree top_op2 = gimple_assign_rhs2 (top_stmt);

  stmts.safe_push (top_stmt);
  bool div_seen = (gimple_assign_rhs_code (top_stmt) == TRUNC_DIV_EXPR);

  /* Part 2: Add all trunc_div/trunc_mod statements dominated by top_bb
     to stmts vector.  The 2nd loop will always add stmt to stmts vector, since
     gimple_bb (top_stmt) dominates gimple_bb (stmt), so the
     2nd loop ends up adding at-least single trunc_mod_expr stmt.  */

  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, top_op1)
    {
      if (is_gimple_assign (use_stmt)
	  && (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR
	      || gimple_assign_rhs_code (use_stmt) == TRUNC_MOD_EXPR)
	  && operand_equal_p (top_op1, gimple_assign_rhs1 (use_stmt), 0)
	  && operand_equal_p (top_op2, gimple_assign_rhs2 (use_stmt), 0))
	{
	  if (use_stmt == top_stmt
	      || stmt_can_throw_internal (cfun, use_stmt)
	      || !dominated_by_p (CDI_DOMINATORS, gimple_bb (use_stmt), top_bb))
	    continue;

	  stmts.safe_push (use_stmt);
	  if (gimple_assign_rhs_code (use_stmt) == TRUNC_DIV_EXPR)
	    div_seen = true;
	}
    }

  if (!div_seen)
    return false;

  /* Part 3: Create libcall to internal fn DIVMOD:
     divmod_tmp = DIVMOD (op1, op2).  */

  gcall *call_stmt = gimple_build_call_internal (IFN_DIVMOD, 2, op1, op2);
  tree res = make_temp_ssa_name (build_complex_type (TREE_TYPE (op1)),
				 call_stmt, "divmod_tmp");
  gimple_call_set_lhs (call_stmt, res);
  /* We rejected throwing statements above.  */
  gimple_call_set_nothrow (call_stmt, true);

  /* Insert the call before top_stmt.  */
  gimple_stmt_iterator top_stmt_gsi = gsi_for_stmt (top_stmt);
  gsi_insert_before (&top_stmt_gsi, call_stmt, GSI_SAME_STMT);

  widen_mul_stats.divmod_calls_inserted++;

  /* Update all statements in stmts vector:
     lhs = op1 TRUNC_DIV_EXPR op2 -> lhs = REALPART_EXPR<divmod_tmp>
     lhs = op1 TRUNC_MOD_EXPR op2 -> lhs = IMAGPART_EXPR<divmod_tmp>.  */

  for (unsigned i = 0; stmts.iterate (i, &use_stmt); ++i)
    {
      tree new_rhs;

      switch (gimple_assign_rhs_code (use_stmt))
	{
	case TRUNC_DIV_EXPR:
	  new_rhs = fold_build1 (REALPART_EXPR, TREE_TYPE (op1), res);
	  break;

	case TRUNC_MOD_EXPR:
	  new_rhs = fold_build1 (IMAGPART_EXPR, TREE_TYPE (op1), res);
	  break;

	default:
	  gcc_unreachable ();
	}

      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      gimple_assign_set_rhs_from_tree (&gsi, new_rhs);
      update_stmt (use_stmt);
    }

  return true;
}
/* Find integer multiplications where the operands are extended from
   smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
   where appropriate.  */

const pass_data pass_data_optimize_widening_mul =
{
  GIMPLE_PASS, /* type */
  "widening_mul", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_WIDEN_MUL, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_optimize_widening_mul : public gimple_opt_pass
{
public:
  pass_optimize_widening_mul (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_optimize_widening_mul, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      return flag_expensive_optimizations && optimize;
    }

  virtual unsigned int execute (function *);

}; // class pass_optimize_widening_mul
/* Walker class to perform the transformation in reverse dominance order.  */

class math_opts_dom_walker : public dom_walker
{
public:
  /* Constructor, CFG_CHANGED is a pointer to a boolean flag that will be set
     if walking modifies the CFG.  */
  math_opts_dom_walker (bool *cfg_changed_p)
    : dom_walker (CDI_DOMINATORS), m_last_result_set (),
      m_cfg_changed_p (cfg_changed_p) {}

  /* The actual actions performed in the walk.  */

  virtual void after_dom_children (basic_block);

  /* Set of results of chains of multiply and add statement combinations that
     were not transformed into FMAs because of active deferring.  */
  hash_set<tree> m_last_result_set;

  /* Pointer to a flag of the user that needs to be set if CFG has been
     modified.  */
  bool *m_cfg_changed_p;
};
void
math_opts_dom_walker::after_dom_children (basic_block bb)
{
  gimple_stmt_iterator gsi;

  fma_deferring_state fma_state (param_avoid_fma_max_bits > 0);

  for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
    {
      gimple *stmt = gsi_stmt (gsi);
      enum tree_code code;

      if (is_gimple_assign (stmt))
	{
	  code = gimple_assign_rhs_code (stmt);
	  switch (code)
	    {
	    case MULT_EXPR:
	      if (!convert_mult_to_widen (stmt, &gsi)
		  && !convert_expand_mult_copysign (stmt, &gsi)
		  && convert_mult_to_fma (stmt,
					  gimple_assign_rhs1 (stmt),
					  gimple_assign_rhs2 (stmt),
					  &fma_state))
		{
		  gsi_remove (&gsi, true);
		  release_defs (stmt);
		  continue;
		}
	      break;

	    case PLUS_EXPR:
	    case MINUS_EXPR:
	      if (!convert_plusminus_to_widen (&gsi, stmt, code))
		match_uaddsub_overflow (&gsi, stmt, code);
	      break;

	    case TRUNC_MOD_EXPR:
	      convert_to_divmod (as_a <gassign *> (stmt));
	      break;

	    default:;
	    }
	}
      else if (is_gimple_call (stmt))
	{
	  switch (gimple_call_combined_fn (stmt))
	    {
	    CASE_CFN_POW:
	      if (gimple_call_lhs (stmt)
		  && TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
		  && real_equal (&TREE_REAL_CST (gimple_call_arg (stmt, 1)),
				 &dconst2)
		  && convert_mult_to_fma (stmt,
					  gimple_call_arg (stmt, 0),
					  gimple_call_arg (stmt, 0),
					  &fma_state))
		{
		  unlink_stmt_vdef (stmt);
		  if (gsi_remove (&gsi, true)
		      && gimple_purge_dead_eh_edges (bb))
		    *m_cfg_changed_p = true;
		  release_defs (stmt);
		  continue;
		}
	      break;

	    case CFN_COND_MUL:
	      if (convert_mult_to_fma (stmt,
				       gimple_call_arg (stmt, 1),
				       gimple_call_arg (stmt, 2),
				       &fma_state,
				       gimple_call_arg (stmt, 0)))
		{
		  gsi_remove (&gsi, true);
		  release_defs (stmt);
		  continue;
		}
	      break;

	    case CFN_LAST:
	      cancel_fma_deferring (&fma_state);
	      break;

	    default:
	      break;
	    }
	}
      gsi_next (&gsi);
    }

  if (fma_state.m_deferring_p
      && fma_state.m_initial_phi)
    {
      gcc_checking_assert (fma_state.m_last_result);
      if (!last_fma_candidate_feeds_initial_phi (&fma_state,
						 &m_last_result_set))
	cancel_fma_deferring (&fma_state);
      else
	m_last_result_set.add (fma_state.m_last_result);
    }
}
unsigned int
pass_optimize_widening_mul::execute (function *fun)
{
  bool cfg_changed = false;

  memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));
  calculate_dominance_info (CDI_DOMINATORS);
  renumber_gimple_stmt_uids (cfun);

  math_opts_dom_walker (&cfg_changed).walk (ENTRY_BLOCK_PTR_FOR_FN (cfun));

  statistics_counter_event (fun, "widening multiplications inserted",
			    widen_mul_stats.widen_mults_inserted);
  statistics_counter_event (fun, "widening maccs inserted",
			    widen_mul_stats.maccs_inserted);
  statistics_counter_event (fun, "fused multiply-adds inserted",
			    widen_mul_stats.fmas_inserted);
  statistics_counter_event (fun, "divmod calls inserted",
			    widen_mul_stats.divmod_calls_inserted);

  return cfg_changed ? TODO_cleanup_cfg : 0;
}
gimple_opt_pass *
make_pass_optimize_widening_mul (gcc::context *ctxt)
{
  return new pass_optimize_widening_mul (ctxt);
}