re PR tree-optimization/55524 (If fnma exists but not fms, convert_mult_to_fma should...
[gcc.git] / gcc / tree-ssa-math-opts.c
1 /* Global, SSA-based optimizations using mathematical identities.
2 Copyright (C) 2005-2013 Free Software Foundation, Inc.
3
4 This file is part of GCC.
5
6 GCC is free software; you can redistribute it and/or modify it
7 under the terms of the GNU General Public License as published by the
8 Free Software Foundation; either version 3, or (at your option) any
9 later version.
10
11 GCC is distributed in the hope that it will be useful, but WITHOUT
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with GCC; see the file COPYING3. If not see
18 <http://www.gnu.org/licenses/>. */
19
 20 /* The first mini-pass in this file tries to CSE reciprocal
21 operations. These are common in sequences such as this one:
22
23 modulus = sqrt(x*x + y*y + z*z);
24 x = x / modulus;
25 y = y / modulus;
26 z = z / modulus;
27
28 that can be optimized to
29
30 modulus = sqrt(x*x + y*y + z*z);
31 rmodulus = 1.0 / modulus;
32 x = x * rmodulus;
33 y = y * rmodulus;
34 z = z * rmodulus;
35
36 We do this for loop invariant divisors, and with this pass whenever
37 we notice that a division has the same divisor multiple times.
38
39 Of course, like in PRE, we don't insert a division if a dominator
40 already has one. However, this cannot be done as an extension of
41 PRE for several reasons.
42
 43    First of all, experiments showed that the transformation is not
 44    always useful if there are only two divisions by the same divisor.
 45    This is probably because modern processors can pipeline the
 46    divisions; on older, in-order processors it should still be
 47    effective to optimize two divisions by the same number.
48 We make this a param, and it shall be called N in the remainder of
49 this comment.
50
 51    Second, if trapping math is active, we have less freedom in where
 52    to insert divisions: we can only do so in basic blocks that already
 53    contain one.  (If divisions don't trap, we can instead insert
 54    divisions elsewhere, in blocks that are common dominators of those
 55    that have the division.)
56
57 We really don't want to compute the reciprocal unless a division will
58 be found. To do this, we won't insert the division in a basic block
 59    that has fewer than N divisions *post-dominating* it.
60
61 The algorithm constructs a subset of the dominator tree, holding the
 62    blocks containing the divisions and their common dominators, and
 63    walks it twice.  The first walk is in post-order, and it annotates
64 each block with the number of divisions that post-dominate it: this
65 gives information on where divisions can be inserted profitably.
66 The second walk is in pre-order, and it inserts divisions as explained
67 above, and replaces divisions by multiplications.
68
69 In the best case, the cost of the pass is O(n_statements). In the
70 worst-case, the cost is due to creating the dominator tree subset,
71 with a cost of O(n_basic_blocks ^ 2); however this can only happen
72 for n_statements / n_basic_blocks statements. So, the amortized cost
73 of creating the dominator tree subset is O(n_basic_blocks) and the
74 worst-case cost of the pass is O(n_statements * n_basic_blocks).
75
76 More practically, the cost will be small because there are few
77 divisions, and they tend to be in the same basic block, so insert_bb
78 is called very few times.
79
80 If we did this using domwalk.c, an efficient implementation would have
81 to work on all the variables in a single pass, because we could not
82 work on just a subset of the dominator tree, as we do now, and the
83 cost would also be something like O(n_statements * n_basic_blocks).
84 The data structures would be more complex in order to work on all the
85 variables in a single pass. */
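
/* As a minimal sketch of the transformation (assuming the target's
   min_divisions_for_recip_mul threshold, N above, is 2), two divisions
   by the same SSA name in a single block:

     a = x / d;
     b = y / d;

   become, after the reciprocal is inserted before the first division:

     rd = 1.0 / d;
     a = x * rd;
     b = y * rd;  */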
86
87 #include "config.h"
88 #include "system.h"
89 #include "coretypes.h"
90 #include "tm.h"
91 #include "flags.h"
92 #include "tree.h"
93 #include "tree-flow.h"
94 #include "tree-pass.h"
95 #include "alloc-pool.h"
96 #include "basic-block.h"
97 #include "target.h"
98 #include "gimple-pretty-print.h"
99
100 /* FIXME: RTL headers have to be included here for optabs. */
101 #include "rtl.h" /* Because optabs.h wants enum rtx_code. */
102 #include "expr.h" /* Because optabs.h wants sepops. */
103 #include "optabs.h"
104
105 /* This structure represents one basic block that either computes a
106    division, or is a common dominator for basic blocks that compute a
107 division. */
108 struct occurrence {
109 /* The basic block represented by this structure. */
110 basic_block bb;
111
112 /* If non-NULL, the SSA_NAME holding the definition for a reciprocal
113 inserted in BB. */
114 tree recip_def;
115
116 /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that
117 was inserted in BB. */
118 gimple recip_def_stmt;
119
120 /* Pointer to a list of "struct occurrence"s for blocks dominated
121 by BB. */
122 struct occurrence *children;
123
124   /* Pointer to the next "struct occurrence" in the list of blocks
125 sharing a common dominator. */
126 struct occurrence *next;
127
128 /* The number of divisions that are in BB before compute_merit. The
129 number of divisions that are in BB or post-dominate it after
130 compute_merit. */
131 int num_divisions;
132
133 /* True if the basic block has a division, false if it is a common
134 dominator for basic blocks that do. If it is false and trapping
135 math is active, BB is not a candidate for inserting a reciprocal. */
136 bool bb_has_division;
137 };
138
139 static struct
140 {
141 /* Number of 1.0/X ops inserted. */
142 int rdivs_inserted;
143
144 /* Number of 1.0/FUNC ops inserted. */
145 int rfuncs_inserted;
146 } reciprocal_stats;
147
148 static struct
149 {
150 /* Number of cexpi calls inserted. */
151 int inserted;
152 } sincos_stats;
153
154 static struct
155 {
156 /* Number of hand-written 16-bit bswaps found. */
157 int found_16bit;
158
159 /* Number of hand-written 32-bit bswaps found. */
160 int found_32bit;
161
162 /* Number of hand-written 64-bit bswaps found. */
163 int found_64bit;
164 } bswap_stats;
165
166 static struct
167 {
168 /* Number of widening multiplication ops inserted. */
169 int widen_mults_inserted;
170
171 /* Number of integer multiply-and-accumulate ops inserted. */
172 int maccs_inserted;
173
174 /* Number of fp fused multiply-add ops inserted. */
175 int fmas_inserted;
176 } widen_mul_stats;
177
178 /* The instance of "struct occurrence" representing the highest
179 interesting block in the dominator tree. */
180 static struct occurrence *occ_head;
181
182 /* Allocation pool for getting instances of "struct occurrence". */
183 static alloc_pool occ_pool;
184
185
186
187 /* Allocate and return a new struct occurrence for basic block BB, and
188 whose children list is headed by CHILDREN. */
189 static struct occurrence *
190 occ_new (basic_block bb, struct occurrence *children)
191 {
192 struct occurrence *occ;
193
194 bb->aux = occ = (struct occurrence *) pool_alloc (occ_pool);
195 memset (occ, 0, sizeof (struct occurrence));
196
197 occ->bb = bb;
198 occ->children = children;
199 return occ;
200 }
201
202
203 /* Insert NEW_OCC into our subset of the dominator tree. P_HEAD points to a
204 list of "struct occurrence"s, one per basic block, having IDOM as
205 their common dominator.
206
207 We try to insert NEW_OCC as deep as possible in the tree, and we also
208 insert any other block that is a common dominator for BB and one
209 block already in the tree. */
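
/* For instance (a sketch): if the tree under IDOM already holds an
   occurrence for block B1, and NEW_OCC's block B2 neither dominates
   nor is dominated by B1, but the two share a nearest common
   dominator D distinct from IDOM, an occurrence for D is created on
   the fly and B1 and B2 become its children:

       IDOM              IDOM
        |                 |
        B1      ==>       D
                         / \
                        B1  B2   */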
210
211 static void
212 insert_bb (struct occurrence *new_occ, basic_block idom,
213 struct occurrence **p_head)
214 {
215 struct occurrence *occ, **p_occ;
216
217 for (p_occ = p_head; (occ = *p_occ) != NULL; )
218 {
219 basic_block bb = new_occ->bb, occ_bb = occ->bb;
220 basic_block dom = nearest_common_dominator (CDI_DOMINATORS, occ_bb, bb);
221 if (dom == bb)
222 {
223 /* BB dominates OCC_BB. OCC becomes NEW_OCC's child: remove OCC
224 from its list. */
225 *p_occ = occ->next;
226 occ->next = new_occ->children;
227 new_occ->children = occ;
228
229 	  /* Try the next block (it may also be dominated by BB).  */
230 }
231
232 else if (dom == occ_bb)
233 {
234 /* OCC_BB dominates BB. Tail recurse to look deeper. */
235 insert_bb (new_occ, dom, &occ->children);
236 return;
237 }
238
239 else if (dom != idom)
240 {
241 gcc_assert (!dom->aux);
242
243 /* There is a dominator between IDOM and BB, add it and make
244 two children out of NEW_OCC and OCC. First, remove OCC from
245 its list. */
246 *p_occ = occ->next;
247 new_occ->next = occ;
248 occ->next = NULL;
249
250 /* None of the previous blocks has DOM as a dominator: if we tail
251 recursed, we would reexamine them uselessly. Just switch BB with
252 DOM, and go on looking for blocks dominated by DOM. */
253 new_occ = occ_new (dom, new_occ);
254 }
255
256 else
257 {
258 /* Nothing special, go on with the next element. */
259 p_occ = &occ->next;
260 }
261 }
262
263   /* No deeper place was found: add NEW_OCC to the list of IDOM's children.  */
264 new_occ->next = *p_head;
265 *p_head = new_occ;
266 }
267
268 /* Register that we found a division in BB. */
269
270 static inline void
271 register_division_in (basic_block bb)
272 {
273 struct occurrence *occ;
274
275 occ = (struct occurrence *) bb->aux;
276 if (!occ)
277 {
278 occ = occ_new (bb, NULL);
279 insert_bb (occ, ENTRY_BLOCK_PTR, &occ_head);
280 }
281
282 occ->bb_has_division = true;
283 occ->num_divisions++;
284 }
285
286
287 /* Compute the number of divisions that postdominate each block in OCC and
288 its children. */
289
290 static void
291 compute_merit (struct occurrence *occ)
292 {
293 struct occurrence *occ_child;
294 basic_block dom = occ->bb;
295
296 for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
297 {
298 basic_block bb;
299 if (occ_child->children)
300 compute_merit (occ_child);
301
302 if (flag_exceptions)
303 bb = single_noncomplex_succ (dom);
304 else
305 bb = dom;
306
307 if (dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb))
308 occ->num_divisions += occ_child->num_divisions;
309 }
310 }
311
312
313 /* Return whether USE_STMT is a floating-point division by DEF. */
314 static inline bool
315 is_division_by (gimple use_stmt, tree def)
316 {
317 return is_gimple_assign (use_stmt)
318 && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
319 && gimple_assign_rhs2 (use_stmt) == def
320 	 /* Do not recognize x / x as a valid division, as we would get
321 	    confused later by replacing all immediate uses of x in such
322 	    a stmt.  */
323 && gimple_assign_rhs1 (use_stmt) != def;
324 }
325
326 /* Walk the subset of the dominator tree rooted at OCC, setting the
327 RECIP_DEF field to a definition of 1.0 / DEF that can be used in
328 the given basic block. The field may be left NULL, of course,
329 if it is not possible or profitable to do the optimization.
330
331    DEF_GSI is an iterator pointing at the statement defining DEF.
332 If RECIP_DEF is set, a dominator already has a computation that can
333 be used. */
334
335 static void
336 insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
337 tree def, tree recip_def, int threshold)
338 {
339 tree type;
340 gimple new_stmt;
341 gimple_stmt_iterator gsi;
342 struct occurrence *occ_child;
343
344 if (!recip_def
345 && (occ->bb_has_division || !flag_trapping_math)
346 && occ->num_divisions >= threshold)
347 {
348 /* Make a variable with the replacement and substitute it. */
349 type = TREE_TYPE (def);
350 recip_def = create_tmp_reg (type, "reciptmp");
351 new_stmt = gimple_build_assign_with_ops (RDIV_EXPR, recip_def,
352 build_one_cst (type), def);
353
354 if (occ->bb_has_division)
355 {
356 /* Case 1: insert before an existing division. */
357 gsi = gsi_after_labels (occ->bb);
358 while (!gsi_end_p (gsi) && !is_division_by (gsi_stmt (gsi), def))
359 gsi_next (&gsi);
360
361 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
362 }
363 else if (def_gsi && occ->bb == def_gsi->bb)
364 {
365 /* Case 2: insert right after the definition. Note that this will
366 never happen if the definition statement can throw, because in
367 that case the sole successor of the statement's basic block will
368 dominate all the uses as well. */
369 gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
370 }
371 else
372 {
373 /* Case 3: insert in a basic block not containing defs/uses. */
374 gsi = gsi_after_labels (occ->bb);
375 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
376 }
377
378 reciprocal_stats.rdivs_inserted++;
379
380 occ->recip_def_stmt = new_stmt;
381 }
382
383 occ->recip_def = recip_def;
384 for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
385 insert_reciprocals (def_gsi, occ_child, def, recip_def, threshold);
386 }
387
388
389 /* Replace the division at USE_P with a multiplication by the reciprocal, if
390 possible. */
391
392 static inline void
393 replace_reciprocal (use_operand_p use_p)
394 {
395 gimple use_stmt = USE_STMT (use_p);
396 basic_block bb = gimple_bb (use_stmt);
397 struct occurrence *occ = (struct occurrence *) bb->aux;
398
399 if (optimize_bb_for_speed_p (bb)
400 && occ->recip_def && use_stmt != occ->recip_def_stmt)
401 {
402 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
403 gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
404 SET_USE (use_p, occ->recip_def);
405 fold_stmt_inplace (&gsi);
406 update_stmt (use_stmt);
407 }
408 }
409
410
411 /* Free OCC and return the next "struct occurrence" to be freed.  */
412
413 static struct occurrence *
414 free_bb (struct occurrence *occ)
415 {
416 struct occurrence *child, *next;
417
418 /* First get the two pointers hanging off OCC. */
419 next = occ->next;
420 child = occ->children;
421 occ->bb->aux = NULL;
422 pool_free (occ_pool, occ);
423
424 /* Now ensure that we don't recurse unless it is necessary. */
425 if (!child)
426 return next;
427 else
428 {
429 while (next)
430 next = free_bb (next);
431
432 return child;
433 }
434 }
435
436
437 /* Look for floating-point divisions among DEF's uses, and try to
438    replace them with multiplications by the reciprocal.  Add
439 as many statements computing the reciprocal as needed.
440
441 DEF must be a GIMPLE register of a floating-point type. */
442
443 static void
444 execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
445 {
446 use_operand_p use_p;
447 imm_use_iterator use_iter;
448 struct occurrence *occ;
449 int count = 0, threshold;
450
451 gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && is_gimple_reg (def));
452
453 FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
454 {
455 gimple use_stmt = USE_STMT (use_p);
456 if (is_division_by (use_stmt, def))
457 {
458 register_division_in (gimple_bb (use_stmt));
459 count++;
460 }
461 }
462
463 /* Do the expensive part only if we can hope to optimize something. */
464 threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));
465 if (count >= threshold)
466 {
467 gimple use_stmt;
468 for (occ = occ_head; occ; occ = occ->next)
469 {
470 compute_merit (occ);
471 insert_reciprocals (def_gsi, occ, def, NULL, threshold);
472 }
473
474 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
475 {
476 if (is_division_by (use_stmt, def))
477 {
478 FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
479 replace_reciprocal (use_p);
480 }
481 }
482 }
483
484 for (occ = occ_head; occ; )
485 occ = free_bb (occ);
486
487 occ_head = NULL;
488 }
489
490 static bool
491 gate_cse_reciprocals (void)
492 {
493 return optimize && flag_reciprocal_math;
494 }
495
496 /* Go through all the floating-point SSA_NAMEs, and call
497 execute_cse_reciprocals_1 on each of them. */
498 static unsigned int
499 execute_cse_reciprocals (void)
500 {
501 basic_block bb;
502 tree arg;
503
504 occ_pool = create_alloc_pool ("dominators for recip",
505 sizeof (struct occurrence),
506 n_basic_blocks / 3 + 1);
507
508 memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
509 calculate_dominance_info (CDI_DOMINATORS);
510 calculate_dominance_info (CDI_POST_DOMINATORS);
511
512 #ifdef ENABLE_CHECKING
513 FOR_EACH_BB (bb)
514 gcc_assert (!bb->aux);
515 #endif
516
517 for (arg = DECL_ARGUMENTS (cfun->decl); arg; arg = DECL_CHAIN (arg))
518 if (FLOAT_TYPE_P (TREE_TYPE (arg))
519 && is_gimple_reg (arg))
520 {
521 tree name = ssa_default_def (cfun, arg);
522 if (name)
523 execute_cse_reciprocals_1 (NULL, name);
524 }
525
526 FOR_EACH_BB (bb)
527 {
528 gimple_stmt_iterator gsi;
529 gimple phi;
530 tree def;
531
532 for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
533 {
534 phi = gsi_stmt (gsi);
535 def = PHI_RESULT (phi);
536 if (! virtual_operand_p (def)
537 && FLOAT_TYPE_P (TREE_TYPE (def)))
538 execute_cse_reciprocals_1 (NULL, def);
539 }
540
541 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
542 {
543 gimple stmt = gsi_stmt (gsi);
544
545 if (gimple_has_lhs (stmt)
546 && (def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF)) != NULL
547 && FLOAT_TYPE_P (TREE_TYPE (def))
548 && TREE_CODE (def) == SSA_NAME)
549 execute_cse_reciprocals_1 (&gsi, def);
550 }
551
552 if (optimize_bb_for_size_p (bb))
553 continue;
554
555 /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b). */
556 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
557 {
558 gimple stmt = gsi_stmt (gsi);
559 tree fndecl;
560
561 if (is_gimple_assign (stmt)
562 && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
563 {
564 tree arg1 = gimple_assign_rhs2 (stmt);
565 gimple stmt1;
566
567 if (TREE_CODE (arg1) != SSA_NAME)
568 continue;
569
570 stmt1 = SSA_NAME_DEF_STMT (arg1);
571
572 if (is_gimple_call (stmt1)
573 && gimple_call_lhs (stmt1)
574 && (fndecl = gimple_call_fndecl (stmt1))
575 && (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
576 || DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD))
577 {
578 enum built_in_function code;
579 bool md_code, fail;
580 imm_use_iterator ui;
581 use_operand_p use_p;
582
583 code = DECL_FUNCTION_CODE (fndecl);
584 md_code = DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD;
585
586 fndecl = targetm.builtin_reciprocal (code, md_code, false);
587 if (!fndecl)
588 continue;
589
590 /* Check that all uses of the SSA name are divisions,
591 otherwise replacing the defining statement will do
592 the wrong thing. */
593 fail = false;
594 FOR_EACH_IMM_USE_FAST (use_p, ui, arg1)
595 {
596 gimple stmt2 = USE_STMT (use_p);
597 if (is_gimple_debug (stmt2))
598 continue;
599 if (!is_gimple_assign (stmt2)
600 || gimple_assign_rhs_code (stmt2) != RDIV_EXPR
601 || gimple_assign_rhs1 (stmt2) == arg1
602 || gimple_assign_rhs2 (stmt2) != arg1)
603 {
604 fail = true;
605 break;
606 }
607 }
608 if (fail)
609 continue;
610
611 gimple_replace_lhs (stmt1, arg1);
612 gimple_call_set_fndecl (stmt1, fndecl);
613 update_stmt (stmt1);
614 reciprocal_stats.rfuncs_inserted++;
615
616 FOR_EACH_IMM_USE_STMT (stmt, ui, arg1)
617 {
618 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
619 gimple_assign_set_rhs_code (stmt, MULT_EXPR);
620 fold_stmt_inplace (&gsi);
621 update_stmt (stmt);
622 }
623 }
624 }
625 }
626 }
627
628 statistics_counter_event (cfun, "reciprocal divs inserted",
629 reciprocal_stats.rdivs_inserted);
630 statistics_counter_event (cfun, "reciprocal functions inserted",
631 reciprocal_stats.rfuncs_inserted);
632
633 free_dominance_info (CDI_DOMINATORS);
634 free_dominance_info (CDI_POST_DOMINATORS);
635 free_alloc_pool (occ_pool);
636 return 0;
637 }
638
639 struct gimple_opt_pass pass_cse_reciprocals =
640 {
641 {
642 GIMPLE_PASS,
643 "recip", /* name */
644 OPTGROUP_NONE, /* optinfo_flags */
645 gate_cse_reciprocals, /* gate */
646 execute_cse_reciprocals, /* execute */
647 NULL, /* sub */
648 NULL, /* next */
649 0, /* static_pass_number */
650 TV_NONE, /* tv_id */
651 PROP_ssa, /* properties_required */
652 0, /* properties_provided */
653 0, /* properties_destroyed */
654 0, /* todo_flags_start */
655 TODO_update_ssa | TODO_verify_ssa
656 | TODO_verify_stmts /* todo_flags_finish */
657 }
658 };
659
660 /* Records an occurrence at statement USE_STMT in the vector STMTS if
661    USE_STMT's basic block is dominated by *TOP_BB, dominates it, or
662    *TOP_BB is not yet initialized.  Returns true if the occurrence was
663    pushed on the vector.  Adjusts *TOP_BB to be the basic block
664    dominating all statements in the vector.  */
665
666 static bool
667 maybe_record_sincos (vec<gimple> *stmts,
668 basic_block *top_bb, gimple use_stmt)
669 {
670 basic_block use_bb = gimple_bb (use_stmt);
671 if (*top_bb
672 && (*top_bb == use_bb
673 || dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb)))
674 stmts->safe_push (use_stmt);
675 else if (!*top_bb
676 || dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb))
677 {
678 stmts->safe_push (use_stmt);
679 *top_bb = use_bb;
680 }
681 else
682 return false;
683
684 return true;
685 }
686
687 /* Look for sin, cos and cexpi calls with the same argument NAME and,
688    if found, create a single call to cexpi and CSE its result.
689    We first walk over all immediate uses of the argument, collecting
690    statements that we can CSE in a vector, and in a second pass replace
691    each statement's rhs with a REALPART or IMAGPART expression on the
692    result of the cexpi call, which we insert before the use statement
693    that dominates all other candidates.  */
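
/* A sketch of the effect, assuming float operands (the temporary's
   name follows the "sincostmp" label used below):

     s = sinf (x);              sincostmp = __builtin_cexpif (x);
     c = cosf (x);     ==>      s = __imag__ sincostmp;
                                c = __real__ sincostmp;  */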
694
695 static bool
696 execute_cse_sincos_1 (tree name)
697 {
698 gimple_stmt_iterator gsi;
699 imm_use_iterator use_iter;
700 tree fndecl, res, type;
701 gimple def_stmt, use_stmt, stmt;
702 int seen_cos = 0, seen_sin = 0, seen_cexpi = 0;
703 vec<gimple> stmts = vNULL;
704 basic_block top_bb = NULL;
705 int i;
706 bool cfg_changed = false;
707
708 type = TREE_TYPE (name);
709 FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, name)
710 {
711 if (gimple_code (use_stmt) != GIMPLE_CALL
712 || !gimple_call_lhs (use_stmt)
713 || !(fndecl = gimple_call_fndecl (use_stmt))
714 || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
715 continue;
716
717 switch (DECL_FUNCTION_CODE (fndecl))
718 {
719 CASE_FLT_FN (BUILT_IN_COS):
720 seen_cos |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
721 break;
722
723 CASE_FLT_FN (BUILT_IN_SIN):
724 seen_sin |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
725 break;
726
727 CASE_FLT_FN (BUILT_IN_CEXPI):
728 seen_cexpi |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
729 break;
730
731 default:;
732 }
733 }
734
735 if (seen_cos + seen_sin + seen_cexpi <= 1)
736 {
737 stmts.release ();
738 return false;
739 }
740
741 /* Simply insert cexpi at the beginning of top_bb but not earlier than
742 the name def statement. */
743 fndecl = mathfn_built_in (type, BUILT_IN_CEXPI);
744 if (!fndecl)
745 return false;
746 stmt = gimple_build_call (fndecl, 1, name);
747 res = make_temp_ssa_name (TREE_TYPE (TREE_TYPE (fndecl)), stmt, "sincostmp");
748 gimple_call_set_lhs (stmt, res);
749
750 def_stmt = SSA_NAME_DEF_STMT (name);
751 if (!SSA_NAME_IS_DEFAULT_DEF (name)
752 && gimple_code (def_stmt) != GIMPLE_PHI
753 && gimple_bb (def_stmt) == top_bb)
754 {
755 gsi = gsi_for_stmt (def_stmt);
756 gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
757 }
758 else
759 {
760 gsi = gsi_after_labels (top_bb);
761 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
762 }
763 sincos_stats.inserted++;
764
765 /* And adjust the recorded old call sites. */
766 for (i = 0; stmts.iterate (i, &use_stmt); ++i)
767 {
768 tree rhs = NULL;
769 fndecl = gimple_call_fndecl (use_stmt);
770
771 switch (DECL_FUNCTION_CODE (fndecl))
772 {
773 CASE_FLT_FN (BUILT_IN_COS):
774 rhs = fold_build1 (REALPART_EXPR, type, res);
775 break;
776
777 CASE_FLT_FN (BUILT_IN_SIN):
778 rhs = fold_build1 (IMAGPART_EXPR, type, res);
779 break;
780
781 CASE_FLT_FN (BUILT_IN_CEXPI):
782 rhs = res;
783 break;
784
785 default:;
786 gcc_unreachable ();
787 }
788
789 /* Replace call with a copy. */
790 stmt = gimple_build_assign (gimple_call_lhs (use_stmt), rhs);
791
792 gsi = gsi_for_stmt (use_stmt);
793 gsi_replace (&gsi, stmt, true);
794 if (gimple_purge_dead_eh_edges (gimple_bb (stmt)))
795 cfg_changed = true;
796 }
797
798 stmts.release ();
799
800 return cfg_changed;
801 }
802
803 /* To evaluate powi(x,n), the floating point value x raised to the
804 constant integer exponent n, we use a hybrid algorithm that
805 combines the "window method" with look-up tables. For an
806 introduction to exponentiation algorithms and "addition chains",
807 see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth,
808 "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming",
809 3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation
810 Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998. */
811
812 /* Provide a default value for POWI_MAX_MULTS, the maximum number of
813 multiplications to inline before calling the system library's pow
814 function. powi(x,n) requires at worst 2*bits(n)-2 multiplications,
815 so this default never requires calling pow, powf or powl. */
816
817 #ifndef POWI_MAX_MULTS
818 #define POWI_MAX_MULTS (2*HOST_BITS_PER_WIDE_INT-2)
819 #endif
820
821 /* The size of the "optimal power tree" lookup table. All
822 exponents less than this value are simply looked up in the
823 powi_table below. This threshold is also used to size the
824 cache of pseudo registers that hold intermediate results. */
825 #define POWI_TABLE_SIZE 256
826
827 /* The size, in bits, of the window used in the "window method"
828 exponentiation algorithm. This is equivalent to a radix of
829 (1<<POWI_WINDOW_SIZE) in the corresponding "m-ary method". */
830 #define POWI_WINDOW_SIZE 3
831
832 /* The following table is an efficient representation of an
833 "optimal power tree". For each value, i, the corresponding
834    value, j, in the table states that an optimal evaluation
835 sequence for calculating pow(x,i) can be found by evaluating
836 pow(x,j)*pow(x,i-j). An optimal power tree for the first
837 100 integers is given in Knuth's "Seminumerical algorithms". */
838
839 static const unsigned char powi_table[POWI_TABLE_SIZE] =
840 {
841 0, 1, 1, 2, 2, 3, 3, 4, /* 0 - 7 */
842 4, 6, 5, 6, 6, 10, 7, 9, /* 8 - 15 */
843 8, 16, 9, 16, 10, 12, 11, 13, /* 16 - 23 */
844 12, 17, 13, 18, 14, 24, 15, 26, /* 24 - 31 */
845 16, 17, 17, 19, 18, 33, 19, 26, /* 32 - 39 */
846 20, 25, 21, 40, 22, 27, 23, 44, /* 40 - 47 */
847 24, 32, 25, 34, 26, 29, 27, 44, /* 48 - 55 */
848 28, 31, 29, 34, 30, 60, 31, 36, /* 56 - 63 */
849 32, 64, 33, 34, 34, 46, 35, 37, /* 64 - 71 */
850 36, 65, 37, 50, 38, 48, 39, 69, /* 72 - 79 */
851 40, 49, 41, 43, 42, 51, 43, 58, /* 80 - 87 */
852 44, 64, 45, 47, 46, 59, 47, 76, /* 88 - 95 */
853 48, 65, 49, 66, 50, 67, 51, 66, /* 96 - 103 */
854 52, 70, 53, 74, 54, 104, 55, 74, /* 104 - 111 */
855 56, 64, 57, 69, 58, 78, 59, 68, /* 112 - 119 */
856 60, 61, 61, 80, 62, 75, 63, 68, /* 120 - 127 */
857 64, 65, 65, 128, 66, 129, 67, 90, /* 128 - 135 */
858 68, 73, 69, 131, 70, 94, 71, 88, /* 136 - 143 */
859 72, 128, 73, 98, 74, 132, 75, 121, /* 144 - 151 */
860 76, 102, 77, 124, 78, 132, 79, 106, /* 152 - 159 */
861 80, 97, 81, 160, 82, 99, 83, 134, /* 160 - 167 */
862 84, 86, 85, 95, 86, 160, 87, 100, /* 168 - 175 */
863 88, 113, 89, 98, 90, 107, 91, 122, /* 176 - 183 */
864 92, 111, 93, 102, 94, 126, 95, 150, /* 184 - 191 */
865 96, 128, 97, 130, 98, 133, 99, 195, /* 192 - 199 */
866 100, 128, 101, 123, 102, 164, 103, 138, /* 200 - 207 */
867 104, 145, 105, 146, 106, 109, 107, 149, /* 208 - 215 */
868 108, 200, 109, 146, 110, 170, 111, 157, /* 216 - 223 */
869 112, 128, 113, 130, 114, 182, 115, 132, /* 224 - 231 */
870 116, 200, 117, 132, 118, 158, 119, 206, /* 232 - 239 */
871 120, 240, 121, 162, 122, 147, 123, 152, /* 240 - 247 */
872 124, 166, 125, 214, 126, 138, 127, 153, /* 248 - 255 */
873 };
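
/* For example, powi_table[5] is 3, so x**5 is evaluated as
   x**3 * x**2; recursing, x**3 = x**2 * x and x**2 = x * x, giving
   three multiplications in total once the shared x**2 is reused.  */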
874
875
876 /* Return the number of multiplications required to calculate
877 powi(x,n) where n is less than POWI_TABLE_SIZE. This is a
878 subroutine of powi_cost. CACHE is an array indicating
879 which exponents have already been calculated. */
880
881 static int
882 powi_lookup_cost (unsigned HOST_WIDE_INT n, bool *cache)
883 {
884 /* If we've already calculated this exponent, then this evaluation
885 doesn't require any additional multiplications. */
886 if (cache[n])
887 return 0;
888
889 cache[n] = true;
890 return powi_lookup_cost (n - powi_table[n], cache)
891 + powi_lookup_cost (powi_table[n], cache) + 1;
892 }
893
894 /* Return the number of multiplications required to calculate
895 powi(x,n) for an arbitrary x, given the exponent N. This
896 function needs to be kept in sync with powi_as_mults below. */
897
898 static int
899 powi_cost (HOST_WIDE_INT n)
900 {
901 bool cache[POWI_TABLE_SIZE];
902 unsigned HOST_WIDE_INT digit;
903 unsigned HOST_WIDE_INT val;
904 int result;
905
906 if (n == 0)
907 return 0;
908
909 /* Ignore the reciprocal when calculating the cost. */
910 val = (n < 0) ? -n : n;
911
912 /* Initialize the exponent cache. */
913 memset (cache, 0, POWI_TABLE_SIZE * sizeof (bool));
914 cache[1] = true;
915
916 result = 0;
917
918 while (val >= POWI_TABLE_SIZE)
919 {
920 if (val & 1)
921 {
922 digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
923 result += powi_lookup_cost (digit, cache)
924 + POWI_WINDOW_SIZE + 1;
925 val >>= POWI_WINDOW_SIZE;
926 }
927 else
928 {
929 val >>= 1;
930 result++;
931 }
932 }
933
934 return result + powi_lookup_cost (val, cache);
935 }
936
937 /* Recursive subroutine of powi_as_mults. This function takes the
938 array, CACHE, of already calculated exponents and an exponent N and
939 returns a tree that corresponds to CACHE[1]**N, with type TYPE. */
940
941 static tree
942 powi_as_mults_1 (gimple_stmt_iterator *gsi, location_t loc, tree type,
943 HOST_WIDE_INT n, tree *cache)
944 {
945 tree op0, op1, ssa_target;
946 unsigned HOST_WIDE_INT digit;
947 gimple mult_stmt;
948
949 if (n < POWI_TABLE_SIZE && cache[n])
950 return cache[n];
951
952 ssa_target = make_temp_ssa_name (type, NULL, "powmult");
953
954 if (n < POWI_TABLE_SIZE)
955 {
956 cache[n] = ssa_target;
957 op0 = powi_as_mults_1 (gsi, loc, type, n - powi_table[n], cache);
958 op1 = powi_as_mults_1 (gsi, loc, type, powi_table[n], cache);
959 }
960 else if (n & 1)
961 {
962 digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
963 op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache);
964 op1 = powi_as_mults_1 (gsi, loc, type, digit, cache);
965 }
966 else
967 {
968 op0 = powi_as_mults_1 (gsi, loc, type, n >> 1, cache);
969 op1 = op0;
970 }
971
972 mult_stmt = gimple_build_assign_with_ops (MULT_EXPR, ssa_target, op0, op1);
973 gimple_set_location (mult_stmt, loc);
974 gsi_insert_before (gsi, mult_stmt, GSI_SAME_STMT);
975
976 return ssa_target;
977 }
978
979 /* Convert ARG0**N to a tree of multiplications of ARG0 with itself.
980 This function needs to be kept in sync with powi_cost above. */
981
982 static tree
983 powi_as_mults (gimple_stmt_iterator *gsi, location_t loc,
984 tree arg0, HOST_WIDE_INT n)
985 {
986 tree cache[POWI_TABLE_SIZE], result, type = TREE_TYPE (arg0);
987 gimple div_stmt;
988 tree target;
989
990 if (n == 0)
991 return build_real (type, dconst1);
992
993 memset (cache, 0, sizeof (cache));
994 cache[1] = arg0;
995
996 result = powi_as_mults_1 (gsi, loc, type, (n < 0) ? -n : n, cache);
997 if (n >= 0)
998 return result;
999
1000 /* If the original exponent was negative, reciprocate the result. */
1001 target = make_temp_ssa_name (type, NULL, "powmult");
1002 div_stmt = gimple_build_assign_with_ops (RDIV_EXPR, target,
1003 build_real (type, dconst1),
1004 result);
1005 gimple_set_location (div_stmt, loc);
1006 gsi_insert_before (gsi, div_stmt, GSI_SAME_STMT);
1007
1008 return target;
1009 }
1010
1011 /* ARG0 and N are the two arguments to a powi builtin in GSI with
1012 location info LOC. If the arguments are appropriate, create an
1013 equivalent sequence of statements prior to GSI using an optimal
1014    number of multiplications, and return an expression holding the
1015 result. */
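
/* For instance (a sketch): powi (x, -3) is expanded, when optimizing
   for speed, into the multiply sequence for x**3 followed by a
   1.0 / t reciprocal, while an exponent equal to the most negative
   HOST_WIDE_INT fails the n != -n check and is left alone.  */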
1016
1017 static tree
1018 gimple_expand_builtin_powi (gimple_stmt_iterator *gsi, location_t loc,
1019 tree arg0, HOST_WIDE_INT n)
1020 {
1021 /* Avoid largest negative number. */
1022 if (n != -n
1023 && ((n >= -1 && n <= 2)
1024 || (optimize_function_for_speed_p (cfun)
1025 && powi_cost (n) <= POWI_MAX_MULTS)))
1026 return powi_as_mults (gsi, loc, arg0, n);
1027
1028 return NULL_TREE;
1029 }
1030
1031 /* Build a gimple call statement that calls FN with argument ARG.
1032 Set the lhs of the call statement to a fresh SSA name. Insert the
1033 statement prior to GSI's current position, and return the fresh
1034 SSA name. */
1035
1036 static tree
1037 build_and_insert_call (gimple_stmt_iterator *gsi, location_t loc,
1038 tree fn, tree arg)
1039 {
1040 gimple call_stmt;
1041 tree ssa_target;
1042
1043 call_stmt = gimple_build_call (fn, 1, arg);
1044 ssa_target = make_temp_ssa_name (TREE_TYPE (arg), NULL, "powroot");
1045 gimple_set_lhs (call_stmt, ssa_target);
1046 gimple_set_location (call_stmt, loc);
1047 gsi_insert_before (gsi, call_stmt, GSI_SAME_STMT);
1048
1049 return ssa_target;
1050 }
1051
1052 /* Build a gimple binary operation with the given CODE and arguments
1053 ARG0, ARG1, assigning the result to a new SSA name for variable
1054 TARGET. Insert the statement prior to GSI's current position, and
1055    return the fresh SSA name.  */
1056
1057 static tree
1058 build_and_insert_binop (gimple_stmt_iterator *gsi, location_t loc,
1059 const char *name, enum tree_code code,
1060 tree arg0, tree arg1)
1061 {
1062 tree result = make_temp_ssa_name (TREE_TYPE (arg0), NULL, name);
1063 gimple stmt = gimple_build_assign_with_ops (code, result, arg0, arg1);
1064 gimple_set_location (stmt, loc);
1065 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
1066 return result;
1067 }
1068
1069 /* Build a gimple reference operation with the given CODE and argument
1070 ARG, assigning the result to a new SSA name of TYPE with NAME.
1071 Insert the statement prior to GSI's current position, and return
1072 the fresh SSA name. */
1073
1074 static inline tree
1075 build_and_insert_ref (gimple_stmt_iterator *gsi, location_t loc, tree type,
1076 const char *name, enum tree_code code, tree arg0)
1077 {
1078 tree result = make_temp_ssa_name (type, NULL, name);
1079 gimple stmt = gimple_build_assign (result, build1 (code, type, arg0));
1080 gimple_set_location (stmt, loc);
1081 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
1082 return result;
1083 }
1084
1085 /* Build a gimple assignment to cast VAL to TYPE. Insert the statement
1086 prior to GSI's current position, and return the fresh SSA name. */
1087
1088 static tree
1089 build_and_insert_cast (gimple_stmt_iterator *gsi, location_t loc,
1090 tree type, tree val)
1091 {
1092 tree result = make_ssa_name (type, NULL);
1093 gimple stmt = gimple_build_assign_with_ops (NOP_EXPR, result, val, NULL_TREE);
1094 gimple_set_location (stmt, loc);
1095 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
1096 return result;
1097 }
1098
1099 /* ARG0 and ARG1 are the two arguments to a pow builtin call in GSI
1100 with location info LOC. If possible, create an equivalent and
1101 less expensive sequence of statements prior to GSI, and return an
1102    expression holding the result.  */
1103
1104 static tree
1105 gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
1106 tree arg0, tree arg1)
1107 {
1108 REAL_VALUE_TYPE c, cint, dconst1_4, dconst3_4, dconst1_3, dconst1_6;
1109 REAL_VALUE_TYPE c2, dconst3;
1110 HOST_WIDE_INT n;
1111 tree type, sqrtfn, cbrtfn, sqrt_arg0, sqrt_sqrt, result, cbrt_x, powi_cbrt_x;
1112 enum machine_mode mode;
1113 bool hw_sqrt_exists, c_is_int, c2_is_int;
1114
1115 /* If the exponent isn't a constant, there's nothing of interest
1116 to be done. */
1117 if (TREE_CODE (arg1) != REAL_CST)
1118 return NULL_TREE;
1119
1120 /* If the exponent is equivalent to an integer, expand to an optimal
1121 multiplication sequence when profitable. */
1122 c = TREE_REAL_CST (arg1);
1123 n = real_to_integer (&c);
1124 real_from_integer (&cint, VOIDmode, n, n < 0 ? -1 : 0, 0);
1125 c_is_int = real_identical (&c, &cint);
1126
1127 if (c_is_int
1128 && ((n >= -1 && n <= 2)
1129 || (flag_unsafe_math_optimizations
1130 && optimize_insn_for_speed_p ()
1131 && powi_cost (n) <= POWI_MAX_MULTS)))
1132 return gimple_expand_builtin_powi (gsi, loc, arg0, n);
1133
1134 /* Attempt various optimizations using sqrt and cbrt. */
1135 type = TREE_TYPE (arg0);
1136 mode = TYPE_MODE (type);
1137 sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
1138
1139 /* Optimize pow(x,0.5) = sqrt(x). This replacement is always safe
1140 unless signed zeros must be maintained. pow(-0,0.5) = +0, while
1141 sqrt(-0) = -0. */
1142 if (sqrtfn
1143 && REAL_VALUES_EQUAL (c, dconsthalf)
1144 && !HONOR_SIGNED_ZEROS (mode))
1145 return build_and_insert_call (gsi, loc, sqrtfn, arg0);
1146
1147 /* Optimize pow(x,0.25) = sqrt(sqrt(x)). Assume on most machines that
1148 a builtin sqrt instruction is smaller than a call to pow with 0.25,
1149 so do this optimization even if -Os. Don't do this optimization
1150 if we don't have a hardware sqrt insn. */
1151 dconst1_4 = dconst1;
1152 SET_REAL_EXP (&dconst1_4, REAL_EXP (&dconst1_4) - 2);
1153 hw_sqrt_exists = optab_handler (sqrt_optab, mode) != CODE_FOR_nothing;
1154
1155 if (flag_unsafe_math_optimizations
1156 && sqrtfn
1157 && REAL_VALUES_EQUAL (c, dconst1_4)
1158 && hw_sqrt_exists)
1159 {
1160 /* sqrt(x) */
1161 sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);
1162
1163 /* sqrt(sqrt(x)) */
1164 return build_and_insert_call (gsi, loc, sqrtfn, sqrt_arg0);
1165 }
1166
1167 /* Optimize pow(x,0.75) = sqrt(x) * sqrt(sqrt(x)) unless we are
1168 optimizing for space. Don't do this optimization if we don't have
1169 a hardware sqrt insn. */
1170 real_from_integer (&dconst3_4, VOIDmode, 3, 0, 0);
1171 SET_REAL_EXP (&dconst3_4, REAL_EXP (&dconst3_4) - 2);
1172
1173 if (flag_unsafe_math_optimizations
1174 && sqrtfn
1175 && optimize_function_for_speed_p (cfun)
1176 && REAL_VALUES_EQUAL (c, dconst3_4)
1177 && hw_sqrt_exists)
1178 {
1179 /* sqrt(x) */
1180 sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);
1181
1182 /* sqrt(sqrt(x)) */
1183 sqrt_sqrt = build_and_insert_call (gsi, loc, sqrtfn, sqrt_arg0);
1184
1185 /* sqrt(x) * sqrt(sqrt(x)) */
1186 return build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1187 sqrt_arg0, sqrt_sqrt);
1188 }
1189
1190 /* Optimize pow(x,1./3.) = cbrt(x). This requires unsafe math
1191 optimizations since 1./3. is not exactly representable. If x
1192 is negative and finite, the correct value of pow(x,1./3.) is
1193 a NaN with the "invalid" exception raised, because the value
1194 of 1./3. actually has an even denominator. The correct value
1195 of cbrt(x) is a negative real value. */
1196 cbrtfn = mathfn_built_in (type, BUILT_IN_CBRT);
1197 dconst1_3 = real_value_truncate (mode, dconst_third ());
1198
1199 if (flag_unsafe_math_optimizations
1200 && cbrtfn
1201 && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
1202 && REAL_VALUES_EQUAL (c, dconst1_3))
1203 return build_and_insert_call (gsi, loc, cbrtfn, arg0);
1204
1205 /* Optimize pow(x,1./6.) = cbrt(sqrt(x)). Don't do this optimization
1206 if we don't have a hardware sqrt insn. */
1207 dconst1_6 = dconst1_3;
1208 SET_REAL_EXP (&dconst1_6, REAL_EXP (&dconst1_6) - 1);
1209
1210 if (flag_unsafe_math_optimizations
1211 && sqrtfn
1212 && cbrtfn
1213 && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
1214 && optimize_function_for_speed_p (cfun)
1215 && hw_sqrt_exists
1216 && REAL_VALUES_EQUAL (c, dconst1_6))
1217 {
1218 /* sqrt(x) */
1219 sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);
1220
1221 /* cbrt(sqrt(x)) */
1222 return build_and_insert_call (gsi, loc, cbrtfn, sqrt_arg0);
1223 }
1224
1225 /* Optimize pow(x,c), where n = 2c for some nonzero integer n
1226 and c not an integer, into
1227
1228 sqrt(x) * powi(x, n/2), n > 0;
1229 1.0 / (sqrt(x) * powi(x, abs(n/2))), n < 0.
1230
1231 Do not calculate the powi factor when n/2 = 0. */
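
  /* For instance (a sketch): pow (x, 3.5) gives n = 7, so the result
     is built as sqrt (x) * powi (x, 3), i.e. sqrt (x) * (x * x * x).  */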
1232 real_arithmetic (&c2, MULT_EXPR, &c, &dconst2);
1233 n = real_to_integer (&c2);
1234 real_from_integer (&cint, VOIDmode, n, n < 0 ? -1 : 0, 0);
1235 c2_is_int = real_identical (&c2, &cint);
1236
1237 if (flag_unsafe_math_optimizations
1238 && sqrtfn
1239 && c2_is_int
1240 && !c_is_int
1241 && optimize_function_for_speed_p (cfun))
1242 {
1243 tree powi_x_ndiv2 = NULL_TREE;
1244
1245 /* Attempt to fold powi(arg0, abs(n/2)) into multiplies. If not
1246 possible or profitable, give up. Skip the degenerate case when
1247 n is 1 or -1, where the result is always 1. */
1248 if (absu_hwi (n) != 1)
1249 {
1250 powi_x_ndiv2 = gimple_expand_builtin_powi (gsi, loc, arg0,
1251 abs_hwi (n / 2));
1252 if (!powi_x_ndiv2)
1253 return NULL_TREE;
1254 }
1255
1256 /* Calculate sqrt(x). When n is not 1 or -1, multiply it by the
1257 result of the optimal multiply sequence just calculated. */
1258 sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);
1259
1260 if (absu_hwi (n) == 1)
1261 result = sqrt_arg0;
1262 else
1263 result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1264 sqrt_arg0, powi_x_ndiv2);
1265
1266 /* If n is negative, reciprocate the result. */
1267 if (n < 0)
1268 result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
1269 build_real (type, dconst1), result);
1270 return result;
1271 }
1272
1273 /* Optimize pow(x,c), where 3c = n for some nonzero integer n, into
1274
1275 powi(x, n/3) * powi(cbrt(x), n%3), n > 0;
1276 1.0 / (powi(x, abs(n)/3) * powi(cbrt(x), abs(n)%3)), n < 0.
1277
1278 Do not calculate the first factor when n/3 = 0. As cbrt(x) is
1279 different from pow(x, 1./3.) due to rounding and behavior with
1280 negative x, we need to constrain this transformation to unsafe
1281 math and positive x or finite math. */
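
  /* For instance (a sketch): pow (x, 5./3.) gives n = 5, so the result
     is built as powi (x, 1) * powi (cbrt (x), 2), i.e.
     x * (cbrt (x) * cbrt (x)).  */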
1282 real_from_integer (&dconst3, VOIDmode, 3, 0, 0);
1283 real_arithmetic (&c2, MULT_EXPR, &c, &dconst3);
1284 real_round (&c2, mode, &c2);
1285 n = real_to_integer (&c2);
1286 real_from_integer (&cint, VOIDmode, n, n < 0 ? -1 : 0, 0);
1287 real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3);
1288 real_convert (&c2, mode, &c2);
1289
1290 if (flag_unsafe_math_optimizations
1291 && cbrtfn
1292 && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
1293 && real_identical (&c2, &c)
1294 && !c2_is_int
1295 && optimize_function_for_speed_p (cfun)
1296 && powi_cost (n / 3) <= POWI_MAX_MULTS)
1297 {
1298 tree powi_x_ndiv3 = NULL_TREE;
1299
1300 /* Attempt to fold powi(arg0, abs(n/3)) into multiplies. If not
1301 possible or profitable, give up. Skip the degenerate case when
1302 abs(n) < 3, where the result is always 1. */
1303 if (absu_hwi (n) >= 3)
1304 {
1305 powi_x_ndiv3 = gimple_expand_builtin_powi (gsi, loc, arg0,
1306 abs_hwi (n / 3));
1307 if (!powi_x_ndiv3)
1308 return NULL_TREE;
1309 }
1310
1311 /* Calculate powi(cbrt(x), n%3). Don't use gimple_expand_builtin_powi
1312 as that creates an unnecessary variable. Instead, just produce
1313 either cbrt(x) or cbrt(x) * cbrt(x). */
1314 cbrt_x = build_and_insert_call (gsi, loc, cbrtfn, arg0);
1315
1316 if (absu_hwi (n) % 3 == 1)
1317 powi_cbrt_x = cbrt_x;
1318 else
1319 powi_cbrt_x = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1320 cbrt_x, cbrt_x);
1321
1322 /* Multiply the two subexpressions, unless powi(x,abs(n)/3) = 1. */
1323 if (absu_hwi (n) < 3)
1324 result = powi_cbrt_x;
1325 else
1326 result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
1327 powi_x_ndiv3, powi_cbrt_x);
1328
1329 /* If n is negative, reciprocate the result. */
1330 if (n < 0)
1331 result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
1332 build_real (type, dconst1), result);
1333
1334 return result;
1335 }
1336
1337 /* No optimizations succeeded. */
1338 return NULL_TREE;
1339 }
1340
1341 /* ARG is the argument to a cabs builtin call in GSI with location info
1342 LOC. Create a sequence of statements prior to GSI that calculates
1343 sqrt(R*R + I*I), where R and I are the real and imaginary components
1344 of ARG, respectively. Return an expression holding the result. */
1345
1346 static tree
1347 gimple_expand_builtin_cabs (gimple_stmt_iterator *gsi, location_t loc, tree arg)
1348 {
1349 tree real_part, imag_part, addend1, addend2, sum, result;
1350 tree type = TREE_TYPE (TREE_TYPE (arg));
1351 tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
1352 enum machine_mode mode = TYPE_MODE (type);
1353
1354 if (!flag_unsafe_math_optimizations
1355 || !optimize_bb_for_speed_p (gimple_bb (gsi_stmt (*gsi)))
1356 || !sqrtfn
1357 || optab_handler (sqrt_optab, mode) == CODE_FOR_nothing)
1358 return NULL_TREE;
1359
1360 real_part = build_and_insert_ref (gsi, loc, type, "cabs",
1361 REALPART_EXPR, arg);
1362 addend1 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
1363 real_part, real_part);
1364 imag_part = build_and_insert_ref (gsi, loc, type, "cabs",
1365 IMAGPART_EXPR, arg);
1366 addend2 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
1367 imag_part, imag_part);
1368 sum = build_and_insert_binop (gsi, loc, "cabs", PLUS_EXPR, addend1, addend2);
1369 result = build_and_insert_call (gsi, loc, sqrtfn, sum);
1370
1371 return result;
1372 }
1373
1374 /* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1
1375 on the SSA_NAME argument of each of them. Also expand powi(x,n) into
1376 an optimal number of multiplies, when n is a constant. */
1377
1378 static unsigned int
1379 execute_cse_sincos (void)
1380 {
1381 basic_block bb;
1382 bool cfg_changed = false;
1383
1384 calculate_dominance_info (CDI_DOMINATORS);
1385 memset (&sincos_stats, 0, sizeof (sincos_stats));
1386
1387 FOR_EACH_BB (bb)
1388 {
1389 gimple_stmt_iterator gsi;
1390 bool cleanup_eh = false;
1391
1392 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
1393 {
1394 gimple stmt = gsi_stmt (gsi);
1395 tree fndecl;
1396
1397 /* Only the last stmt in a bb could throw, no need to call
1398 gimple_purge_dead_eh_edges if we change something in the middle
1399 of a basic block. */
1400 cleanup_eh = false;
1401
1402 if (is_gimple_call (stmt)
1403 && gimple_call_lhs (stmt)
1404 && (fndecl = gimple_call_fndecl (stmt))
1405 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
1406 {
1407 tree arg, arg0, arg1, result;
1408 HOST_WIDE_INT n;
1409 location_t loc;
1410
1411 switch (DECL_FUNCTION_CODE (fndecl))
1412 {
1413 CASE_FLT_FN (BUILT_IN_COS):
1414 CASE_FLT_FN (BUILT_IN_SIN):
1415 CASE_FLT_FN (BUILT_IN_CEXPI):
1416 /* Make sure we have either sincos or cexp. */
1417 if (!TARGET_HAS_SINCOS && !TARGET_C99_FUNCTIONS)
1418 break;
1419
1420 arg = gimple_call_arg (stmt, 0);
1421 if (TREE_CODE (arg) == SSA_NAME)
1422 cfg_changed |= execute_cse_sincos_1 (arg);
1423 break;
1424
1425 CASE_FLT_FN (BUILT_IN_POW):
1426 arg0 = gimple_call_arg (stmt, 0);
1427 arg1 = gimple_call_arg (stmt, 1);
1428
1429 loc = gimple_location (stmt);
1430 result = gimple_expand_builtin_pow (&gsi, loc, arg0, arg1);
1431
1432 if (result)
1433 {
1434 tree lhs = gimple_get_lhs (stmt);
1435 gimple new_stmt = gimple_build_assign (lhs, result);
1436 gimple_set_location (new_stmt, loc);
1437 unlink_stmt_vdef (stmt);
1438 gsi_replace (&gsi, new_stmt, true);
1439 cleanup_eh = true;
1440 if (gimple_vdef (stmt))
1441 release_ssa_name (gimple_vdef (stmt));
1442 }
1443 break;
1444
1445 CASE_FLT_FN (BUILT_IN_POWI):
1446 arg0 = gimple_call_arg (stmt, 0);
1447 arg1 = gimple_call_arg (stmt, 1);
1448 if (!host_integerp (arg1, 0))
1449 break;
1450
1451 n = TREE_INT_CST_LOW (arg1);
1452 loc = gimple_location (stmt);
1453 result = gimple_expand_builtin_powi (&gsi, loc, arg0, n);
1454
1455 if (result)
1456 {
1457 tree lhs = gimple_get_lhs (stmt);
1458 gimple new_stmt = gimple_build_assign (lhs, result);
1459 gimple_set_location (new_stmt, loc);
1460 unlink_stmt_vdef (stmt);
1461 gsi_replace (&gsi, new_stmt, true);
1462 cleanup_eh = true;
1463 if (gimple_vdef (stmt))
1464 release_ssa_name (gimple_vdef (stmt));
1465 }
1466 break;
1467
1468 CASE_FLT_FN (BUILT_IN_CABS):
1469 arg0 = gimple_call_arg (stmt, 0);
1470 loc = gimple_location (stmt);
1471 result = gimple_expand_builtin_cabs (&gsi, loc, arg0);
1472
1473 if (result)
1474 {
1475 tree lhs = gimple_get_lhs (stmt);
1476 gimple new_stmt = gimple_build_assign (lhs, result);
1477 gimple_set_location (new_stmt, loc);
1478 unlink_stmt_vdef (stmt);
1479 gsi_replace (&gsi, new_stmt, true);
1480 cleanup_eh = true;
1481 if (gimple_vdef (stmt))
1482 release_ssa_name (gimple_vdef (stmt));
1483 }
1484 break;
1485
1486 default:;
1487 }
1488 }
1489 }
1490 if (cleanup_eh)
1491 cfg_changed |= gimple_purge_dead_eh_edges (bb);
1492 }
1493
1494 statistics_counter_event (cfun, "sincos statements inserted",
1495 sincos_stats.inserted);
1496
1497 free_dominance_info (CDI_DOMINATORS);
1498 return cfg_changed ? TODO_cleanup_cfg : 0;
1499 }
1500
1501 static bool
1502 gate_cse_sincos (void)
1503 {
1504 /* We no longer require either sincos or cexp, since powi expansion
1505 piggybacks on this pass. */
1506 return optimize;
1507 }
1508
1509 struct gimple_opt_pass pass_cse_sincos =
1510 {
1511 {
1512 GIMPLE_PASS,
1513 "sincos", /* name */
1514 OPTGROUP_NONE, /* optinfo_flags */
1515 gate_cse_sincos, /* gate */
1516 execute_cse_sincos, /* execute */
1517 NULL, /* sub */
1518 NULL, /* next */
1519 0, /* static_pass_number */
1520 TV_NONE, /* tv_id */
1521 PROP_ssa, /* properties_required */
1522 0, /* properties_provided */
1523 0, /* properties_destroyed */
1524 0, /* todo_flags_start */
1525 TODO_update_ssa | TODO_verify_ssa
1526 | TODO_verify_stmts /* todo_flags_finish */
1527 }
1528 };
1529
1530 /* A symbolic number is used to detect byte permutation and selection
1531 patterns. Therefore the field N contains an artificial number
1532 consisting of byte size markers:
1533
1534 0 - byte has the value 0
1535    1..size - the byte contains the content of the source byte
1536             indexed by that value minus one  */
1537
1538 struct symbolic_number {
1539 unsigned HOST_WIDEST_INT n;
1540 int size;
1541 };
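
/* A sketch for a 32-bit value x: a leaf load of x yields
   n = 0x04030201 (byte 1 in the lowest position).  Then (x >> 24)
   turns it into 0x00000004, (x << 24) into 0x01000000, and the
   BIT_IOR_EXPR of the four shifted and masked pieces of a manual
   bswap merges them into 0x01020304, the value find_bswap tests
   against.  */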
1542
1543 /* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
1544 number N. Return false if the requested operation is not permitted
1545 on a symbolic number. */
1546
1547 static inline bool
1548 do_shift_rotate (enum tree_code code,
1549 struct symbolic_number *n,
1550 int count)
1551 {
1552 if (count % 8 != 0)
1553 return false;
1554
1555 /* Zero out the extra bits of N in order to avoid them being shifted
1556 into the significant bits. */
1557 if (n->size < (int)sizeof (HOST_WIDEST_INT))
1558 n->n &= ((unsigned HOST_WIDEST_INT)1 << (n->size * BITS_PER_UNIT)) - 1;
1559
1560 switch (code)
1561 {
1562 case LSHIFT_EXPR:
1563 n->n <<= count;
1564 break;
1565 case RSHIFT_EXPR:
1566 n->n >>= count;
1567 break;
1568 case LROTATE_EXPR:
1569 n->n = (n->n << count) | (n->n >> ((n->size * BITS_PER_UNIT) - count));
1570 break;
1571 case RROTATE_EXPR:
1572 n->n = (n->n >> count) | (n->n << ((n->size * BITS_PER_UNIT) - count));
1573 break;
1574 default:
1575 return false;
1576 }
1577 /* Zero unused bits for size. */
1578 if (n->size < (int)sizeof (HOST_WIDEST_INT))
1579 n->n &= ((unsigned HOST_WIDEST_INT)1 << (n->size * BITS_PER_UNIT)) - 1;
1580 return true;
1581 }
1582
1583 /* Perform sanity checking for the symbolic number N and the gimple
1584 statement STMT. */
1585
1586 static inline bool
1587 verify_symbolic_number_p (struct symbolic_number *n, gimple stmt)
1588 {
1589 tree lhs_type;
1590
1591 lhs_type = gimple_expr_type (stmt);
1592
1593 if (TREE_CODE (lhs_type) != INTEGER_TYPE)
1594 return false;
1595
1596 if (TYPE_PRECISION (lhs_type) != n->size * BITS_PER_UNIT)
1597 return false;
1598
1599 return true;
1600 }
1601
1602 /* find_bswap_1 invokes itself recursively with N and tries to perform
1603 the operation given by the rhs of STMT on the result. If the
1604    operation could successfully be executed, the function returns the
1605    tree expression of the source operand, and NULL otherwise.  */
1606
1607 static tree
1608 find_bswap_1 (gimple stmt, struct symbolic_number *n, int limit)
1609 {
1610 enum tree_code code;
1611 tree rhs1, rhs2 = NULL;
1612 gimple rhs1_stmt, rhs2_stmt;
1613 tree source_expr1;
1614 enum gimple_rhs_class rhs_class;
1615
1616 if (!limit || !is_gimple_assign (stmt))
1617 return NULL_TREE;
1618
1619 rhs1 = gimple_assign_rhs1 (stmt);
1620
1621 if (TREE_CODE (rhs1) != SSA_NAME)
1622 return NULL_TREE;
1623
1624 code = gimple_assign_rhs_code (stmt);
1625 rhs_class = gimple_assign_rhs_class (stmt);
1626 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
1627
1628 if (rhs_class == GIMPLE_BINARY_RHS)
1629 rhs2 = gimple_assign_rhs2 (stmt);
1630
1631 /* Handle unary rhs and binary rhs with integer constants as second
1632 operand. */
1633
1634 if (rhs_class == GIMPLE_UNARY_RHS
1635 || (rhs_class == GIMPLE_BINARY_RHS
1636 && TREE_CODE (rhs2) == INTEGER_CST))
1637 {
1638 if (code != BIT_AND_EXPR
1639 && code != LSHIFT_EXPR
1640 && code != RSHIFT_EXPR
1641 && code != LROTATE_EXPR
1642 && code != RROTATE_EXPR
1643 && code != NOP_EXPR
1644 && code != CONVERT_EXPR)
1645 return NULL_TREE;
1646
1647 source_expr1 = find_bswap_1 (rhs1_stmt, n, limit - 1);
1648
1649       /* If find_bswap_1 returned NULL, STMT is a leaf node and we have
1650 to initialize the symbolic number. */
1651 if (!source_expr1)
1652 {
1653 /* Set up the symbolic number N by setting each byte to a
1654 value between 1 and the byte size of rhs1. The highest
1655 order byte is set to n->size and the lowest order
1656 byte to 1. */
1657 n->size = TYPE_PRECISION (TREE_TYPE (rhs1));
1658 if (n->size % BITS_PER_UNIT != 0)
1659 return NULL_TREE;
1660 n->size /= BITS_PER_UNIT;
1661 n->n = (sizeof (HOST_WIDEST_INT) < 8 ? 0 :
1662 (unsigned HOST_WIDEST_INT)0x08070605 << 32 | 0x04030201);
1663
1664 if (n->size < (int)sizeof (HOST_WIDEST_INT))
1665 n->n &= ((unsigned HOST_WIDEST_INT)1 <<
1666 (n->size * BITS_PER_UNIT)) - 1;
1667
1668 source_expr1 = rhs1;
1669 }
1670
1671 switch (code)
1672 {
1673 case BIT_AND_EXPR:
1674 {
1675 int i;
1676 unsigned HOST_WIDEST_INT val = widest_int_cst_value (rhs2);
1677 unsigned HOST_WIDEST_INT tmp = val;
1678
1679 /* Only constants masking full bytes are allowed. */
1680 for (i = 0; i < n->size; i++, tmp >>= BITS_PER_UNIT)
1681 if ((tmp & 0xff) != 0 && (tmp & 0xff) != 0xff)
1682 return NULL_TREE;
1683
1684 n->n &= val;
1685 }
1686 break;
1687 case LSHIFT_EXPR:
1688 case RSHIFT_EXPR:
1689 case LROTATE_EXPR:
1690 case RROTATE_EXPR:
1691 if (!do_shift_rotate (code, n, (int)TREE_INT_CST_LOW (rhs2)))
1692 return NULL_TREE;
1693 break;
1694 CASE_CONVERT:
1695 {
1696 int type_size;
1697
1698 type_size = TYPE_PRECISION (gimple_expr_type (stmt));
1699 if (type_size % BITS_PER_UNIT != 0)
1700 return NULL_TREE;
1701
1702 if (type_size / BITS_PER_UNIT < (int)(sizeof (HOST_WIDEST_INT)))
1703 {
1704 	      /* If STMT casts to a smaller type, mask out the bits not
1705 belonging to the target type. */
1706 n->n &= ((unsigned HOST_WIDEST_INT)1 << type_size) - 1;
1707 }
1708 n->size = type_size / BITS_PER_UNIT;
1709 }
1710 break;
1711 default:
1712 return NULL_TREE;
1713 };
1714 return verify_symbolic_number_p (n, stmt) ? source_expr1 : NULL;
1715 }
1716
1717 /* Handle binary rhs. */
1718
1719 if (rhs_class == GIMPLE_BINARY_RHS)
1720 {
1721 struct symbolic_number n1, n2;
1722 tree source_expr2;
1723
1724 if (code != BIT_IOR_EXPR)
1725 return NULL_TREE;
1726
1727 if (TREE_CODE (rhs2) != SSA_NAME)
1728 return NULL_TREE;
1729
1730 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
1731
1732 switch (code)
1733 {
1734 case BIT_IOR_EXPR:
1735 source_expr1 = find_bswap_1 (rhs1_stmt, &n1, limit - 1);
1736
1737 if (!source_expr1)
1738 return NULL_TREE;
1739
1740 source_expr2 = find_bswap_1 (rhs2_stmt, &n2, limit - 1);
1741
1742 if (source_expr1 != source_expr2
1743 || n1.size != n2.size)
1744 return NULL_TREE;
1745
1746 n->size = n1.size;
1747 n->n = n1.n | n2.n;
1748
1749 if (!verify_symbolic_number_p (n, stmt))
1750 return NULL_TREE;
1751
1752 break;
1753 default:
1754 return NULL_TREE;
1755 }
1756 return source_expr1;
1757 }
1758 return NULL_TREE;
1759 }
1760
1761 /* Check if STMT completes a bswap implementation consisting of ORs,
1762 SHIFTs and ANDs. Return the source tree expression on which the
1763    byte swap is performed, and NULL if no bswap was found.  */
1764
1765 static tree
1766 find_bswap (gimple stmt)
1767 {
1768 /* The number which the find_bswap result should match in order to
1769      have a full byte swap.  The number is shifted to the right according
1770 to the size of the symbolic number before using it. */
1771 unsigned HOST_WIDEST_INT cmp =
1772 sizeof (HOST_WIDEST_INT) < 8 ? 0 :
1773 (unsigned HOST_WIDEST_INT)0x01020304 << 32 | 0x05060708;
1774
1775 struct symbolic_number n;
1776 tree source_expr;
1777 int limit;
1778
1779 /* The last parameter determines the depth search limit. It usually
1780 correlates directly to the number of bytes to be touched. We
1781      increase that number by log2(n) + 1 here in order to also cover
1782      signed -> unsigned conversions of the src operand, as can be seen
1783      in libgcc, and for the initial shift/and operation of the src operand.  */
1784 limit = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt)));
1785 limit += 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit);
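/* E.g. for a 4-byte value this gives limit = 4 + 1 + ceil_log2 (4) = 7. */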
1786 source_expr = find_bswap_1 (stmt, &n, limit);
1787
1788 if (!source_expr)
1789 return NULL_TREE;
1790
1791 /* Zero out the extra bits of N and CMP. */
1792 if (n.size < (int)sizeof (HOST_WIDEST_INT))
1793 {
1794 unsigned HOST_WIDEST_INT mask =
1795 ((unsigned HOST_WIDEST_INT)1 << (n.size * BITS_PER_UNIT)) - 1;
1796
1797 n.n &= mask;
1798 cmp >>= (sizeof (HOST_WIDEST_INT) - n.size) * BITS_PER_UNIT;
1799 }
1800
1801 /* A complete byte swap should make the symbolic number start
1802 with the largest digit in the highest-order byte. */
1803 if (cmp != n.n)
1804 return NULL_TREE;
1805
1806 return source_expr;
1807 }
1808
1809 /* Find manual byte swap implementations and turn them into a bswap
1810 builtin invocation. */
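/* For instance (a sketch), the common 16-bit idiom

     res = (x >> 8) | (x << 8);

   can be replaced by res = __builtin_bswap16 (x); provided the target
   has both the builtin and an HImode bswap pattern. */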
1811
1812 static unsigned int
1813 execute_optimize_bswap (void)
1814 {
1815 basic_block bb;
1816 bool bswap16_p, bswap32_p, bswap64_p;
1817 bool changed = false;
1818 tree bswap16_type = NULL_TREE, bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;
1819
1820 if (BITS_PER_UNIT != 8)
1821 return 0;
1822
1823 if (sizeof (HOST_WIDEST_INT) < 8)
1824 return 0;
1825
1826 bswap16_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP16)
1827 && optab_handler (bswap_optab, HImode) != CODE_FOR_nothing);
1828 bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
1829 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
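/* A 64-bit bswap is also usable when only a 32-bit bswap exists but
   word_mode is SImode; it is then expanded as two word-mode bswaps. */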
1830 bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
1831 && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
1832 || (bswap32_p && word_mode == SImode)));
1833
1834 if (!bswap16_p && !bswap32_p && !bswap64_p)
1835 return 0;
1836
1837 /* Determine the argument type of the builtins. The code later on
1838 assumes that the return and argument types are the same. */
1839 if (bswap16_p)
1840 {
1841 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP16);
1842 bswap16_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1843 }
1844
1845 if (bswap32_p)
1846 {
1847 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
1848 bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1849 }
1850
1851 if (bswap64_p)
1852 {
1853 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
1854 bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
1855 }
1856
1857 memset (&bswap_stats, 0, sizeof (bswap_stats));
1858
1859 FOR_EACH_BB (bb)
1860 {
1861 gimple_stmt_iterator gsi;
1862
1863 /* We do a reverse scan for bswap patterns to make sure we get the
1864 widest match. As bswap pattern matching doesn't handle
1865 previously inserted smaller bswap replacements as sub-
1866 patterns, the wider variant wouldn't be detected. */
1867 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
1868 {
1869 gimple stmt = gsi_stmt (gsi);
1870 tree bswap_src, bswap_type;
1871 tree bswap_tmp;
1872 tree fndecl = NULL_TREE;
1873 int type_size;
1874 gimple call;
1875
1876 if (!is_gimple_assign (stmt)
1877 || gimple_assign_rhs_code (stmt) != BIT_IOR_EXPR)
1878 continue;
1879
1880 type_size = TYPE_PRECISION (gimple_expr_type (stmt));
1881
1882 switch (type_size)
1883 {
1884 case 16:
1885 if (bswap16_p)
1886 {
1887 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP16);
1888 bswap_type = bswap16_type;
1889 }
1890 break;
1891 case 32:
1892 if (bswap32_p)
1893 {
1894 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
1895 bswap_type = bswap32_type;
1896 }
1897 break;
1898 case 64:
1899 if (bswap64_p)
1900 {
1901 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
1902 bswap_type = bswap64_type;
1903 }
1904 break;
1905 default:
1906 continue;
1907 }
1908
1909 if (!fndecl)
1910 continue;
1911
1912 bswap_src = find_bswap (stmt);
1913
1914 if (!bswap_src)
1915 continue;
1916
1917 changed = true;
1918 if (type_size == 16)
1919 bswap_stats.found_16bit++;
1920 else if (type_size == 32)
1921 bswap_stats.found_32bit++;
1922 else
1923 bswap_stats.found_64bit++;
1924
1925 bswap_tmp = bswap_src;
1926
1927 /* Convert the src expression if necessary. */
1928 if (!useless_type_conversion_p (TREE_TYPE (bswap_tmp), bswap_type))
1929 {
1930 gimple convert_stmt;
1931 bswap_tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
1932 convert_stmt = gimple_build_assign_with_ops
1933 (NOP_EXPR, bswap_tmp, bswap_src, NULL);
1934 gsi_insert_before (&gsi, convert_stmt, GSI_SAME_STMT);
1935 }
1936
1937 call = gimple_build_call (fndecl, 1, bswap_tmp);
1938
1939 bswap_tmp = gimple_assign_lhs (stmt);
1940
1941 /* Convert the result if necessary. */
1942 if (!useless_type_conversion_p (TREE_TYPE (bswap_tmp), bswap_type))
1943 {
1944 gimple convert_stmt;
1945 bswap_tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
1946 convert_stmt = gimple_build_assign_with_ops
1947 (NOP_EXPR, gimple_assign_lhs (stmt), bswap_tmp, NULL);
1948 gsi_insert_after (&gsi, convert_stmt, GSI_SAME_STMT);
1949 }
1950
1951 gimple_call_set_lhs (call, bswap_tmp);
1952
1953 if (dump_file)
1954 {
1955 fprintf (dump_file, "%d bit bswap implementation found at: ",
1956 (int)type_size);
1957 print_gimple_stmt (dump_file, stmt, 0, 0);
1958 }
1959
1960 gsi_insert_after (&gsi, call, GSI_SAME_STMT);
1961 gsi_remove (&gsi, true);
1962 }
1963 }
1964
1965 statistics_counter_event (cfun, "16-bit bswap implementations found",
1966 bswap_stats.found_16bit);
1967 statistics_counter_event (cfun, "32-bit bswap implementations found",
1968 bswap_stats.found_32bit);
1969 statistics_counter_event (cfun, "64-bit bswap implementations found",
1970 bswap_stats.found_64bit);
1971
1972 return (changed ? TODO_update_ssa | TODO_verify_ssa
1973 | TODO_verify_stmts : 0);
1974 }
1975
1976 static bool
1977 gate_optimize_bswap (void)
1978 {
1979 return flag_expensive_optimizations && optimize;
1980 }
1981
1982 struct gimple_opt_pass pass_optimize_bswap =
1983 {
1984 {
1985 GIMPLE_PASS,
1986 "bswap", /* name */
1987 OPTGROUP_NONE, /* optinfo_flags */
1988 gate_optimize_bswap, /* gate */
1989 execute_optimize_bswap, /* execute */
1990 NULL, /* sub */
1991 NULL, /* next */
1992 0, /* static_pass_number */
1993 TV_NONE, /* tv_id */
1994 PROP_ssa, /* properties_required */
1995 0, /* properties_provided */
1996 0, /* properties_destroyed */
1997 0, /* todo_flags_start */
1998 0 /* todo_flags_finish */
1999 }
2000 };
2001
2002 /* Return true if stmt is a type conversion operation that can be stripped
2003 when used in a widening multiply operation. */
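/* For example, in

     int prod = (int) s1 * (int) s2;   with s1, s2 of type short,

   the conversions to int can be stripped: the WIDEN_MULT_EXPR selected
   later performs the extension itself. */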
2004 static bool
2005 widening_mult_conversion_strippable_p (tree result_type, gimple stmt)
2006 {
2007 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
2008
2009 if (TREE_CODE (result_type) == INTEGER_TYPE)
2010 {
2011 tree op_type;
2012 tree inner_op_type;
2013
2014 if (!CONVERT_EXPR_CODE_P (rhs_code))
2015 return false;
2016
2017 op_type = TREE_TYPE (gimple_assign_lhs (stmt));
2018
2019 /* If the type of OP has the same precision as the result, then
2020 we can strip this conversion. The multiply operation will be
2021 selected to create the correct extension as a by-product. */
2022 if (TYPE_PRECISION (result_type) == TYPE_PRECISION (op_type))
2023 return true;
2024
2025 /* We can also strip a conversion if it preserves the signedness of
2026 the operation and doesn't narrow the range. */
2027 inner_op_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
2028
2029 /* If the inner-most type is unsigned, then we can strip any
2030 intermediate widening operation. If it's signed, then the
2031 intermediate widening operation must also be signed. */
2032 if ((TYPE_UNSIGNED (inner_op_type)
2033 || TYPE_UNSIGNED (op_type) == TYPE_UNSIGNED (inner_op_type))
2034 && TYPE_PRECISION (op_type) > TYPE_PRECISION (inner_op_type))
2035 return true;
2036
2037 return false;
2038 }
2039
2040 return rhs_code == FIXED_CONVERT_EXPR;
2041 }
2042
2043 /* Return true if RHS is a suitable operand for a widening multiplication,
2044 assuming a target type of TYPE.
2045 There are two cases:
2046
2047 - RHS makes some value at least twice as wide. Store that value
2048 in *NEW_RHS_OUT if so, and store its type in *TYPE_OUT.
2049
2050 - RHS is an integer constant. Store that value in *NEW_RHS_OUT if so,
2051 and store NULL in *TYPE_OUT. */
2052
2053 static bool
2054 is_widening_mult_rhs_p (tree type, tree rhs, tree *type_out,
2055 tree *new_rhs_out)
2056 {
2057 gimple stmt;
2058 tree type1, rhs1;
2059
2060 if (TREE_CODE (rhs) == SSA_NAME)
2061 {
2062 stmt = SSA_NAME_DEF_STMT (rhs);
2063 if (is_gimple_assign (stmt))
2064 {
2065 if (! widening_mult_conversion_strippable_p (type, stmt))
2066 rhs1 = rhs;
2067 else
2068 {
2069 rhs1 = gimple_assign_rhs1 (stmt);
2070
2071 if (TREE_CODE (rhs1) == INTEGER_CST)
2072 {
2073 *new_rhs_out = rhs1;
2074 *type_out = NULL;
2075 return true;
2076 }
2077 }
2078 }
2079 else
2080 rhs1 = rhs;
2081
2082 type1 = TREE_TYPE (rhs1);
2083
2084 if (TREE_CODE (type1) != TREE_CODE (type)
2085 || TYPE_PRECISION (type1) * 2 > TYPE_PRECISION (type))
2086 return false;
2087
2088 *new_rhs_out = rhs1;
2089 *type_out = type1;
2090 return true;
2091 }
2092
2093 if (TREE_CODE (rhs) == INTEGER_CST)
2094 {
2095 *new_rhs_out = rhs;
2096 *type_out = NULL;
2097 return true;
2098 }
2099
2100 return false;
2101 }
2102
2103 /* Return true if STMT performs a widening multiplication, assuming the
2104 output type is TYPE. If so, store the unwidened types of the operands
2105 in *TYPE1_OUT and *TYPE2_OUT respectively. Also fill *RHS1_OUT and
2106 *RHS2_OUT such that converting those operands to types *TYPE1_OUT
2107 and *TYPE2_OUT would give the operands of the multiplication. */
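/* E.g. for int c = (int) s * 10 with s of type short, both *TYPE1_OUT
   and *TYPE2_OUT become short (the constant 10 fits that type and so
   inherits it), and *RHS1_OUT / *RHS2_OUT are s and 10. */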
2108
2109 static bool
2110 is_widening_mult_p (gimple stmt,
2111 tree *type1_out, tree *rhs1_out,
2112 tree *type2_out, tree *rhs2_out)
2113 {
2114 tree type = TREE_TYPE (gimple_assign_lhs (stmt));
2115
2116 if (TREE_CODE (type) != INTEGER_TYPE
2117 && TREE_CODE (type) != FIXED_POINT_TYPE)
2118 return false;
2119
2120 if (!is_widening_mult_rhs_p (type, gimple_assign_rhs1 (stmt), type1_out,
2121 rhs1_out))
2122 return false;
2123
2124 if (!is_widening_mult_rhs_p (type, gimple_assign_rhs2 (stmt), type2_out,
2125 rhs2_out))
2126 return false;
2127
2128 if (*type1_out == NULL)
2129 {
2130 if (*type2_out == NULL || !int_fits_type_p (*rhs1_out, *type2_out))
2131 return false;
2132 *type1_out = *type2_out;
2133 }
2134
2135 if (*type2_out == NULL)
2136 {
2137 if (!int_fits_type_p (*rhs2_out, *type1_out))
2138 return false;
2139 *type2_out = *type1_out;
2140 }
2141
2142 /* Ensure that the larger of the two operands comes first. */
2143 if (TYPE_PRECISION (*type1_out) < TYPE_PRECISION (*type2_out))
2144 {
2145 tree tmp;
2146 tmp = *type1_out;
2147 *type1_out = *type2_out;
2148 *type2_out = tmp;
2149 tmp = *rhs1_out;
2150 *rhs1_out = *rhs2_out;
2151 *rhs2_out = tmp;
2152 }
2153
2154 return true;
2155 }
2156
2157 /* Process a single gimple statement STMT, which has a MULT_EXPR as
2158 its rhs, and try to convert it into a WIDEN_MULT_EXPR. The return
2159 value is true iff we converted the statement. */
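/* A minimal sketch of the effect, assuming the target has a
   16x16->32 widening multiply pattern:

     short a, b;
     int p = (int) a * (int) b;

   is rewritten as p = a w* b (a WIDEN_MULT_EXPR), with casts inserted
   first if the operands are not already in the needed precision. */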
2160
2161 static bool
2162 convert_mult_to_widen (gimple stmt, gimple_stmt_iterator *gsi)
2163 {
2164 tree lhs, rhs1, rhs2, type, type1, type2;
2165 enum insn_code handler;
2166 enum machine_mode to_mode, from_mode, actual_mode;
2167 optab op;
2168 int actual_precision;
2169 location_t loc = gimple_location (stmt);
2170 bool from_unsigned1, from_unsigned2;
2171
2172 lhs = gimple_assign_lhs (stmt);
2173 type = TREE_TYPE (lhs);
2174 if (TREE_CODE (type) != INTEGER_TYPE)
2175 return false;
2176
2177 if (!is_widening_mult_p (stmt, &type1, &rhs1, &type2, &rhs2))
2178 return false;
2179
2180 to_mode = TYPE_MODE (type);
2181 from_mode = TYPE_MODE (type1);
2182 from_unsigned1 = TYPE_UNSIGNED (type1);
2183 from_unsigned2 = TYPE_UNSIGNED (type2);
2184
2185 if (from_unsigned1 && from_unsigned2)
2186 op = umul_widen_optab;
2187 else if (!from_unsigned1 && !from_unsigned2)
2188 op = smul_widen_optab;
2189 else
2190 op = usmul_widen_optab;
2191
2192 handler = find_widening_optab_handler_and_mode (op, to_mode, from_mode,
2193 0, &actual_mode);
2194
2195 if (handler == CODE_FOR_nothing)
2196 {
2197 if (op != smul_widen_optab)
2198 {
2199 /* We can use a signed multiply with unsigned types as long as
2200 there is a wider mode to use, or it is the smaller of the two
2201 types that is unsigned. Note that type1 >= type2, always. */
2202 if ((TYPE_UNSIGNED (type1)
2203 && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
2204 || (TYPE_UNSIGNED (type2)
2205 && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
2206 {
2207 from_mode = GET_MODE_WIDER_MODE (from_mode);
2208 if (GET_MODE_SIZE (to_mode) <= GET_MODE_SIZE (from_mode))
2209 return false;
2210 }
2211
2212 op = smul_widen_optab;
2213 handler = find_widening_optab_handler_and_mode (op, to_mode,
2214 from_mode, 0,
2215 &actual_mode);
2216
2217 if (handler == CODE_FOR_nothing)
2218 return false;
2219
2220 from_unsigned1 = from_unsigned2 = false;
2221 }
2222 else
2223 return false;
2224 }
2225
2226 /* Ensure that the inputs to the handler are in the correct precision
2227 for the opcode. This will be the full mode size. */
2228 actual_precision = GET_MODE_PRECISION (actual_mode);
2229 if (2 * actual_precision > TYPE_PRECISION (type))
2230 return false;
2231 if (actual_precision != TYPE_PRECISION (type1)
2232 || from_unsigned1 != TYPE_UNSIGNED (type1))
2233 rhs1 = build_and_insert_cast (gsi, loc,
2234 build_nonstandard_integer_type
2235 (actual_precision, from_unsigned1), rhs1);
2236 if (actual_precision != TYPE_PRECISION (type2)
2237 || from_unsigned2 != TYPE_UNSIGNED (type2))
2238 rhs2 = build_and_insert_cast (gsi, loc,
2239 build_nonstandard_integer_type
2240 (actual_precision, from_unsigned2), rhs2);
2241
2242 /* Handle constants. */
2243 if (TREE_CODE (rhs1) == INTEGER_CST)
2244 rhs1 = fold_convert (type1, rhs1);
2245 if (TREE_CODE (rhs2) == INTEGER_CST)
2246 rhs2 = fold_convert (type2, rhs2);
2247
2248 gimple_assign_set_rhs1 (stmt, rhs1);
2249 gimple_assign_set_rhs2 (stmt, rhs2);
2250 gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
2251 update_stmt (stmt);
2252 widen_mul_stats.widen_mults_inserted++;
2253 return true;
2254 }
2255
2256 /* Process a single gimple statement STMT, which is found at the
2257 iterator GSI and has either a PLUS_EXPR or a MINUS_EXPR as its
2258 rhs (given by CODE), and try to convert it into a
2259 WIDEN_MULT_PLUS_EXPR or a WIDEN_MULT_MINUS_EXPR. The return value
2260 is true iff we converted the statement. */
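/* Sketch, assuming the target provides a suitable multiply-and-
   accumulate pattern (e.g. a 16x16+32 madd):

     short a, b;  int acc;
     acc = acc + (int) a * (int) b;

   becomes acc = WIDEN_MULT_PLUS_EXPR <a, b, acc>. */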
2261
2262 static bool
2263 convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple stmt,
2264 enum tree_code code)
2265 {
2266 gimple rhs1_stmt = NULL, rhs2_stmt = NULL;
2267 gimple conv1_stmt = NULL, conv2_stmt = NULL, conv_stmt;
2268 tree type, type1, type2, optype;
2269 tree lhs, rhs1, rhs2, mult_rhs1, mult_rhs2, add_rhs;
2270 enum tree_code rhs1_code = ERROR_MARK, rhs2_code = ERROR_MARK;
2271 optab this_optab;
2272 enum tree_code wmult_code;
2273 enum insn_code handler;
2274 enum machine_mode to_mode, from_mode, actual_mode;
2275 location_t loc = gimple_location (stmt);
2276 int actual_precision;
2277 bool from_unsigned1, from_unsigned2;
2278
2279 lhs = gimple_assign_lhs (stmt);
2280 type = TREE_TYPE (lhs);
2281 if (TREE_CODE (type) != INTEGER_TYPE
2282 && TREE_CODE (type) != FIXED_POINT_TYPE)
2283 return false;
2284
2285 if (code == MINUS_EXPR)
2286 wmult_code = WIDEN_MULT_MINUS_EXPR;
2287 else
2288 wmult_code = WIDEN_MULT_PLUS_EXPR;
2289
2290 rhs1 = gimple_assign_rhs1 (stmt);
2291 rhs2 = gimple_assign_rhs2 (stmt);
2292
2293 if (TREE_CODE (rhs1) == SSA_NAME)
2294 {
2295 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
2296 if (is_gimple_assign (rhs1_stmt))
2297 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
2298 }
2299
2300 if (TREE_CODE (rhs2) == SSA_NAME)
2301 {
2302 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
2303 if (is_gimple_assign (rhs2_stmt))
2304 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
2305 }
2306
2307 /* Allow for one conversion statement between the multiply
2308 and addition/subtraction statement. If there are more than
2309 one conversions then we assume they would invalidate this
2310 transformation. If that's not the case then they should have
2311 been folded before now. */
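/* E.g. a single widening of the multiply result, as in
   t = (long) ((int) a * (int) b); acc = acc + t; is still
   considered, subject to the value-preservation checks below. */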
2312 if (CONVERT_EXPR_CODE_P (rhs1_code))
2313 {
2314 conv1_stmt = rhs1_stmt;
2315 rhs1 = gimple_assign_rhs1 (rhs1_stmt);
2316 if (TREE_CODE (rhs1) == SSA_NAME)
2317 {
2318 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
2319 if (is_gimple_assign (rhs1_stmt))
2320 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
2321 }
2322 else
2323 return false;
2324 }
2325 if (CONVERT_EXPR_CODE_P (rhs2_code))
2326 {
2327 conv2_stmt = rhs2_stmt;
2328 rhs2 = gimple_assign_rhs1 (rhs2_stmt);
2329 if (TREE_CODE (rhs2) == SSA_NAME)
2330 {
2331 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
2332 if (is_gimple_assign (rhs2_stmt))
2333 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
2334 }
2335 else
2336 return false;
2337 }
2338
2339 /* If the rhs code is WIDEN_MULT_EXPR then it would seem unnecessary
2340 to call is_widening_mult_p, but we still need the operands it returns.
2341
2342 It might also appear that it would be sufficient to use the existing
2343 operands of the widening multiply, but that would limit the choice of
2344 multiply-and-accumulate instructions. */
2345 if (code == PLUS_EXPR
2346 && (rhs1_code == MULT_EXPR || rhs1_code == WIDEN_MULT_EXPR))
2347 {
2348 if (!is_widening_mult_p (rhs1_stmt, &type1, &mult_rhs1,
2349 &type2, &mult_rhs2))
2350 return false;
2351 add_rhs = rhs2;
2352 conv_stmt = conv1_stmt;
2353 }
2354 else if (rhs2_code == MULT_EXPR || rhs2_code == WIDEN_MULT_EXPR)
2355 {
2356 if (!is_widening_mult_p (rhs2_stmt, &type1, &mult_rhs1,
2357 &type2, &mult_rhs2))
2358 return false;
2359 add_rhs = rhs1;
2360 conv_stmt = conv2_stmt;
2361 }
2362 else
2363 return false;
2364
2365 to_mode = TYPE_MODE (type);
2366 from_mode = TYPE_MODE (type1);
2367 from_unsigned1 = TYPE_UNSIGNED (type1);
2368 from_unsigned2 = TYPE_UNSIGNED (type2);
2369 optype = type1;
2370
2371 /* There's no such thing as a mixed sign madd yet, so use a wider mode. */
2372 if (from_unsigned1 != from_unsigned2)
2373 {
2374 if (!INTEGRAL_TYPE_P (type))
2375 return false;
2376 /* We can use a signed multiply with unsigned types as long as
2377 there is a wider mode to use, or it is the smaller of the two
2378 types that is unsigned. Note that type1 >= type2, always. */
2379 if ((from_unsigned1
2380 && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
2381 || (from_unsigned2
2382 && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
2383 {
2384 from_mode = GET_MODE_WIDER_MODE (from_mode);
2385 if (GET_MODE_SIZE (from_mode) >= GET_MODE_SIZE (to_mode))
2386 return false;
2387 }
2388
2389 from_unsigned1 = from_unsigned2 = false;
2390 optype = build_nonstandard_integer_type (GET_MODE_PRECISION (from_mode),
2391 false);
2392 }
2393
2394 /* If there was a conversion between the multiply and addition
2395 then we need to make sure it fits a multiply-and-accumulate.
2396 This should be a single mode change which does not change the
2397 value. */
2398 if (conv_stmt)
2399 {
2400 /* We use the original, unmodified data types for this. */
2401 tree from_type = TREE_TYPE (gimple_assign_rhs1 (conv_stmt));
2402 tree to_type = TREE_TYPE (gimple_assign_lhs (conv_stmt));
2403 int data_size = TYPE_PRECISION (type1) + TYPE_PRECISION (type2);
2404 bool is_unsigned = TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2);
2405
2406 if (TYPE_PRECISION (from_type) > TYPE_PRECISION (to_type))
2407 {
2408 /* Conversion is a truncate. */
2409 if (TYPE_PRECISION (to_type) < data_size)
2410 return false;
2411 }
2412 else if (TYPE_PRECISION (from_type) < TYPE_PRECISION (to_type))
2413 {
2414 /* Conversion is an extend. Check it's the right sort. */
2415 if (TYPE_UNSIGNED (from_type) != is_unsigned
2416 && !(is_unsigned && TYPE_PRECISION (from_type) > data_size))
2417 return false;
2418 }
2419 /* else convert is a no-op for our purposes. */
2420 }
2421
2422 /* Verify that the machine can perform a widening multiply
2423 accumulate in this mode/signedness combination, otherwise
2424 this transformation is likely to pessimize code. */
2425 this_optab = optab_for_tree_code (wmult_code, optype, optab_default);
2426 handler = find_widening_optab_handler_and_mode (this_optab, to_mode,
2427 from_mode, 0, &actual_mode);
2428
2429 if (handler == CODE_FOR_nothing)
2430 return false;
2431
2432 /* Ensure that the inputs to the handler are in the correct precision
2433 for the opcode. This will be the full mode size. */
2434 actual_precision = GET_MODE_PRECISION (actual_mode);
2435 if (actual_precision != TYPE_PRECISION (type1)
2436 || from_unsigned1 != TYPE_UNSIGNED (type1))
2437 mult_rhs1 = build_and_insert_cast (gsi, loc,
2438 build_nonstandard_integer_type
2439 (actual_precision, from_unsigned1),
2440 mult_rhs1);
2441 if (actual_precision != TYPE_PRECISION (type2)
2442 || from_unsigned2 != TYPE_UNSIGNED (type2))
2443 mult_rhs2 = build_and_insert_cast (gsi, loc,
2444 build_nonstandard_integer_type
2445 (actual_precision, from_unsigned2),
2446 mult_rhs2);
2447
2448 if (!useless_type_conversion_p (type, TREE_TYPE (add_rhs)))
2449 add_rhs = build_and_insert_cast (gsi, loc, type, add_rhs);
2450
2451 /* Handle constants. */
2452 if (TREE_CODE (mult_rhs1) == INTEGER_CST)
2453 mult_rhs1 = fold_convert (type1, mult_rhs1);
2454 if (TREE_CODE (mult_rhs2) == INTEGER_CST)
2455 mult_rhs2 = fold_convert (type2, mult_rhs2);
2456
2457 gimple_assign_set_rhs_with_ops_1 (gsi, wmult_code, mult_rhs1, mult_rhs2,
2458 add_rhs);
2459 update_stmt (gsi_stmt (*gsi));
2460 widen_mul_stats.maccs_inserted++;
2461 return true;
2462 }
2463
2464 /* Combine the multiplication at MUL_STMT with operands MULOP1 and MULOP2
2465 with uses in additions and subtractions to form fused multiply-add
2466 operations. Returns true if successful and MUL_STMT should be removed. */
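/* Sketch of the forms produced, assuming the target has an fma pattern:

     tmp = a * b;
     x = tmp + c;   ->   x = FMA_EXPR <a, b, c>;
     y = d - tmp;   ->   y = FMA_EXPR <-a, b, d>;

   MUL_STMT becomes dead once every use has been rewritten. */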
2467
2468 static bool
2469 convert_mult_to_fma (gimple mul_stmt, tree op1, tree op2)
2470 {
2471 tree mul_result = gimple_get_lhs (mul_stmt);
2472 tree type = TREE_TYPE (mul_result);
2473 gimple use_stmt, neguse_stmt, fma_stmt;
2474 use_operand_p use_p;
2475 imm_use_iterator imm_iter;
2476
2477 if (FLOAT_TYPE_P (type)
2478 && flag_fp_contract_mode == FP_CONTRACT_OFF)
2479 return false;
2480
2481 /* We don't want to do bitfield reduction ops. */
2482 if (INTEGRAL_TYPE_P (type)
2483 && (TYPE_PRECISION (type)
2484 != GET_MODE_PRECISION (TYPE_MODE (type))))
2485 return false;
2486
2487 /* If the target doesn't support it, don't generate it. We assume that
2488 if fma isn't available then fms, fnma or fnms are not either. */
2489 if (optab_handler (fma_optab, TYPE_MODE (type)) == CODE_FOR_nothing)
2490 return false;
2491
2492 /* If the multiplication has zero uses, it is probably kept around
2493 because of -fnon-call-exceptions. Don't optimize it away in that
2494 case; that is DCE's job. */
2495 if (has_zero_uses (mul_result))
2496 return false;
2497
2498 /* Make sure that the multiplication statement becomes dead after
2499 the transformation, i.e. that all uses are transformed to FMAs.
2500 This means we assume that an FMA operation has the same cost
2501 as an addition. */
2502 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, mul_result)
2503 {
2504 enum tree_code use_code;
2505 tree result = mul_result;
2506 bool negate_p = false;
2507
2508 use_stmt = USE_STMT (use_p);
2509
2510 if (is_gimple_debug (use_stmt))
2511 continue;
2512
2513 /* For now, restrict this operation to single basic blocks. In theory
2514 we would want to support sinking the multiplication in
2515 m = a*b;
2516 if ()
2517 ma = m + c;
2518 else
2519 d = m;
2520 to form an FMA in the then block and sink the multiplication to the
2521 else block. */
2522 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
2523 return false;
2524
2525 if (!is_gimple_assign (use_stmt))
2526 return false;
2527
2528 use_code = gimple_assign_rhs_code (use_stmt);
2529
2530 /* A negate on the multiplication leads to FNMA. */
2531 if (use_code == NEGATE_EXPR)
2532 {
2533 ssa_op_iter iter;
2534 use_operand_p usep;
2535
2536 result = gimple_assign_lhs (use_stmt);
2537
2538 /* Make sure the negate statement becomes dead with this
2539 single transformation. */
2540 if (!single_imm_use (gimple_assign_lhs (use_stmt),
2541 &use_p, &neguse_stmt))
2542 return false;
2543
2544 /* Make sure the multiplication isn't also used on that stmt. */
2545 FOR_EACH_PHI_OR_STMT_USE (usep, neguse_stmt, iter, SSA_OP_USE)
2546 if (USE_FROM_PTR (usep) == mul_result)
2547 return false;
2548
2549 /* Re-validate. */
2550 use_stmt = neguse_stmt;
2551 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
2552 return false;
2553 if (!is_gimple_assign (use_stmt))
2554 return false;
2555
2556 use_code = gimple_assign_rhs_code (use_stmt);
2557 negate_p = true;
2558 }
2559
2560 switch (use_code)
2561 {
2562 case MINUS_EXPR:
2563 if (gimple_assign_rhs2 (use_stmt) == result)
2564 negate_p = !negate_p;
2565 break;
2566 case PLUS_EXPR:
2567 break;
2568 default:
2569 /* FMA can only be formed from PLUS and MINUS. */
2570 return false;
2571 }
2572
2573 /* If the subtrahend (gimple_assign_rhs2 (use_stmt)) is computed
2574 by a MULT_EXPR that we'll visit later, we might be able to
2575 get a more profitable match with fnma.
2576 OTOH, if we don't, a negate / fma pair likely has lower latency
2577 than a mult / subtract pair. */
2578 if (use_code == MINUS_EXPR && !negate_p
2579 && gimple_assign_rhs1 (use_stmt) == result
2580 && optab_handler (fms_optab, TYPE_MODE (type)) == CODE_FOR_nothing
2581 && optab_handler (fnma_optab, TYPE_MODE (type)) != CODE_FOR_nothing)
2582 {
2583 tree rhs2 = gimple_assign_rhs2 (use_stmt);
2584 gimple stmt2 = SSA_NAME_DEF_STMT (rhs2);
2585
2586 if (has_single_use (rhs2)
2587 && gimple_assign_rhs_code (stmt2) == MULT_EXPR)
2588 return false;
2589 }
2590
2591 /* We can't handle a * b + a * b. */
2592 if (gimple_assign_rhs1 (use_stmt) == gimple_assign_rhs2 (use_stmt))
2593 return false;
2594
2595 /* While it is possible to validate whether or not the exact form
2596 that we've recognized is available in the backend, the assumption
2597 is that the transformation is never a loss. For instance, suppose
2598 the target only has the plain FMA pattern available. Consider
2599 a*b-c -> fma(a,b,-c): we've exchanged MUL+SUB for FMA+NEG, which
2600 is still two operations. Consider -(a*b)-c -> fma(-a,b,-c): we
2601 still have 3 operations, but in the FMA form the two NEGs are
2602 independent and could be run in parallel. */
2603 }
2604
2605 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, mul_result)
2606 {
2607 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
2608 enum tree_code use_code;
2609 tree addop, mulop1 = op1, result = mul_result;
2610 bool negate_p = false;
2611
2612 if (is_gimple_debug (use_stmt))
2613 continue;
2614
2615 use_code = gimple_assign_rhs_code (use_stmt);
2616 if (use_code == NEGATE_EXPR)
2617 {
2618 result = gimple_assign_lhs (use_stmt);
2619 single_imm_use (gimple_assign_lhs (use_stmt), &use_p, &neguse_stmt);
2620 gsi_remove (&gsi, true);
2621 release_defs (use_stmt);
2622
2623 use_stmt = neguse_stmt;
2624 gsi = gsi_for_stmt (use_stmt);
2625 use_code = gimple_assign_rhs_code (use_stmt);
2626 negate_p = true;
2627 }
2628
2629 if (gimple_assign_rhs1 (use_stmt) == result)
2630 {
2631 addop = gimple_assign_rhs2 (use_stmt);
2632 /* a * b - c -> a * b + (-c) */
2633 if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
2634 addop = force_gimple_operand_gsi (&gsi,
2635 build1 (NEGATE_EXPR,
2636 type, addop),
2637 true, NULL_TREE, true,
2638 GSI_SAME_STMT);
2639 }
2640 else
2641 {
2642 addop = gimple_assign_rhs1 (use_stmt);
2643 /* a - b * c -> (-b) * c + a */
2644 if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
2645 negate_p = !negate_p;
2646 }
2647
2648 if (negate_p)
2649 mulop1 = force_gimple_operand_gsi (&gsi,
2650 build1 (NEGATE_EXPR,
2651 type, mulop1),
2652 true, NULL_TREE, true,
2653 GSI_SAME_STMT);
2654
2655 fma_stmt = gimple_build_assign_with_ops (FMA_EXPR,
2656 gimple_assign_lhs (use_stmt),
2657 mulop1, op2,
2658 addop);
2659 gsi_replace (&gsi, fma_stmt, true);
2660 widen_mul_stats.fmas_inserted++;
2661 }
2662
2663 return true;
2664 }
2665
2666 /* Find integer multiplications where the operands are extended from
2667 smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
2668 where appropriate; also form widening multiply-accumulates and FMAs. */
2669
2670 static unsigned int
2671 execute_optimize_widening_mul (void)
2672 {
2673 basic_block bb;
2674 bool cfg_changed = false;
2675
2676 memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));
2677
2678 FOR_EACH_BB (bb)
2679 {
2680 gimple_stmt_iterator gsi;
2681
2682 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
2683 {
2684 gimple stmt = gsi_stmt (gsi);
2685 enum tree_code code;
2686
2687 if (is_gimple_assign (stmt))
2688 {
2689 code = gimple_assign_rhs_code (stmt);
2690 switch (code)
2691 {
2692 case MULT_EXPR:
2693 if (!convert_mult_to_widen (stmt, &gsi)
2694 && convert_mult_to_fma (stmt,
2695 gimple_assign_rhs1 (stmt),
2696 gimple_assign_rhs2 (stmt)))
2697 {
2698 gsi_remove (&gsi, true);
2699 release_defs (stmt);
2700 continue;
2701 }
2702 break;
2703
2704 case PLUS_EXPR:
2705 case MINUS_EXPR:
2706 convert_plusminus_to_widen (&gsi, stmt, code);
2707 break;
2708
2709 default:;
2710 }
2711 }
2712 else if (is_gimple_call (stmt)
2713 && gimple_call_lhs (stmt))
2714 {
2715 tree fndecl = gimple_call_fndecl (stmt);
2716 if (fndecl
2717 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
2718 {
2719 switch (DECL_FUNCTION_CODE (fndecl))
2720 {
2721 case BUILT_IN_POWF:
2722 case BUILT_IN_POW:
2723 case BUILT_IN_POWL:
2724 if (TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
2725 && REAL_VALUES_EQUAL
2726 (TREE_REAL_CST (gimple_call_arg (stmt, 1)),
2727 dconst2)
2728 && convert_mult_to_fma (stmt,
2729 gimple_call_arg (stmt, 0),
2730 gimple_call_arg (stmt, 0)))
2731 {
2732 unlink_stmt_vdef (stmt);
2733 if (gsi_remove (&gsi, true)
2734 && gimple_purge_dead_eh_edges (bb))
2735 cfg_changed = true;
2736 release_defs (stmt);
2737 continue;
2738 }
2739 break;
2740
2741 default:;
2742 }
2743 }
2744 }
2745 gsi_next (&gsi);
2746 }
2747 }
2748
2749 statistics_counter_event (cfun, "widening multiplications inserted",
2750 widen_mul_stats.widen_mults_inserted);
2751 statistics_counter_event (cfun, "widening maccs inserted",
2752 widen_mul_stats.maccs_inserted);
2753 statistics_counter_event (cfun, "fused multiply-adds inserted",
2754 widen_mul_stats.fmas_inserted);
2755
2756 return cfg_changed ? TODO_cleanup_cfg : 0;
2757 }
2758
2759 static bool
2760 gate_optimize_widening_mul (void)
2761 {
2762 return flag_expensive_optimizations && optimize;
2763 }
2764
2765 struct gimple_opt_pass pass_optimize_widening_mul =
2766 {
2767 {
2768 GIMPLE_PASS,
2769 "widening_mul", /* name */
2770 OPTGROUP_NONE, /* optinfo_flags */
2771 gate_optimize_widening_mul, /* gate */
2772 execute_optimize_widening_mul, /* execute */
2773 NULL, /* sub */
2774 NULL, /* next */
2775 0, /* static_pass_number */
2776 TV_NONE, /* tv_id */
2777 PROP_ssa, /* properties_required */
2778 0, /* properties_provided */
2779 0, /* properties_destroyed */
2780 0, /* todo_flags_start */
2781 TODO_verify_ssa
2782 | TODO_verify_stmts
2783 | TODO_update_ssa /* todo_flags_finish */
2784 }
2785 };