/* Global, SSA-based optimizations using mathematical identities.
   Copyright (C) 2005-2014 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* Currently, the only mini-pass in this file tries to CSE reciprocal
   operations.  These are common in sequences such as this one:

	modulus = sqrt(x*x + y*y + z*z);
	x = x / modulus;
	y = y / modulus;
	z = z / modulus;

   that can be optimized to

	modulus = sqrt(x*x + y*y + z*z);
	rmodulus = 1.0 / modulus;
	x = x * rmodulus;
	y = y * rmodulus;
	z = z * rmodulus;

   We do this for loop invariant divisors, and with this pass whenever
   we notice that a division has the same divisor multiple times.

   Of course, like in PRE, we don't insert a division if a dominator
   already has one.  However, this cannot be done as an extension of
   PRE for several reasons.

   First of all, with some experiments it was found out that the
   transformation is not always useful if there are only two divisions
   by the same divisor.  This is probably because modern processors
   can pipeline the divisions; on older, in-order processors it should
   still be effective to optimize two divisions by the same number.
   We make this a param, and it shall be called N in the remainder of
   this comment.

   Second, if trapping math is active, we have less freedom on where
   to insert divisions: we can only do so in basic blocks that already
   contain one.  (If divisions don't trap, instead, we can insert
   divisions elsewhere, which will be in blocks that are common dominators
   of those that have the division).

   We really don't want to compute the reciprocal unless a division will
   be found.  To do this, we won't insert the division in a basic block
   that has less than N divisions *post-dominating* it.

   The algorithm constructs a subset of the dominator tree, holding the
   blocks containing the divisions and the common dominators to them,
   and walks it twice.  The first walk is in post-order, and it annotates
   each block with the number of divisions that post-dominate it: this
   gives information on where divisions can be inserted profitably.
   The second walk is in pre-order, and it inserts divisions as explained
   above, and replaces divisions by multiplications.

   In the best case, the cost of the pass is O(n_statements).  In the
   worst-case, the cost is due to creating the dominator tree subset,
   with a cost of O(n_basic_blocks ^ 2); however this can only happen
   for n_statements / n_basic_blocks statements.  So, the amortized cost
   of creating the dominator tree subset is O(n_basic_blocks) and the
   worst-case cost of the pass is O(n_statements * n_basic_blocks).

   More practically, the cost will be small because there are few
   divisions, and they tend to be in the same basic block, so insert_bb
   is called very few times.

   If we did this using domwalk.c, an efficient implementation would have
   to work on all the variables in a single pass, because we could not
   work on just a subset of the dominator tree, as we do now, and the
   cost would also be something like O(n_statements * n_basic_blocks).
   The data structures would be more complex in order to work on all the
   variables in a single pass.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "flags.h"
#include "tree.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimple-iterator.h"
#include "gimplify.h"
#include "gimplify-me.h"
#include "stor-layout.h"
#include "gimple-ssa.h"
#include "tree-cfg.h"
#include "tree-phinodes.h"
#include "ssa-iterators.h"
#include "stringpool.h"
#include "tree-ssanames.h"
#include "expr.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "tree-pass.h"
#include "alloc-pool.h"
#include "target.h"
#include "gimple-pretty-print.h"
#include "builtins.h"

/* FIXME: RTL headers have to be included here for optabs.  */
#include "rtl.h"	/* Because optabs.h wants enum rtx_code.  */
#include "expr.h"	/* Because optabs.h wants sepops.  */
#include "optabs.h"

/* This structure represents one basic block that either computes a
   division, or is a common dominator for basic blocks that compute a
   division.  */
struct occurrence {
  /* The basic block represented by this structure.  */
  basic_block bb;

  /* If non-NULL, the SSA_NAME holding the definition for a reciprocal
     inserted in BB.  */
  tree recip_def;

  /* If non-NULL, the GIMPLE_ASSIGN for a reciprocal computation that
     was inserted in BB.  */
  gimple recip_def_stmt;

  /* Pointer to a list of "struct occurrence"s for blocks dominated
     by BB.  */
  struct occurrence *children;

  /* Pointer to the next "struct occurrence"s in the list of blocks
     sharing a common dominator.  */
  struct occurrence *next;

  /* The number of divisions that are in BB before compute_merit.  The
     number of divisions that are in BB or post-dominate it after
     compute_merit.  */
  int num_divisions;

  /* True if the basic block has a division, false if it is a common
     dominator for basic blocks that do.  If it is false and trapping
     math is active, BB is not a candidate for inserting a reciprocal.  */
  bool bb_has_division;
};

static struct
{
  /* Number of 1.0/X ops inserted.  */
  int rdivs_inserted;

  /* Number of 1.0/FUNC ops inserted.  */
  int rfuncs_inserted;
} reciprocal_stats;

static struct
{
  /* Number of cexpi calls inserted.  */
  int inserted;
} sincos_stats;

static struct
{
  /* Number of hand-written 16-bit nop / bswaps found.  */
  int found_16bit;

  /* Number of hand-written 32-bit nop / bswaps found.  */
  int found_32bit;

  /* Number of hand-written 64-bit nop / bswaps found.  */
  int found_64bit;
} nop_stats, bswap_stats;

static struct
{
  /* Number of widening multiplication ops inserted.  */
  int widen_mults_inserted;

  /* Number of integer multiply-and-accumulate ops inserted.  */
  int maccs_inserted;

  /* Number of fp fused multiply-add ops inserted.  */
  int fmas_inserted;
} widen_mul_stats;

/* The instance of "struct occurrence" representing the highest
   interesting block in the dominator tree.  */
static struct occurrence *occ_head;

/* Allocation pool for getting instances of "struct occurrence".  */
static alloc_pool occ_pool;


/* Allocate and return a new struct occurrence for basic block BB, whose
   children list is headed by CHILDREN.  */
static struct occurrence *
occ_new (basic_block bb, struct occurrence *children)
{
  struct occurrence *occ;

  bb->aux = occ = (struct occurrence *) pool_alloc (occ_pool);
  memset (occ, 0, sizeof (struct occurrence));

  occ->bb = bb;
  occ->children = children;
  return occ;
}


/* Insert NEW_OCC into our subset of the dominator tree.  P_HEAD points to a
   list of "struct occurrence"s, one per basic block, having IDOM as
   their common dominator.

   We try to insert NEW_OCC as deep as possible in the tree, and we also
   insert any other block that is a common dominator for BB and one
   block already in the tree.  */

static void
insert_bb (struct occurrence *new_occ, basic_block idom,
	   struct occurrence **p_head)
{
  struct occurrence *occ, **p_occ;

  for (p_occ = p_head; (occ = *p_occ) != NULL; )
    {
      basic_block bb = new_occ->bb, occ_bb = occ->bb;
      basic_block dom = nearest_common_dominator (CDI_DOMINATORS, occ_bb, bb);
      if (dom == bb)
	{
	  /* BB dominates OCC_BB.  OCC becomes NEW_OCC's child: remove OCC
	     from its list.  */
	  *p_occ = occ->next;
	  occ->next = new_occ->children;
	  new_occ->children = occ;

	  /* Try the next block (it may as well be dominated by BB).  */
	}

      else if (dom == occ_bb)
	{
	  /* OCC_BB dominates BB.  Tail recurse to look deeper.  */
	  insert_bb (new_occ, dom, &occ->children);
	  return;
	}

      else if (dom != idom)
	{
	  gcc_assert (!dom->aux);

	  /* There is a dominator between IDOM and BB, add it and make
	     two children out of NEW_OCC and OCC.  First, remove OCC from
	     its list.  */
	  *p_occ = occ->next;
	  new_occ->next = occ;
	  occ->next = NULL;

	  /* None of the previous blocks has DOM as a dominator: if we tail
	     recursed, we would reexamine them uselessly.  Just switch BB with
	     DOM, and go on looking for blocks dominated by DOM.  */
	  new_occ = occ_new (dom, new_occ);
	}

      else
	{
	  /* Nothing special, go on with the next element.  */
	  p_occ = &occ->next;
	}
    }

  /* No place was found as a child of IDOM.  Make BB a sibling of IDOM.  */
  new_occ->next = *p_head;
  *p_head = new_occ;
}
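
/* For illustration (a hypothetical CFG): suppose the tree already holds an
   occurrence for bb5, and a division is then found in bb6, where the nearest
   common dominator of bb5 and bb6 is bb4, which is not yet in the tree.  The
   "dom != idom" arm above creates an occurrence for bb4 on the fly and makes
   bb5 and bb6 its children, so that a reciprocal can later be hoisted into
   bb4 if enough divisions post-dominate it.  */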

/* Register that we found a division in BB.  */

static inline void
register_division_in (basic_block bb)
{
  struct occurrence *occ;

  occ = (struct occurrence *) bb->aux;
  if (!occ)
    {
      occ = occ_new (bb, NULL);
      insert_bb (occ, ENTRY_BLOCK_PTR_FOR_FN (cfun), &occ_head);
    }

  occ->bb_has_division = true;
  occ->num_divisions++;
}


/* Compute the number of divisions that postdominate each block in OCC and
   its children.  */

static void
compute_merit (struct occurrence *occ)
{
  struct occurrence *occ_child;
  basic_block dom = occ->bb;

  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    {
      basic_block bb;
      if (occ_child->children)
	compute_merit (occ_child);

      if (flag_exceptions)
	bb = single_noncomplex_succ (dom);
      else
	bb = dom;

      if (dominated_by_p (CDI_POST_DOMINATORS, bb, occ_child->bb))
	occ->num_divisions += occ_child->num_divisions;
    }
}


/* Return whether USE_STMT is a floating-point division by DEF.  */
static inline bool
is_division_by (gimple use_stmt, tree def)
{
  return is_gimple_assign (use_stmt)
	 && gimple_assign_rhs_code (use_stmt) == RDIV_EXPR
	 && gimple_assign_rhs2 (use_stmt) == def
	 /* Do not recognize x / x as valid division, as we are getting
	    confused later by replacing all immediate uses x in such
	    a stmt.  */
	 && gimple_assign_rhs1 (use_stmt) != def;
}

/* Walk the subset of the dominator tree rooted at OCC, setting the
   RECIP_DEF field to a definition of 1.0 / DEF that can be used in
   the given basic block.  The field may be left NULL, of course,
   if it is not possible or profitable to do the optimization.

   DEF_BSI is an iterator pointing at the statement defining DEF.
   If RECIP_DEF is set, a dominator already has a computation that can
   be used.  */

static void
insert_reciprocals (gimple_stmt_iterator *def_gsi, struct occurrence *occ,
		    tree def, tree recip_def, int threshold)
{
  tree type;
  gimple new_stmt;
  gimple_stmt_iterator gsi;
  struct occurrence *occ_child;

  if (!recip_def
      && (occ->bb_has_division || !flag_trapping_math)
      && occ->num_divisions >= threshold)
    {
      /* Make a variable with the replacement and substitute it.  */
      type = TREE_TYPE (def);
      recip_def = create_tmp_reg (type, "reciptmp");
      new_stmt = gimple_build_assign_with_ops (RDIV_EXPR, recip_def,
					       build_one_cst (type), def);

      if (occ->bb_has_division)
	{
	  /* Case 1: insert before an existing division.  */
	  gsi = gsi_after_labels (occ->bb);
	  while (!gsi_end_p (gsi) && !is_division_by (gsi_stmt (gsi), def))
	    gsi_next (&gsi);

	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	}
      else if (def_gsi && occ->bb == def_gsi->bb)
	{
	  /* Case 2: insert right after the definition.  Note that this will
	     never happen if the definition statement can throw, because in
	     that case the sole successor of the statement's basic block will
	     dominate all the uses as well.  */
	  gsi_insert_after (def_gsi, new_stmt, GSI_NEW_STMT);
	}
      else
	{
	  /* Case 3: insert in a basic block not containing defs/uses.  */
	  gsi = gsi_after_labels (occ->bb);
	  gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
	}

      reciprocal_stats.rdivs_inserted++;

      occ->recip_def_stmt = new_stmt;
    }

  occ->recip_def = recip_def;
  for (occ_child = occ->children; occ_child; occ_child = occ_child->next)
    insert_reciprocals (def_gsi, occ_child, def, recip_def, threshold);
}


/* Replace the division at USE_P with a multiplication by the reciprocal, if
   possible.  */

static inline void
replace_reciprocal (use_operand_p use_p)
{
  gimple use_stmt = USE_STMT (use_p);
  basic_block bb = gimple_bb (use_stmt);
  struct occurrence *occ = (struct occurrence *) bb->aux;

  if (optimize_bb_for_speed_p (bb)
      && occ->recip_def && use_stmt != occ->recip_def_stmt)
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
      gimple_assign_set_rhs_code (use_stmt, MULT_EXPR);
      SET_USE (use_p, occ->recip_def);
      fold_stmt_inplace (&gsi);
      update_stmt (use_stmt);
    }
}


/* Free OCC and return one more "struct occurrence" to be freed.  */

static struct occurrence *
free_bb (struct occurrence *occ)
{
  struct occurrence *child, *next;

  /* First get the two pointers hanging off OCC.  */
  next = occ->next;
  child = occ->children;
  occ->bb->aux = NULL;
  pool_free (occ_pool, occ);

  /* Now ensure that we don't recurse unless it is necessary.  */
  if (!child)
    return next;
  else
    {
      while (next)
	next = free_bb (next);

      return child;
    }
}


/* Look for floating-point divisions among DEF's uses, and try to
   replace them by multiplications with the reciprocal.  Add
   as many statements computing the reciprocal as needed.

   DEF must be a GIMPLE register of a floating-point type.  */

static void
execute_cse_reciprocals_1 (gimple_stmt_iterator *def_gsi, tree def)
{
  use_operand_p use_p;
  imm_use_iterator use_iter;
  struct occurrence *occ;
  int count = 0, threshold;

  gcc_assert (FLOAT_TYPE_P (TREE_TYPE (def)) && is_gimple_reg (def));

  FOR_EACH_IMM_USE_FAST (use_p, use_iter, def)
    {
      gimple use_stmt = USE_STMT (use_p);
      if (is_division_by (use_stmt, def))
	{
	  register_division_in (gimple_bb (use_stmt));
	  count++;
	}
    }

  /* Do the expensive part only if we can hope to optimize something.  */
  threshold = targetm.min_divisions_for_recip_mul (TYPE_MODE (TREE_TYPE (def)));
  if (count >= threshold)
    {
      gimple use_stmt;
      for (occ = occ_head; occ; occ = occ->next)
	{
	  compute_merit (occ);
	  insert_reciprocals (def_gsi, occ, def, NULL, threshold);
	}

      FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, def)
	{
	  if (is_division_by (use_stmt, def))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, use_iter)
		replace_reciprocal (use_p);
	    }
	}
    }

  for (occ = occ_head; occ; )
    occ = free_bb (occ);

  occ_head = NULL;
}

/* Go through all the floating-point SSA_NAMEs, and call
   execute_cse_reciprocals_1 on each of them.  */
namespace {

const pass_data pass_data_cse_reciprocals =
{
  GIMPLE_PASS, /* type */
  "recip", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_execute */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_cse_reciprocals : public gimple_opt_pass
{
public:
  pass_cse_reciprocals (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cse_reciprocals, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *) { return optimize && flag_reciprocal_math; }
  virtual unsigned int execute (function *);

}; // class pass_cse_reciprocals

unsigned int
pass_cse_reciprocals::execute (function *fun)
{
  basic_block bb;
  tree arg;

  occ_pool = create_alloc_pool ("dominators for recip",
				sizeof (struct occurrence),
				n_basic_blocks_for_fn (fun) / 3 + 1);

  memset (&reciprocal_stats, 0, sizeof (reciprocal_stats));
  calculate_dominance_info (CDI_DOMINATORS);
  calculate_dominance_info (CDI_POST_DOMINATORS);

#ifdef ENABLE_CHECKING
  FOR_EACH_BB_FN (bb, fun)
    gcc_assert (!bb->aux);
#endif

  for (arg = DECL_ARGUMENTS (fun->decl); arg; arg = DECL_CHAIN (arg))
    if (FLOAT_TYPE_P (TREE_TYPE (arg))
	&& is_gimple_reg (arg))
      {
	tree name = ssa_default_def (fun, arg);
	if (name)
	  execute_cse_reciprocals_1 (NULL, name);
      }

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;
      gimple phi;
      tree def;

      for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  phi = gsi_stmt (gsi);
	  def = PHI_RESULT (phi);
	  if (! virtual_operand_p (def)
	      && FLOAT_TYPE_P (TREE_TYPE (def)))
	    execute_cse_reciprocals_1 (NULL, def);
	}

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);

	  if (gimple_has_lhs (stmt)
	      && (def = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_DEF)) != NULL
	      && FLOAT_TYPE_P (TREE_TYPE (def))
	      && TREE_CODE (def) == SSA_NAME)
	    execute_cse_reciprocals_1 (&gsi, def);
	}

      if (optimize_bb_for_size_p (bb))
	continue;

      /* Scan for a/func(b) and convert it to reciprocal a*rfunc(b).  */
      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  tree fndecl;

	  if (is_gimple_assign (stmt)
	      && gimple_assign_rhs_code (stmt) == RDIV_EXPR)
	    {
	      tree arg1 = gimple_assign_rhs2 (stmt);
	      gimple stmt1;

	      if (TREE_CODE (arg1) != SSA_NAME)
		continue;

	      stmt1 = SSA_NAME_DEF_STMT (arg1);

	      if (is_gimple_call (stmt1)
		  && gimple_call_lhs (stmt1)
		  && (fndecl = gimple_call_fndecl (stmt1))
		  && (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
		      || DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD))
		{
		  enum built_in_function code;
		  bool md_code, fail;
		  imm_use_iterator ui;
		  use_operand_p use_p;

		  code = DECL_FUNCTION_CODE (fndecl);
		  md_code = DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD;

		  fndecl = targetm.builtin_reciprocal (code, md_code, false);
		  if (!fndecl)
		    continue;

		  /* Check that all uses of the SSA name are divisions,
		     otherwise replacing the defining statement will do
		     the wrong thing.  */
		  fail = false;
		  FOR_EACH_IMM_USE_FAST (use_p, ui, arg1)
		    {
		      gimple stmt2 = USE_STMT (use_p);
		      if (is_gimple_debug (stmt2))
			continue;
		      if (!is_gimple_assign (stmt2)
			  || gimple_assign_rhs_code (stmt2) != RDIV_EXPR
			  || gimple_assign_rhs1 (stmt2) == arg1
			  || gimple_assign_rhs2 (stmt2) != arg1)
			{
			  fail = true;
			  break;
			}
		    }
		  if (fail)
		    continue;

		  gimple_replace_ssa_lhs (stmt1, arg1);
		  gimple_call_set_fndecl (stmt1, fndecl);
		  update_stmt (stmt1);
		  reciprocal_stats.rfuncs_inserted++;

		  FOR_EACH_IMM_USE_STMT (stmt, ui, arg1)
		    {
		      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
		      gimple_assign_set_rhs_code (stmt, MULT_EXPR);
		      fold_stmt_inplace (&gsi);
		      update_stmt (stmt);
		    }
		}
	    }
	}
    }

  statistics_counter_event (fun, "reciprocal divs inserted",
			    reciprocal_stats.rdivs_inserted);
  statistics_counter_event (fun, "reciprocal functions inserted",
			    reciprocal_stats.rfuncs_inserted);

  free_dominance_info (CDI_DOMINATORS);
  free_dominance_info (CDI_POST_DOMINATORS);
  free_alloc_pool (occ_pool);
  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_cse_reciprocals (gcc::context *ctxt)
{
  return new pass_cse_reciprocals (ctxt);
}

/* Records an occurrence at statement USE_STMT in the vector of trees
   STMTS if it is dominated by *TOP_BB or dominates it or this basic block
   is not yet initialized.  Returns true if the occurrence was pushed on
   the vector.  Adjusts *TOP_BB to be the basic block dominating all
   statements in the vector.  */

static bool
maybe_record_sincos (vec<gimple> *stmts,
		     basic_block *top_bb, gimple use_stmt)
{
  basic_block use_bb = gimple_bb (use_stmt);
  if (*top_bb
      && (*top_bb == use_bb
	  || dominated_by_p (CDI_DOMINATORS, use_bb, *top_bb)))
    stmts->safe_push (use_stmt);
  else if (!*top_bb
	   || dominated_by_p (CDI_DOMINATORS, *top_bb, use_bb))
    {
      stmts->safe_push (use_stmt);
      *top_bb = use_bb;
    }
  else
    return false;

  return true;
}

/* Look for sin, cos and cexpi calls with the same argument NAME and
   create a single call to cexpi CSEing the result in this case.
   We first walk over all immediate uses of the argument collecting
   statements that we can CSE in a vector and in a second pass replace
   the statement rhs with a REALPART or IMAGPART expression on the
   result of the cexpi call we insert before the use statement that
   dominates all other candidates.  */

static bool
execute_cse_sincos_1 (tree name)
{
  gimple_stmt_iterator gsi;
  imm_use_iterator use_iter;
  tree fndecl, res, type;
  gimple def_stmt, use_stmt, stmt;
  int seen_cos = 0, seen_sin = 0, seen_cexpi = 0;
  vec<gimple> stmts = vNULL;
  basic_block top_bb = NULL;
  int i;
  bool cfg_changed = false;

  type = TREE_TYPE (name);
  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, name)
    {
      if (gimple_code (use_stmt) != GIMPLE_CALL
	  || !gimple_call_lhs (use_stmt)
	  || !(fndecl = gimple_call_fndecl (use_stmt))
	  || DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL)
	continue;

      switch (DECL_FUNCTION_CODE (fndecl))
	{
	CASE_FLT_FN (BUILT_IN_COS):
	  seen_cos |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	CASE_FLT_FN (BUILT_IN_SIN):
	  seen_sin |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	CASE_FLT_FN (BUILT_IN_CEXPI):
	  seen_cexpi |= maybe_record_sincos (&stmts, &top_bb, use_stmt) ? 1 : 0;
	  break;

	default:;
	}
    }

  if (seen_cos + seen_sin + seen_cexpi <= 1)
    {
      stmts.release ();
      return false;
    }

  /* Simply insert cexpi at the beginning of top_bb but not earlier than
     the name def statement.  */
  fndecl = mathfn_built_in (type, BUILT_IN_CEXPI);
  if (!fndecl)
    return false;
  stmt = gimple_build_call (fndecl, 1, name);
  res = make_temp_ssa_name (TREE_TYPE (TREE_TYPE (fndecl)), stmt, "sincostmp");
  gimple_call_set_lhs (stmt, res);

  def_stmt = SSA_NAME_DEF_STMT (name);
  if (!SSA_NAME_IS_DEFAULT_DEF (name)
      && gimple_code (def_stmt) != GIMPLE_PHI
      && gimple_bb (def_stmt) == top_bb)
    {
      gsi = gsi_for_stmt (def_stmt);
      gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
    }
  else
    {
      gsi = gsi_after_labels (top_bb);
      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
    }
  sincos_stats.inserted++;

  /* And adjust the recorded old call sites.  */
  for (i = 0; stmts.iterate (i, &use_stmt); ++i)
    {
      tree rhs = NULL;
      fndecl = gimple_call_fndecl (use_stmt);

      switch (DECL_FUNCTION_CODE (fndecl))
	{
	CASE_FLT_FN (BUILT_IN_COS):
	  rhs = fold_build1 (REALPART_EXPR, type, res);
	  break;

	CASE_FLT_FN (BUILT_IN_SIN):
	  rhs = fold_build1 (IMAGPART_EXPR, type, res);
	  break;

	CASE_FLT_FN (BUILT_IN_CEXPI):
	  rhs = res;
	  break;

	default:;
	  gcc_unreachable ();
	}

      /* Replace call with a copy.  */
      stmt = gimple_build_assign (gimple_call_lhs (use_stmt), rhs);

      gsi = gsi_for_stmt (use_stmt);
      gsi_replace (&gsi, stmt, true);
      if (gimple_purge_dead_eh_edges (gimple_bb (stmt)))
	cfg_changed = true;
    }

  stmts.release ();

  return cfg_changed;
}
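
/* A minimal sketch of the transformation above (hypothetical GIMPLE, with
   SSA names invented for illustration):

     s_1 = __builtin_sin (a_0);        sincostmp_3 = __builtin_cexpi (a_0);
     c_2 = __builtin_cos (a_0);   =>   s_1 = IMAGPART_EXPR <sincostmp_3>;
				       c_2 = REALPART_EXPR <sincostmp_3>;

   The cexpi call is inserted in the basic block that dominates all the
   recorded uses, as computed by maybe_record_sincos.  */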

/* To evaluate powi(x,n), the floating point value x raised to the
   constant integer exponent n, we use a hybrid algorithm that
   combines the "window method" with look-up tables.  For an
   introduction to exponentiation algorithms and "addition chains",
   see section 4.6.3, "Evaluation of Powers" of Donald E. Knuth,
   "Seminumerical Algorithms", Vol. 2, "The Art of Computer Programming",
   3rd Edition, 1998, and Daniel M. Gordon, "A Survey of Fast Exponentiation
   Methods", Journal of Algorithms, Vol. 27, pp. 129-146, 1998.  */

/* Provide a default value for POWI_MAX_MULTS, the maximum number of
   multiplications to inline before calling the system library's pow
   function.  powi(x,n) requires at worst 2*bits(n)-2 multiplications,
   so this default never requires calling pow, powf or powl.  */

#ifndef POWI_MAX_MULTS
#define POWI_MAX_MULTS  (2*HOST_BITS_PER_WIDE_INT-2)
#endif

/* The size of the "optimal power tree" lookup table.  All
   exponents less than this value are simply looked up in the
   powi_table below.  This threshold is also used to size the
   cache of pseudo registers that hold intermediate results.  */
#define POWI_TABLE_SIZE 256

/* The size, in bits of the window, used in the "window method"
   exponentiation algorithm.  This is equivalent to a radix of
   (1<<POWI_WINDOW_SIZE) in the corresponding "m-ary method".  */
#define POWI_WINDOW_SIZE 3

/* The following table is an efficient representation of an
   "optimal power tree".  For each value, i, the corresponding
   value, j, in the table states that an optimal evaluation
   sequence for calculating pow(x,i) can be found by evaluating
   pow(x,j)*pow(x,i-j).  An optimal power tree for the first
   100 integers is given in Knuth's "Seminumerical algorithms".  */

static const unsigned char powi_table[POWI_TABLE_SIZE] =
  {
      0,   1,   1,   2,   2,   3,   3,   4,  /*   0 -   7 */
      4,   6,   5,   6,   6,  10,   7,   9,  /*   8 -  15 */
      8,  16,   9,  16,  10,  12,  11,  13,  /*  16 -  23 */
     12,  17,  13,  18,  14,  24,  15,  26,  /*  24 -  31 */
     16,  17,  17,  19,  18,  33,  19,  26,  /*  32 -  39 */
     20,  25,  21,  40,  22,  27,  23,  44,  /*  40 -  47 */
     24,  32,  25,  34,  26,  29,  27,  44,  /*  48 -  55 */
     28,  31,  29,  34,  30,  60,  31,  36,  /*  56 -  63 */
     32,  64,  33,  34,  34,  46,  35,  37,  /*  64 -  71 */
     36,  65,  37,  50,  38,  48,  39,  69,  /*  72 -  79 */
     40,  49,  41,  43,  42,  51,  43,  58,  /*  80 -  87 */
     44,  64,  45,  47,  46,  59,  47,  76,  /*  88 -  95 */
     48,  65,  49,  66,  50,  67,  51,  66,  /*  96 - 103 */
     52,  70,  53,  74,  54, 104,  55,  74,  /* 104 - 111 */
     56,  64,  57,  69,  58,  78,  59,  68,  /* 112 - 119 */
     60,  61,  61,  80,  62,  75,  63,  68,  /* 120 - 127 */
     64,  65,  65, 128,  66, 129,  67,  90,  /* 128 - 135 */
     68,  73,  69, 131,  70,  94,  71,  88,  /* 136 - 143 */
     72, 128,  73,  98,  74, 132,  75, 121,  /* 144 - 151 */
     76, 102,  77, 124,  78, 132,  79, 106,  /* 152 - 159 */
     80,  97,  81, 160,  82,  99,  83, 134,  /* 160 - 167 */
     84,  86,  85,  95,  86, 160,  87, 100,  /* 168 - 175 */
     88, 113,  89,  98,  90, 107,  91, 122,  /* 176 - 183 */
     92, 111,  93, 102,  94, 126,  95, 150,  /* 184 - 191 */
     96, 128,  97, 130,  98, 133,  99, 195,  /* 192 - 199 */
    100, 128, 101, 123, 102, 164, 103, 138,  /* 200 - 207 */
    104, 145, 105, 146, 106, 109, 107, 149,  /* 208 - 215 */
    108, 200, 109, 146, 110, 170, 111, 157,  /* 216 - 223 */
    112, 128, 113, 130, 114, 182, 115, 132,  /* 224 - 231 */
    116, 200, 117, 132, 118, 158, 119, 206,  /* 232 - 239 */
    120, 240, 121, 162, 122, 147, 123, 152,  /* 240 - 247 */
    124, 166, 125, 214, 126, 138, 127, 153,  /* 248 - 255 */
  };
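
/* For example, powi_table[13] == 10, so pow(x,13) is evaluated as
   pow(x,10) * pow(x,3).  Unwinding the table recursively gives the
   addition chain 1, 2, 3, 5, 10, 13:

     t2  = x  * x
     t3  = t2 * x
     t5  = t3 * t2
     t10 = t5 * t5
     t13 = t10 * t3

   five multiplications instead of the twelve that naive repeated
   multiplication by x would need.  */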


/* Return the number of multiplications required to calculate
   powi(x,n) where n is less than POWI_TABLE_SIZE.  This is a
   subroutine of powi_cost.  CACHE is an array indicating
   which exponents have already been calculated.  */

static int
powi_lookup_cost (unsigned HOST_WIDE_INT n, bool *cache)
{
  /* If we've already calculated this exponent, then this evaluation
     doesn't require any additional multiplications.  */
  if (cache[n])
    return 0;

  cache[n] = true;
  return powi_lookup_cost (n - powi_table[n], cache)
	 + powi_lookup_cost (powi_table[n], cache) + 1;
}

/* Return the number of multiplications required to calculate
   powi(x,n) for an arbitrary x, given the exponent N.  This
   function needs to be kept in sync with powi_as_mults below.  */

static int
powi_cost (HOST_WIDE_INT n)
{
  bool cache[POWI_TABLE_SIZE];
  unsigned HOST_WIDE_INT digit;
  unsigned HOST_WIDE_INT val;
  int result;

  if (n == 0)
    return 0;

  /* Ignore the reciprocal when calculating the cost.  */
  val = (n < 0) ? -n : n;

  /* Initialize the exponent cache.  */
  memset (cache, 0, POWI_TABLE_SIZE * sizeof (bool));
  cache[1] = true;

  result = 0;

  while (val >= POWI_TABLE_SIZE)
    {
      if (val & 1)
	{
	  digit = val & ((1 << POWI_WINDOW_SIZE) - 1);
	  result += powi_lookup_cost (digit, cache)
		    + POWI_WINDOW_SIZE + 1;
	  val >>= POWI_WINDOW_SIZE;
	}
      else
	{
	  val >>= 1;
	  result++;
	}
    }

  return result + powi_lookup_cost (val, cache);
}
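
/* For illustration, powi_cost (259): 259 is odd, so the low POWI_WINDOW_SIZE
   bits (259 & 7 == 3) are peeled off first.  That costs
   powi_lookup_cost (3) == 2 multiplies for x**3, plus
   POWI_WINDOW_SIZE + 1 == 4 more (three squarings and one combining
   multiply), and val becomes 259 >> 3 == 32.  The loop then exits and
   powi_lookup_cost (32) adds 4 multiplies (x**4 up to x**32; x**2 is
   already cached), for a total of 10, matching the chain
   x**259 = (x**32)**8 * x**3 that powi_as_mults below would emit.  */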

/* Recursive subroutine of powi_as_mults.  This function takes the
   array, CACHE, of already calculated exponents and an exponent N and
   returns a tree that corresponds to CACHE[1]**N, with type TYPE.  */

static tree
powi_as_mults_1 (gimple_stmt_iterator *gsi, location_t loc, tree type,
		 HOST_WIDE_INT n, tree *cache)
{
  tree op0, op1, ssa_target;
  unsigned HOST_WIDE_INT digit;
  gimple mult_stmt;

  if (n < POWI_TABLE_SIZE && cache[n])
    return cache[n];

  ssa_target = make_temp_ssa_name (type, NULL, "powmult");

  if (n < POWI_TABLE_SIZE)
    {
      cache[n] = ssa_target;
      op0 = powi_as_mults_1 (gsi, loc, type, n - powi_table[n], cache);
      op1 = powi_as_mults_1 (gsi, loc, type, powi_table[n], cache);
    }
  else if (n & 1)
    {
      digit = n & ((1 << POWI_WINDOW_SIZE) - 1);
      op0 = powi_as_mults_1 (gsi, loc, type, n - digit, cache);
      op1 = powi_as_mults_1 (gsi, loc, type, digit, cache);
    }
  else
    {
      op0 = powi_as_mults_1 (gsi, loc, type, n >> 1, cache);
      op1 = op0;
    }

  mult_stmt = gimple_build_assign_with_ops (MULT_EXPR, ssa_target, op0, op1);
  gimple_set_location (mult_stmt, loc);
  gsi_insert_before (gsi, mult_stmt, GSI_SAME_STMT);

  return ssa_target;
}

/* Convert ARG0**N to a tree of multiplications of ARG0 with itself.
   This function needs to be kept in sync with powi_cost above.  */

static tree
powi_as_mults (gimple_stmt_iterator *gsi, location_t loc,
	       tree arg0, HOST_WIDE_INT n)
{
  tree cache[POWI_TABLE_SIZE], result, type = TREE_TYPE (arg0);
  gimple div_stmt;
  tree target;

  if (n == 0)
    return build_real (type, dconst1);

  memset (cache, 0, sizeof (cache));
  cache[1] = arg0;

  result = powi_as_mults_1 (gsi, loc, type, (n < 0) ? -n : n, cache);
  if (n >= 0)
    return result;

  /* If the original exponent was negative, reciprocate the result.  */
  target = make_temp_ssa_name (type, NULL, "powmult");
  div_stmt = gimple_build_assign_with_ops (RDIV_EXPR, target,
					   build_real (type, dconst1),
					   result);
  gimple_set_location (div_stmt, loc);
  gsi_insert_before (gsi, div_stmt, GSI_SAME_STMT);

  return target;
}

/* ARG0 and N are the two arguments to a powi builtin in GSI with
   location info LOC.  If the arguments are appropriate, create an
   equivalent sequence of statements prior to GSI using an optimal
   number of multiplications, and return an expression holding the
   result.  */

static tree
gimple_expand_builtin_powi (gimple_stmt_iterator *gsi, location_t loc,
			    tree arg0, HOST_WIDE_INT n)
{
  /* Avoid largest negative number.  */
  if (n != -n
      && ((n >= -1 && n <= 2)
	  || (optimize_function_for_speed_p (cfun)
	      && powi_cost (n) <= POWI_MAX_MULTS)))
    return powi_as_mults (gsi, loc, arg0, n);

  return NULL_TREE;
}

/* Build a gimple call statement that calls FN with argument ARG.
   Set the lhs of the call statement to a fresh SSA name.  Insert the
   statement prior to GSI's current position, and return the fresh
   SSA name.  */

static tree
build_and_insert_call (gimple_stmt_iterator *gsi, location_t loc,
		       tree fn, tree arg)
{
  gimple call_stmt;
  tree ssa_target;

  call_stmt = gimple_build_call (fn, 1, arg);
  ssa_target = make_temp_ssa_name (TREE_TYPE (arg), NULL, "powroot");
  gimple_set_lhs (call_stmt, ssa_target);
  gimple_set_location (call_stmt, loc);
  gsi_insert_before (gsi, call_stmt, GSI_SAME_STMT);

  return ssa_target;
}

/* Build a gimple binary operation with the given CODE and arguments
   ARG0, ARG1, assigning the result to a new SSA name for variable
   TARGET.  Insert the statement prior to GSI's current position, and
   return the fresh SSA name.  */

static tree
build_and_insert_binop (gimple_stmt_iterator *gsi, location_t loc,
			const char *name, enum tree_code code,
			tree arg0, tree arg1)
{
  tree result = make_temp_ssa_name (TREE_TYPE (arg0), NULL, name);
  gimple stmt = gimple_build_assign_with_ops (code, result, arg0, arg1);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return result;
}

/* Build a gimple reference operation with the given CODE and argument
   ARG, assigning the result to a new SSA name of TYPE with NAME.
   Insert the statement prior to GSI's current position, and return
   the fresh SSA name.  */

static inline tree
build_and_insert_ref (gimple_stmt_iterator *gsi, location_t loc, tree type,
		      const char *name, enum tree_code code, tree arg0)
{
  tree result = make_temp_ssa_name (type, NULL, name);
  gimple stmt = gimple_build_assign (result, build1 (code, type, arg0));
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return result;
}

/* Build a gimple assignment to cast VAL to TYPE.  Insert the statement
   prior to GSI's current position, and return the fresh SSA name.  */

static tree
build_and_insert_cast (gimple_stmt_iterator *gsi, location_t loc,
		       tree type, tree val)
{
  tree result = make_ssa_name (type, NULL);
  gimple stmt = gimple_build_assign_with_ops (NOP_EXPR, result, val, NULL_TREE);
  gimple_set_location (stmt, loc);
  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
  return result;
}

/* ARG0 and ARG1 are the two arguments to a pow builtin call in GSI
   with location info LOC.  If possible, create an equivalent and
   less expensive sequence of statements prior to GSI, and return an
   expression holding the result.  */

static tree
gimple_expand_builtin_pow (gimple_stmt_iterator *gsi, location_t loc,
			   tree arg0, tree arg1)
{
  REAL_VALUE_TYPE c, cint, dconst1_4, dconst3_4, dconst1_3, dconst1_6;
  REAL_VALUE_TYPE c2, dconst3;
  HOST_WIDE_INT n;
  tree type, sqrtfn, cbrtfn, sqrt_arg0, sqrt_sqrt, result, cbrt_x, powi_cbrt_x;
  enum machine_mode mode;
  bool hw_sqrt_exists, c_is_int, c2_is_int;

  /* If the exponent isn't a constant, there's nothing of interest
     to be done.  */
  if (TREE_CODE (arg1) != REAL_CST)
    return NULL_TREE;

  /* If the exponent is equivalent to an integer, expand to an optimal
     multiplication sequence when profitable.  */
  c = TREE_REAL_CST (arg1);
  n = real_to_integer (&c);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  c_is_int = real_identical (&c, &cint);

  if (c_is_int
      && ((n >= -1 && n <= 2)
	  || (flag_unsafe_math_optimizations
	      && optimize_bb_for_speed_p (gsi_bb (*gsi))
	      && powi_cost (n) <= POWI_MAX_MULTS)))
    return gimple_expand_builtin_powi (gsi, loc, arg0, n);

  /* Attempt various optimizations using sqrt and cbrt.  */
  type = TREE_TYPE (arg0);
  mode = TYPE_MODE (type);
  sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);

  /* Optimize pow(x,0.5) = sqrt(x).  This replacement is always safe
     unless signed zeros must be maintained.  pow(-0,0.5) = +0, while
     sqrt(-0) = -0.  */
  if (sqrtfn
      && REAL_VALUES_EQUAL (c, dconsthalf)
      && !HONOR_SIGNED_ZEROS (mode))
    return build_and_insert_call (gsi, loc, sqrtfn, arg0);

  /* Optimize pow(x,0.25) = sqrt(sqrt(x)).  Assume on most machines that
     a builtin sqrt instruction is smaller than a call to pow with 0.25,
     so do this optimization even if -Os.  Don't do this optimization
     if we don't have a hardware sqrt insn.  */
  dconst1_4 = dconst1;
  SET_REAL_EXP (&dconst1_4, REAL_EXP (&dconst1_4) - 2);
  hw_sqrt_exists = optab_handler (sqrt_optab, mode) != CODE_FOR_nothing;

  if (flag_unsafe_math_optimizations
      && sqrtfn
      && REAL_VALUES_EQUAL (c, dconst1_4)
      && hw_sqrt_exists)
    {
      /* sqrt(x)  */
      sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);

      /* sqrt(sqrt(x))  */
      return build_and_insert_call (gsi, loc, sqrtfn, sqrt_arg0);
    }

  /* Optimize pow(x,0.75) = sqrt(x) * sqrt(sqrt(x)) unless we are
     optimizing for space.  Don't do this optimization if we don't have
     a hardware sqrt insn.  */
  real_from_integer (&dconst3_4, VOIDmode, 3, SIGNED);
  SET_REAL_EXP (&dconst3_4, REAL_EXP (&dconst3_4) - 2);

  if (flag_unsafe_math_optimizations
      && sqrtfn
      && optimize_function_for_speed_p (cfun)
      && REAL_VALUES_EQUAL (c, dconst3_4)
      && hw_sqrt_exists)
    {
      /* sqrt(x)  */
      sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);

      /* sqrt(sqrt(x))  */
      sqrt_sqrt = build_and_insert_call (gsi, loc, sqrtfn, sqrt_arg0);

      /* sqrt(x) * sqrt(sqrt(x))  */
      return build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
				     sqrt_arg0, sqrt_sqrt);
    }

  /* Optimize pow(x,1./3.) = cbrt(x).  This requires unsafe math
     optimizations since 1./3. is not exactly representable.  If x
     is negative and finite, the correct value of pow(x,1./3.) is
     a NaN with the "invalid" exception raised, because the value
     of 1./3. actually has an even denominator.  The correct value
     of cbrt(x) is a negative real value.  */
  cbrtfn = mathfn_built_in (type, BUILT_IN_CBRT);
  dconst1_3 = real_value_truncate (mode, dconst_third ());

  if (flag_unsafe_math_optimizations
      && cbrtfn
      && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
      && REAL_VALUES_EQUAL (c, dconst1_3))
    return build_and_insert_call (gsi, loc, cbrtfn, arg0);

  /* Optimize pow(x,1./6.) = cbrt(sqrt(x)).  Don't do this optimization
     if we don't have a hardware sqrt insn.  */
  dconst1_6 = dconst1_3;
  SET_REAL_EXP (&dconst1_6, REAL_EXP (&dconst1_6) - 1);

  if (flag_unsafe_math_optimizations
      && sqrtfn
      && cbrtfn
      && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
      && optimize_function_for_speed_p (cfun)
      && hw_sqrt_exists
      && REAL_VALUES_EQUAL (c, dconst1_6))
    {
      /* sqrt(x)  */
      sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);

      /* cbrt(sqrt(x))  */
      return build_and_insert_call (gsi, loc, cbrtfn, sqrt_arg0);
    }

  /* Optimize pow(x,c), where n = 2c for some nonzero integer n
     and c not an integer, into

       sqrt(x) * powi(x, n/2),                n > 0;
       1.0 / (sqrt(x) * powi(x, abs(n/2))),   n < 0.

     Do not calculate the powi factor when n/2 = 0.  */
  real_arithmetic (&c2, MULT_EXPR, &c, &dconst2);
  n = real_to_integer (&c2);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  c2_is_int = real_identical (&c2, &cint);

  if (flag_unsafe_math_optimizations
      && sqrtfn
      && c2_is_int
      && !c_is_int
      && optimize_function_for_speed_p (cfun))
    {
      tree powi_x_ndiv2 = NULL_TREE;

      /* Attempt to fold powi(arg0, abs(n/2)) into multiplies.  If not
	 possible or profitable, give up.  Skip the degenerate case when
	 n is 1 or -1, where the result is always 1.  */
      if (absu_hwi (n) != 1)
	{
	  powi_x_ndiv2 = gimple_expand_builtin_powi (gsi, loc, arg0,
						     abs_hwi (n / 2));
	  if (!powi_x_ndiv2)
	    return NULL_TREE;
	}

      /* Calculate sqrt(x).  When n is not 1 or -1, multiply it by the
	 result of the optimal multiply sequence just calculated.  */
      sqrt_arg0 = build_and_insert_call (gsi, loc, sqrtfn, arg0);

      if (absu_hwi (n) == 1)
	result = sqrt_arg0;
      else
	result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					 sqrt_arg0, powi_x_ndiv2);

      /* If n is negative, reciprocate the result.  */
      if (n < 0)
	result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
					 build_real (type, dconst1), result);
      return result;
    }

  /* Optimize pow(x,c), where 3c = n for some nonzero integer n, into

     powi(x, n/3) * powi(cbrt(x), n%3),                    n > 0;
     1.0 / (powi(x, abs(n)/3) * powi(cbrt(x), abs(n)%3)),  n < 0.

     Do not calculate the first factor when n/3 = 0.  As cbrt(x) is
     different from pow(x, 1./3.) due to rounding and behavior with
     negative x, we need to constrain this transformation to unsafe
     math and positive x or finite math.  */
  real_from_integer (&dconst3, VOIDmode, 3, SIGNED);
  real_arithmetic (&c2, MULT_EXPR, &c, &dconst3);
  real_round (&c2, mode, &c2);
  n = real_to_integer (&c2);
  real_from_integer (&cint, VOIDmode, n, SIGNED);
  real_arithmetic (&c2, RDIV_EXPR, &cint, &dconst3);
  real_convert (&c2, mode, &c2);

  if (flag_unsafe_math_optimizations
      && cbrtfn
      && (gimple_val_nonnegative_real_p (arg0) || !HONOR_NANS (mode))
      && real_identical (&c2, &c)
      && !c2_is_int
      && optimize_function_for_speed_p (cfun)
      && powi_cost (n / 3) <= POWI_MAX_MULTS)
    {
      tree powi_x_ndiv3 = NULL_TREE;

      /* Attempt to fold powi(arg0, abs(n/3)) into multiplies.  If not
	 possible or profitable, give up.  Skip the degenerate case when
	 abs(n) < 3, where the result is always 1.  */
      if (absu_hwi (n) >= 3)
	{
	  powi_x_ndiv3 = gimple_expand_builtin_powi (gsi, loc, arg0,
						     abs_hwi (n / 3));
	  if (!powi_x_ndiv3)
	    return NULL_TREE;
	}

      /* Calculate powi(cbrt(x), n%3).  Don't use gimple_expand_builtin_powi
	 as that creates an unnecessary variable.  Instead, just produce
	 either cbrt(x) or cbrt(x) * cbrt(x).  */
      cbrt_x = build_and_insert_call (gsi, loc, cbrtfn, arg0);

      if (absu_hwi (n) % 3 == 1)
	powi_cbrt_x = cbrt_x;
      else
	powi_cbrt_x = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					      cbrt_x, cbrt_x);

      /* Multiply the two subexpressions, unless powi(x,abs(n)/3) = 1.  */
      if (absu_hwi (n) < 3)
	result = powi_cbrt_x;
      else
	result = build_and_insert_binop (gsi, loc, "powroot", MULT_EXPR,
					 powi_x_ndiv3, powi_cbrt_x);

      /* If n is negative, reciprocate the result.  */
      if (n < 0)
	result = build_and_insert_binop (gsi, loc, "powroot", RDIV_EXPR,
					 build_real (type, dconst1), result);

      return result;
    }

  /* No optimizations succeeded.  */
  return NULL_TREE;
}
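
/* For illustration, the half-integer case above turns pow (x, 2.5), where
   2c == 5, into sqrt (x) * powi (x, 2) (hypothetical GIMPLE, SSA names
   invented for illustration):

     powmult_1 = x * x;
     powroot_2 = __builtin_sqrt (x);
     powroot_3 = powroot_2 * powmult_1;

   two multiplies and one sqrt instead of a pow call.  */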

/* ARG is the argument to a cabs builtin call in GSI with location info
   LOC.  Create a sequence of statements prior to GSI that calculates
   sqrt(R*R + I*I), where R and I are the real and imaginary components
   of ARG, respectively.  Return an expression holding the result.  */

static tree
gimple_expand_builtin_cabs (gimple_stmt_iterator *gsi, location_t loc, tree arg)
{
  tree real_part, imag_part, addend1, addend2, sum, result;
  tree type = TREE_TYPE (TREE_TYPE (arg));
  tree sqrtfn = mathfn_built_in (type, BUILT_IN_SQRT);
  enum machine_mode mode = TYPE_MODE (type);

  if (!flag_unsafe_math_optimizations
      || !optimize_bb_for_speed_p (gimple_bb (gsi_stmt (*gsi)))
      || !sqrtfn
      || optab_handler (sqrt_optab, mode) == CODE_FOR_nothing)
    return NULL_TREE;

  real_part = build_and_insert_ref (gsi, loc, type, "cabs",
				    REALPART_EXPR, arg);
  addend1 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
				    real_part, real_part);
  imag_part = build_and_insert_ref (gsi, loc, type, "cabs",
				    IMAGPART_EXPR, arg);
  addend2 = build_and_insert_binop (gsi, loc, "cabs", MULT_EXPR,
				    imag_part, imag_part);
  sum = build_and_insert_binop (gsi, loc, "cabs", PLUS_EXPR, addend1, addend2);
  result = build_and_insert_call (gsi, loc, sqrtfn, sum);

  return result;
}

/* Go through all calls to sin, cos and cexpi and call execute_cse_sincos_1
   on the SSA_NAME argument of each of them.  Also expand powi(x,n) into
   an optimal number of multiplies, when n is a constant.  */

namespace {

const pass_data pass_data_cse_sincos =
{
  GIMPLE_PASS, /* type */
  "sincos", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  true, /* has_execute */
  TV_NONE, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_cse_sincos : public gimple_opt_pass
{
public:
  pass_cse_sincos (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cse_sincos, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
    {
      /* We no longer require either sincos or cexp, since powi expansion
	 piggybacks on this pass.  */
      return optimize;
    }

  virtual unsigned int execute (function *);

}; // class pass_cse_sincos

unsigned int
pass_cse_sincos::execute (function *fun)
{
  basic_block bb;
  bool cfg_changed = false;

  calculate_dominance_info (CDI_DOMINATORS);
  memset (&sincos_stats, 0, sizeof (sincos_stats));

  FOR_EACH_BB_FN (bb, fun)
    {
      gimple_stmt_iterator gsi;
      bool cleanup_eh = false;

      for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple stmt = gsi_stmt (gsi);
	  tree fndecl;

	  /* Only the last stmt in a bb could throw, no need to call
	     gimple_purge_dead_eh_edges if we change something in the middle
	     of a basic block.  */
	  cleanup_eh = false;

	  if (is_gimple_call (stmt)
	      && gimple_call_lhs (stmt)
	      && (fndecl = gimple_call_fndecl (stmt))
	      && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
	    {
	      tree arg, arg0, arg1, result;
	      HOST_WIDE_INT n;
	      location_t loc;

	      switch (DECL_FUNCTION_CODE (fndecl))
		{
		CASE_FLT_FN (BUILT_IN_COS):
		CASE_FLT_FN (BUILT_IN_SIN):
		CASE_FLT_FN (BUILT_IN_CEXPI):
		  /* Make sure we have either sincos or cexp.  */
		  if (!targetm.libc_has_function (function_c99_math_complex)
		      && !targetm.libc_has_function (function_sincos))
		    break;

		  arg = gimple_call_arg (stmt, 0);
		  if (TREE_CODE (arg) == SSA_NAME)
		    cfg_changed |= execute_cse_sincos_1 (arg);
		  break;

		CASE_FLT_FN (BUILT_IN_POW):
		  arg0 = gimple_call_arg (stmt, 0);
		  arg1 = gimple_call_arg (stmt, 1);

		  loc = gimple_location (stmt);
		  result = gimple_expand_builtin_pow (&gsi, loc, arg0, arg1);

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gimple new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		CASE_FLT_FN (BUILT_IN_POWI):
		  arg0 = gimple_call_arg (stmt, 0);
		  arg1 = gimple_call_arg (stmt, 1);
		  loc = gimple_location (stmt);

		  if (real_minus_onep (arg0))
		    {
		      tree t0, t1, cond, one, minus_one;
		      gimple stmt;

		      t0 = TREE_TYPE (arg0);
		      t1 = TREE_TYPE (arg1);
		      one = build_real (t0, dconst1);
		      minus_one = build_real (t0, dconstm1);

		      cond = make_temp_ssa_name (t1, NULL, "powi_cond");
		      stmt = gimple_build_assign_with_ops (BIT_AND_EXPR, cond,
							   arg1,
							   build_int_cst (t1,
									  1));
		      gimple_set_location (stmt, loc);
		      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);

		      result = make_temp_ssa_name (t0, NULL, "powi");
		      stmt = gimple_build_assign_with_ops (COND_EXPR, result,
							   cond,
							   minus_one, one);
		      gimple_set_location (stmt, loc);
		      gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
		    }
		  else
		    {
		      if (!tree_fits_shwi_p (arg1))
			break;

		      n = tree_to_shwi (arg1);
		      result = gimple_expand_builtin_powi (&gsi, loc, arg0, n);
		    }

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gimple new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		CASE_FLT_FN (BUILT_IN_CABS):
		  arg0 = gimple_call_arg (stmt, 0);
		  loc = gimple_location (stmt);
		  result = gimple_expand_builtin_cabs (&gsi, loc, arg0);

		  if (result)
		    {
		      tree lhs = gimple_get_lhs (stmt);
		      gimple new_stmt = gimple_build_assign (lhs, result);
		      gimple_set_location (new_stmt, loc);
		      unlink_stmt_vdef (stmt);
		      gsi_replace (&gsi, new_stmt, true);
		      cleanup_eh = true;
		      if (gimple_vdef (stmt))
			release_ssa_name (gimple_vdef (stmt));
		    }
		  break;

		default:;
		}
	    }
	}
      if (cleanup_eh)
	cfg_changed |= gimple_purge_dead_eh_edges (bb);
    }

  statistics_counter_event (fun, "sincos statements inserted",
			    sincos_stats.inserted);

  free_dominance_info (CDI_DOMINATORS);
  return cfg_changed ? TODO_cleanup_cfg : 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_cse_sincos (gcc::context *ctxt)
{
  return new pass_cse_sincos (ctxt);
}

/* A symbolic number is used to detect byte permutation and selection
   patterns.  Therefore the field N contains an artificial number
   consisting of byte size markers:

   0	   - byte has the value 0
   1..size - byte contains the content of the byte
	     number indexed with that value minus one.

   To detect permutations on memory sources (arrays and structures), a
   symbolic number is also associated with a base address (the array or
   structure the load is made from), an offset from that base address and a
   range which gives the difference between the highest and lowest accessed
   memory locations that make up such a symbolic number.  The range is thus
   different from size, which reflects the size of the type of the current
   expression.  Note that for a non-memory source, range holds the same
   value as size.

   For instance, for an array char a[], (short) a[0] | (short) a[3] would have
   a size of 2 but a range of 4 while (short) a[0] | ((short) a[0] << 1) would
   still have a size of 2 but this time a range of 1.  */

struct symbolic_number {
  uint64_t n;
  int size;
  tree base_addr;
  tree offset;
  HOST_WIDE_INT bytepos;
  tree alias_set;
  tree vuse;
  unsigned HOST_WIDE_INT range;
};

/* The number which the find_bswap_or_nop_1 result should match in
   order to have a nop.  The number is masked according to the size of
   the symbolic number before using it.  */
#define CMPNOP (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x08070605 << 32 | 0x04030201)

/* The number which the find_bswap_or_nop_1 result should match in
   order to have a byte swap.  The number is masked according to the
   size of the symbolic number before using it.  */
#define CMPXCHG (sizeof (int64_t) < 8 ? 0 : \
  (uint64_t)0x01020304 << 32 | 0x05060708)
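
/* For a 4-byte symbolic number, both constants are masked down to 32 bits:
   a result of 0x04030201 (CMPNOP) means the bytes come out in their original
   order, i.e. the computation is a plain load or copy, while 0x01020304
   (CMPXCHG) means the byte order was exactly reversed, i.e. the computation
   is equivalent to __builtin_bswap32.  */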

/* Perform a SHIFT or ROTATE operation by COUNT bits on symbolic
   number N.  Return false if the requested operation is not permitted
   on a symbolic number.  */

static inline bool
do_shift_rotate (enum tree_code code,
		 struct symbolic_number *n,
		 int count)
{
  if (count % 8 != 0)
    return false;

  /* Zero out the extra bits of N in order to avoid them being shifted
     into the significant bits.  */
  if (n->size < (int)sizeof (int64_t))
    n->n &= ((uint64_t)1 << (n->size * BITS_PER_UNIT)) - 1;

  switch (code)
    {
    case LSHIFT_EXPR:
      n->n <<= count;
      break;
    case RSHIFT_EXPR:
      n->n >>= count;
      break;
    case LROTATE_EXPR:
      n->n = (n->n << count) | (n->n >> ((n->size * BITS_PER_UNIT) - count));
      break;
    case RROTATE_EXPR:
      n->n = (n->n >> count) | (n->n << ((n->size * BITS_PER_UNIT) - count));
      break;
    default:
      return false;
    }
  /* Zero unused bits for size.  */
  if (n->size < (int)sizeof (int64_t))
    n->n &= ((uint64_t)1 << (n->size * BITS_PER_UNIT)) - 1;
  return true;
}
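
/* For example, applying LSHIFT_EXPR with COUNT == 8 to a 4-byte symbolic
   number holding the nop pattern 0x04030201 yields 0x03020100 after masking:
   every byte marker moves up one position and the vacated low byte receives
   marker 0, meaning "this byte is known to be zero".  A COUNT that is not a
   multiple of 8 would split byte markers apart, hence the early
   return false above.  */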

/* Perform sanity checking for the symbolic number N and the gimple
   statement STMT.  */

static inline bool
verify_symbolic_number_p (struct symbolic_number *n, gimple stmt)
{
  tree lhs_type;

  lhs_type = gimple_expr_type (stmt);

  if (TREE_CODE (lhs_type) != INTEGER_TYPE)
    return false;

  if (TYPE_PRECISION (lhs_type) != n->size * BITS_PER_UNIT)
    return false;

  return true;
}

/* Initialize the symbolic number N for the bswap pass from the base element
   SRC manipulated by the bitwise OR expression.  */

static bool
init_symbolic_number (struct symbolic_number *n, tree src)
{
  n->base_addr = n->offset = n->alias_set = n->vuse = NULL_TREE;

  /* Set up the symbolic number N by setting each byte to a value between 1 and
     the byte size of rhs1.  The highest order byte is set to n->size and the
     lowest order byte to 1.  */
  n->size = TYPE_PRECISION (TREE_TYPE (src));
  if (n->size % BITS_PER_UNIT != 0)
    return false;
  n->size /= BITS_PER_UNIT;
  n->range = n->size;
  n->n = CMPNOP;

  if (n->size < (int)sizeof (int64_t))
    n->n &= ((uint64_t)1 << (n->size * BITS_PER_UNIT)) - 1;

  return true;
}
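
/* For instance, initializing from a 16-bit SRC gives n->size == 2,
   n->range == 2 and n->n == CMPNOP masked down to two bytes, i.e. 0x0201:
   byte 0 carries marker 1 and byte 1 carries marker 2.  */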

/* Check if STMT might be a byte swap or a nop from a memory source and return
   the answer.  If so, REF is that memory source and the base of the memory
   area accessed and the offset of the access from that base are recorded
   in N.  */
1732
1733 bool
1734 find_bswap_or_nop_load (gimple stmt, tree ref, struct symbolic_number *n)
1735 {
1736 /* Leaf node is an array or component ref. Memorize its base and
1737 offset from base to compare to other such leaf node. */
1738 HOST_WIDE_INT bitsize, bitpos;
1739 enum machine_mode mode;
1740 int unsignedp, volatilep;
1741 tree offset, base_addr;
1742
1743 if (!gimple_assign_load_p (stmt) || gimple_has_volatile_ops (stmt))
1744 return false;
1745
1746 base_addr = get_inner_reference (ref, &bitsize, &bitpos, &offset, &mode,
1747 &unsignedp, &volatilep, false);
1748
1749 if (TREE_CODE (base_addr) == MEM_REF)
1750 {
1751 offset_int bit_offset = 0;
1752 tree off = TREE_OPERAND (base_addr, 1);
1753
1754 if (!integer_zerop (off))
1755 {
1756 offset_int boff, coff = mem_ref_offset (base_addr);
1757 boff = wi::lshift (coff, LOG2_BITS_PER_UNIT);
1758 bit_offset += boff;
1759 }
1760
1761 base_addr = TREE_OPERAND (base_addr, 0);
1762
1763 /* Avoid returning a negative bitpos as this may wreak havoc later. */
1764 if (wi::neg_p (bit_offset))
1765 {
1766 offset_int mask = wi::mask <offset_int> (LOG2_BITS_PER_UNIT, false);
1767 offset_int tem = bit_offset.and_not (mask);
1768 /* TEM is the bitpos rounded to BITS_PER_UNIT towards -Inf.
1769 Subtract it from BIT_OFFSET and add it (scaled) to OFFSET. */
1770 bit_offset -= tem;
1771 tem = wi::arshift (tem, LOG2_BITS_PER_UNIT);
1772 if (offset)
1773 offset = size_binop (PLUS_EXPR, offset,
1774 wide_int_to_tree (sizetype, tem));
1775 else
1776 offset = wide_int_to_tree (sizetype, tem);
1777 }
1778
1779 bitpos += bit_offset.to_shwi ();
1780 }
1781
1782 if (bitpos % BITS_PER_UNIT)
1783 return false;
1784 if (bitsize % BITS_PER_UNIT)
1785 return false;
1786
1787 if (!init_symbolic_number (n, ref))
1788 return false;
1789 n->base_addr = base_addr;
1790 n->offset = offset;
1791 n->bytepos = bitpos / BITS_PER_UNIT;
1792 n->alias_set = reference_alias_ptr_type (ref);
1793 n->vuse = gimple_vuse (stmt);
1794 return true;
1795 }
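
/* As a hypothetical example of the recording done above (assuming
   BITS_PER_UNIT == 8), for a load such as

     x = ((unsigned char *) p)[2];

   the MEM_REF handling yields n->base_addr == p, n->offset == NULL
   and n->bytepos == 2, so that two such loads from the same base can
   later be compared by their byte positions alone.  */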
1796
1797 /* find_bswap_or_nop_1 invokes itself recursively with N and tries to perform
1798 the operation given by the rhs of STMT on the result. If the operation
1799 could be executed successfully, the function returns the tree expression of
1800 the source operand; otherwise it returns NULL_TREE. */
1801
1802 static tree
1803 find_bswap_or_nop_1 (gimple stmt, struct symbolic_number *n, int limit)
1804 {
1805 enum tree_code code;
1806 tree rhs1, rhs2 = NULL;
1807 gimple rhs1_stmt, rhs2_stmt;
1808 tree source_expr1;
1809 enum gimple_rhs_class rhs_class;
1810
1811 if (!limit || !is_gimple_assign (stmt))
1812 return NULL_TREE;
1813
1814 rhs1 = gimple_assign_rhs1 (stmt);
1815
1816 if (find_bswap_or_nop_load (stmt, rhs1, n))
1817 return rhs1;
1818
1819 if (TREE_CODE (rhs1) != SSA_NAME)
1820 return NULL_TREE;
1821
1822 code = gimple_assign_rhs_code (stmt);
1823 rhs_class = gimple_assign_rhs_class (stmt);
1824 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
1825
1826 if (rhs_class == GIMPLE_BINARY_RHS)
1827 rhs2 = gimple_assign_rhs2 (stmt);
1828
1829 /* Handle unary rhs and binary rhs with integer constants as second
1830 operand. */
1831
1832 if (rhs_class == GIMPLE_UNARY_RHS
1833 || (rhs_class == GIMPLE_BINARY_RHS
1834 && TREE_CODE (rhs2) == INTEGER_CST))
1835 {
1836 if (code != BIT_AND_EXPR
1837 && code != LSHIFT_EXPR
1838 && code != RSHIFT_EXPR
1839 && code != LROTATE_EXPR
1840 && code != RROTATE_EXPR
1841 && code != NOP_EXPR
1842 && code != CONVERT_EXPR)
1843 return NULL_TREE;
1844
1845 source_expr1 = find_bswap_or_nop_1 (rhs1_stmt, n, limit - 1);
1846
1847 /* If find_bswap_or_nop_1 returned NULL, STMT is a leaf node and
1848 we have to initialize the symbolic number. */
1849 if (!source_expr1)
1850 {
1851 if (gimple_assign_load_p (stmt)
1852 || !init_symbolic_number (n, rhs1))
1853 return NULL_TREE;
1854 source_expr1 = rhs1;
1855 }
1856
1857 switch (code)
1858 {
1859 case BIT_AND_EXPR:
1860 {
1861 int i;
1862 uint64_t val = int_cst_value (rhs2);
1863 uint64_t tmp = val;
1864
1865 /* Only constants masking full bytes are allowed. */
1866 for (i = 0; i < n->size; i++, tmp >>= BITS_PER_UNIT)
1867 if ((tmp & 0xff) != 0 && (tmp & 0xff) != 0xff)
1868 return NULL_TREE;
1869
1870 n->n &= val;
1871 }
1872 break;
1873 case LSHIFT_EXPR:
1874 case RSHIFT_EXPR:
1875 case LROTATE_EXPR:
1876 case RROTATE_EXPR:
1877 if (!do_shift_rotate (code, n, (int)TREE_INT_CST_LOW (rhs2)))
1878 return NULL_TREE;
1879 break;
1880 CASE_CONVERT:
1881 {
1882 int type_size;
1883
1884 type_size = TYPE_PRECISION (gimple_expr_type (stmt));
1885 if (type_size % BITS_PER_UNIT != 0)
1886 return NULL_TREE;
1887
1888 if (type_size / BITS_PER_UNIT < (int)(sizeof (int64_t)))
1889 {
1890 /* If STMT casts to a smaller type, mask out the bits not
1891 belonging to the target type. */
1892 n->n &= ((uint64_t)1 << type_size) - 1;
1893 }
1894 n->size = type_size / BITS_PER_UNIT;
1895 if (!n->base_addr)
1896 n->range = n->size;
1897 }
1898 break;
1899 default:
1900 return NULL_TREE;
1901 }
1902 return verify_symbolic_number_p (n, stmt) ? source_expr1 : NULL_TREE;
1903 }
1904
1905 /* Handle binary rhs. */
1906
1907 if (rhs_class == GIMPLE_BINARY_RHS)
1908 {
1909 int i;
1910 struct symbolic_number n1, n2;
1911 uint64_t mask;
1912 tree source_expr2;
1913
1914 if (code != BIT_IOR_EXPR)
1915 return NULL_TREE;
1916
1917 if (TREE_CODE (rhs2) != SSA_NAME)
1918 return NULL_TREE;
1919
1920 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
1921
1922 switch (code)
1923 {
1924 case BIT_IOR_EXPR:
1925 source_expr1 = find_bswap_or_nop_1 (rhs1_stmt, &n1, limit - 1);
1926
1927 if (!source_expr1)
1928 return NULL_TREE;
1929
1930 source_expr2 = find_bswap_or_nop_1 (rhs2_stmt, &n2, limit - 1);
1931
1932 if (!source_expr2)
1933 return NULL_TREE;
1934
1935 if (n1.size != n2.size)
1936 return NULL_TREE;
1937
1938 if (!n1.vuse != !n2.vuse
1939 || (n1.vuse && !operand_equal_p (n1.vuse, n2.vuse, 0)))
1940 return NULL_TREE;
1941
1942 if (source_expr1 != source_expr2)
1943 {
1944 int64_t inc, mask;
1945 unsigned i;
1946 HOST_WIDE_INT off_sub;
1947 struct symbolic_number *n_ptr;
1948
1949 if (!n1.base_addr || !n2.base_addr
1950 || !operand_equal_p (n1.base_addr, n2.base_addr, 0))
1951 return NULL_TREE;
1952 if (!n1.offset != !n2.offset
1953 || (n1.offset && !operand_equal_p (n1.offset, n2.offset, 0)))
1954 return NULL_TREE;
1955
1956 /* Swap n1 and n2 so that n1.bytepos < n2.bytepos. */
1957 if (n2.bytepos < n1.bytepos)
1958 {
1959 struct symbolic_number tmpn;
1960
1961 tmpn = n2;
1962 n2 = n1;
1963 n1 = tmpn;
1964 source_expr1 = source_expr2;
1965 }
1966
1967 off_sub = n2.bytepos - n1.bytepos;
1968
1969 /* Check that the range of memory covered fits in the biggest int size. */
1970 if (off_sub + n2.range > (int) sizeof (int64_t))
1971 return NULL_TREE;
1972 n->range = n2.range + off_sub;
1973
1974 /* Reinterpret the byte marks in the symbolic number that holds the
1975 value of bigger weight, according to target endianness. */
1976 inc = BYTES_BIG_ENDIAN ? off_sub + n2.range - n1.range : off_sub;
1977 mask = 0xFF;
1978 if (BYTES_BIG_ENDIAN)
1979 n_ptr = &n1;
1980 else
1981 n_ptr = &n2;
1982 for (i = 0; i < sizeof (int64_t); i++, inc <<= 8,
1983 mask <<= 8)
1984 {
1985 if (n_ptr->n & mask)
1986 n_ptr->n += inc;
1987 }
1988 }
1989 else
1990 n->range = n1.range;
1991
1992 if (!n1.alias_set
1993 || alias_ptr_types_compatible_p (n1.alias_set, n2.alias_set))
1994 n->alias_set = n1.alias_set;
1995 else
1996 n->alias_set = ptr_type_node;
1997 n->vuse = n1.vuse;
1998 n->base_addr = n1.base_addr;
1999 n->offset = n1.offset;
2000 n->bytepos = n1.bytepos;
2001 n->size = n1.size;
2002 for (i = 0, mask = 0xff; i < n->size; i++, mask <<= BITS_PER_UNIT)
2003 {
2004 uint64_t masked1, masked2;
2005
2006 masked1 = n1.n & mask;
2007 masked2 = n2.n & mask;
2008 if (masked1 && masked2 && masked1 != masked2)
2009 return NULL_TREE;
2010 }
2011 n->n = n1.n | n2.n;
2012
2013 if (!verify_symbolic_number_p (n, stmt))
2014 return NULL_TREE;
2015
2016 break;
2017 default:
2018 return NULL_TREE;
2019 }
2020 return source_expr1;
2021 }
2022 return NULL_TREE;
2023 }
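
/* Worked example of the recursion above (assuming a 16-bit unsigned X
   and BITS_PER_UNIT == 8): for the idiom (X << 8) | (X >> 8),

     X       ->  0x0201   (initial symbolic number)
     X << 8  ->  0x0100   (after masking to 2 bytes)
     X >> 8  ->  0x0002

   and the BIT_IOR_EXPR case merges the two disjoint marker sets into
   0x0102, which find_bswap_or_nop below recognizes as CMPXCHG reduced
   to 2 bytes, i.e. a 16-bit byte swap.  */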
2024
2025 /* Check if STMT completes a bswap implementation or a read in a given
2026 endianness, consisting of ORs, SHIFTs and ANDs, and set *BSWAP
2027 accordingly. It also sets N to represent the kind of operations
2028 performed: the size of the resulting expression and whether it works on
2029 a memory source, and if so the alias set and vuse. Finally, the
2030 function returns the source tree expression. */
2031
2032 static tree
2033 find_bswap_or_nop (gimple stmt, struct symbolic_number *n, bool *bswap)
2034 {
2035 /* The number which the find_bswap_or_nop_1 result should match in order
2036 to have a full byte swap. The number is shifted to the right
2037 according to the size of the symbolic number before using it. */
2038 uint64_t cmpxchg = CMPXCHG;
2039 uint64_t cmpnop = CMPNOP;
2040
2041 tree source_expr;
2042 int limit;
2043
2044 /* The last parameter determines the search depth limit. It usually
2045 correlates directly to the number n of bytes to be touched. We
2046 increase that number by log2(n) + 1 here in order to also
2047 cover signed -> unsigned conversions of the src operand, as can be seen
2048 in libgcc, and an initial shift/and operation of the src operand. */
2049 limit = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (gimple_expr_type (stmt)));
2050 limit += 1 + (int) ceil_log2 ((unsigned HOST_WIDE_INT) limit);
2051 source_expr = find_bswap_or_nop_1 (stmt, n, limit);
2052
2053 if (!source_expr)
2054 return NULL_TREE;
2055
2056 /* Find the real size of the result (highest non-zero byte). */
2057 if (n->base_addr)
2058 {
2059 int rsize;
2060 uint64_t tmpn;
2061
2062 for (tmpn = n->n, rsize = 0; tmpn; tmpn >>= BITS_PER_UNIT, rsize++);
2063 n->range = rsize;
2064 }
2065
2066 /* Zero out the extra bits of N and CMP*. */
2067 if (n->range < (int)sizeof (int64_t))
2068 {
2069 uint64_t mask;
2070
2071 mask = ((uint64_t)1 << (n->range * BITS_PER_UNIT)) - 1;
2072 cmpxchg >>= (sizeof (int64_t) - n->range) * BITS_PER_UNIT;
2073 cmpnop &= mask;
2074 }
2075
2076 /* A complete byte swap should make the symbolic number start with
2077 the largest digit in the highest order byte. An unchanged symbolic
2078 number indicates a read with the same endianness as the target. */
2079 if (n->n == cmpnop)
2080 *bswap = false;
2081 else if (n->n == cmpxchg)
2082 *bswap = true;
2083 else
2084 return NULL_TREE;
2085
2086 /* A nop without a memory source is just useless bit manipulation. */
2087 if (!n->base_addr && n->n == cmpnop)
2088 return NULL_TREE;
2089
2090 n->range *= BITS_PER_UNIT;
2091 return source_expr;
2092 }
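
/* As a source-level illustration of what this detects (sketch only,
   assuming a 32-bit unsigned int and BITS_PER_UNIT == 8), a manual
   swap such as

     unsigned int
     swap32 (unsigned int x)
     {
       return (x << 24)
              | ((x & 0x0000ff00) << 8)
              | ((x >> 8) & 0x0000ff00)
              | (x >> 24);
     }

   produces the symbolic number 0x01020304 == CMPXCHG >> 32, so *BSWAP
   is set and bswap_replace below can emit __builtin_bswap32.  */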
2093
2094 namespace {
2095
2096 const pass_data pass_data_optimize_bswap =
2097 {
2098 GIMPLE_PASS, /* type */
2099 "bswap", /* name */
2100 OPTGROUP_NONE, /* optinfo_flags */
2101 true, /* has_execute */
2102 TV_NONE, /* tv_id */
2103 PROP_ssa, /* properties_required */
2104 0, /* properties_provided */
2105 0, /* properties_destroyed */
2106 0, /* todo_flags_start */
2107 0, /* todo_flags_finish */
2108 };
2109
2110 class pass_optimize_bswap : public gimple_opt_pass
2111 {
2112 public:
2113 pass_optimize_bswap (gcc::context *ctxt)
2114 : gimple_opt_pass (pass_data_optimize_bswap, ctxt)
2115 {}
2116
2117 /* opt_pass methods: */
2118 virtual bool gate (function *)
2119 {
2120 return flag_expensive_optimizations && optimize;
2121 }
2122
2123 virtual unsigned int execute (function *);
2124
2125 }; // class pass_optimize_bswap
2126
2127 /* Perform the bswap optimization: replace the statement STMT at GSI
2128 with a load of type LOAD_TYPE, using the VUSE and alias set described
2129 by N, if a memory source is involved (N->base_addr is non null), followed
2130 by the builtin bswap invocation in FNDECL if BSWAP is true. SRC gives
2131 the source on which STMT is operating and N->range gives the
2132 size of the expression involved, for maintaining some statistics. */
2133
2134 static bool
2135 bswap_replace (gimple stmt, gimple_stmt_iterator *gsi, tree src, tree fndecl,
2136 tree bswap_type, tree load_type, struct symbolic_number *n,
2137 bool bswap)
2138 {
2139 tree tmp, tgt;
2140 gimple call;
2141
2142 tgt = gimple_assign_lhs (stmt);
2143
2144 /* Need to load the value from memory first. */
2145 if (n->base_addr)
2146 {
2147 tree addr_expr, addr_tmp, val_expr, val_tmp;
2148 tree load_offset_ptr, aligned_load_type;
2149 gimple addr_stmt, load_stmt;
2150 unsigned align;
2151
2152 align = get_object_alignment (src);
2153 if (bswap && SLOW_UNALIGNED_ACCESS (TYPE_MODE (load_type), align))
2154 return false;
2155
2156 /* Compute address to load from and cast according to the size
2157 of the load. */
2158 addr_expr = build_fold_addr_expr (unshare_expr (src));
2159 if (is_gimple_min_invariant (addr_expr))
2160 addr_tmp = addr_expr;
2161 else
2162 {
2163 addr_tmp = make_temp_ssa_name (TREE_TYPE (addr_expr), NULL,
2164 "load_src");
2165 addr_stmt = gimple_build_assign (addr_tmp, addr_expr);
2166 gsi_insert_before (gsi, addr_stmt, GSI_SAME_STMT);
2167 }
2168
2169 /* Perform the load. */
2170 aligned_load_type = load_type;
2171 if (align < TYPE_ALIGN (load_type))
2172 aligned_load_type = build_aligned_type (load_type, align);
2173 load_offset_ptr = build_int_cst (n->alias_set, 0);
2174 val_expr = fold_build2 (MEM_REF, aligned_load_type, addr_tmp,
2175 load_offset_ptr);
2176
2177 if (!bswap)
2178 {
2179 if (n->range == 16)
2180 nop_stats.found_16bit++;
2181 else if (n->range == 32)
2182 nop_stats.found_32bit++;
2183 else
2184 {
2185 gcc_assert (n->range == 64);
2186 nop_stats.found_64bit++;
2187 }
2188
2189 /* Convert the result of load if necessary. */
2190 if (!useless_type_conversion_p (TREE_TYPE (tgt), load_type))
2191 {
2192 val_tmp = make_temp_ssa_name (aligned_load_type, NULL,
2193 "load_dst");
2194 load_stmt = gimple_build_assign (val_tmp, val_expr);
2195 gimple_set_vuse (load_stmt, n->vuse);
2196 gsi_insert_before (gsi, load_stmt, GSI_SAME_STMT);
2197 gimple_assign_set_rhs_with_ops_1 (gsi, NOP_EXPR, val_tmp,
2198 NULL_TREE, NULL_TREE);
2199 }
2200 else
2201 gimple_assign_set_rhs_with_ops_1 (gsi, MEM_REF, val_expr,
2202 NULL_TREE, NULL_TREE);
2203 update_stmt (gsi_stmt (*gsi));
2204
2205 if (dump_file)
2206 {
2207 fprintf (dump_file,
2208 "%d bit load in target endianness found at: ",
2209 (int)n->range);
2210 print_gimple_stmt (dump_file, stmt, 0, 0);
2211 }
2212 return true;
2213 }
2214 else
2215 {
2216 val_tmp = make_temp_ssa_name (aligned_load_type, NULL, "load_dst");
2217 load_stmt = gimple_build_assign (val_tmp, val_expr);
2218 gimple_set_vuse (load_stmt, n->vuse);
2219 gsi_insert_before (gsi, load_stmt, GSI_SAME_STMT);
2220 }
2221 src = val_tmp;
2222 }
2223
2224 if (n->range == 16)
2225 bswap_stats.found_16bit++;
2226 else if (n->range == 32)
2227 bswap_stats.found_32bit++;
2228 else
2229 {
2230 gcc_assert (n->range == 64);
2231 bswap_stats.found_64bit++;
2232 }
2233
2234 tmp = src;
2235
2236 /* Convert the src expression if necessary. */
2237 if (!useless_type_conversion_p (TREE_TYPE (tmp), bswap_type))
2238 {
2239 gimple convert_stmt;
2240 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapsrc");
2241 convert_stmt = gimple_build_assign_with_ops (NOP_EXPR, tmp, src, NULL);
2242 gsi_insert_before (gsi, convert_stmt, GSI_SAME_STMT);
2243 }
2244
2245 call = gimple_build_call (fndecl, 1, tmp);
2246
2247 tmp = tgt;
2248
2249 /* Convert the result if necessary. */
2250 if (!useless_type_conversion_p (TREE_TYPE (tgt), bswap_type))
2251 {
2252 gimple convert_stmt;
2253 tmp = make_temp_ssa_name (bswap_type, NULL, "bswapdst");
2254 convert_stmt = gimple_build_assign_with_ops (NOP_EXPR, tgt, tmp, NULL);
2255 gsi_insert_after (gsi, convert_stmt, GSI_SAME_STMT);
2256 }
2257
2258 gimple_call_set_lhs (call, tmp);
2259
2260 if (dump_file)
2261 {
2262 fprintf (dump_file, "%d bit bswap implementation found at: ",
2263 (int)n->range);
2264 print_gimple_stmt (dump_file, stmt, 0, 0);
2265 }
2266
2267 gsi_insert_after (gsi, call, GSI_SAME_STMT);
2268 gsi_remove (gsi, true);
2269 return true;
2270 }
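
/* A sketch of the rewrite performed above for a 32-bit memory source
   (SSA names and the uint32_t cast are illustrative): when BSWAP is
   true, the OR tree rooted at STMT is replaced by

     load_dst_1 = MEM[(uint32_t *) &src];
     tgt_2 = __builtin_bswap32 (load_dst_1);

   while for BSWAP == false the load feeds the target directly,
   possibly through a NOP_EXPR conversion.  */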
2271
2272 /* Find manual byte swap implementations as well as loads in a given
2273 endianness. Byte swaps are turned into a bswap builtin invocation
2274 while endian loads are converted to a bswap builtin invocation or a
2275 simple load according to the target endianness. */
2276
2277 unsigned int
2278 pass_optimize_bswap::execute (function *fun)
2279 {
2280 basic_block bb;
2281 bool bswap16_p, bswap32_p, bswap64_p;
2282 bool changed = false;
2283 tree bswap16_type = NULL_TREE, bswap32_type = NULL_TREE, bswap64_type = NULL_TREE;
2284
2285 if (BITS_PER_UNIT != 8)
2286 return 0;
2287
2288 bswap16_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP16)
2289 && optab_handler (bswap_optab, HImode) != CODE_FOR_nothing);
2290 bswap32_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP32)
2291 && optab_handler (bswap_optab, SImode) != CODE_FOR_nothing);
2292 bswap64_p = (builtin_decl_explicit_p (BUILT_IN_BSWAP64)
2293 && (optab_handler (bswap_optab, DImode) != CODE_FOR_nothing
2294 || (bswap32_p && word_mode == SImode)));
2295
2296 /* Determine the argument type of the builtins. The code later on
2297 assumes that the return and argument type are the same. */
2298 if (bswap16_p)
2299 {
2300 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP16);
2301 bswap16_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
2302 }
2303
2304 if (bswap32_p)
2305 {
2306 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
2307 bswap32_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
2308 }
2309
2310 if (bswap64_p)
2311 {
2312 tree fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
2313 bswap64_type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
2314 }
2315
2316 memset (&nop_stats, 0, sizeof (nop_stats));
2317 memset (&bswap_stats, 0, sizeof (bswap_stats));
2318
2319 FOR_EACH_BB_FN (bb, fun)
2320 {
2321 gimple_stmt_iterator gsi;
2322
2323 /* We do a reverse scan for bswap patterns to make sure we get the
2324 widest match. As bswap pattern matching doesn't handle
2325 previously inserted smaller bswap replacements as sub-
2326 patterns, the wider variant wouldn't be detected. */
2327 for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
2328 {
2329 gimple stmt = gsi_stmt (gsi);
2330 tree fndecl = NULL_TREE, bswap_type = NULL_TREE;
2331 tree src, load_type;
2332 struct symbolic_number n;
2333 bool bswap;
2334
2335 if (!is_gimple_assign (stmt)
2336 || gimple_assign_rhs_code (stmt) != BIT_IOR_EXPR)
2337 continue;
2338
2339 src = find_bswap_or_nop (stmt, &n, &bswap);
2340
2341 if (!src)
2342 continue;
2343
2344 switch (n.range)
2345 {
2346 case 16:
2347 load_type = uint16_type_node;
2348 if (bswap16_p)
2349 {
2350 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP16);
2351 bswap_type = bswap16_type;
2352 }
2353 break;
2354 case 32:
2355 load_type = uint32_type_node;
2356 if (bswap32_p)
2357 {
2358 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP32);
2359 bswap_type = bswap32_type;
2360 }
2361 break;
2362 case 64:
2363 load_type = uint64_type_node;
2364 if (bswap64_p)
2365 {
2366 fndecl = builtin_decl_explicit (BUILT_IN_BSWAP64);
2367 bswap_type = bswap64_type;
2368 }
2369 break;
2370 default:
2371 continue;
2372 }
2373
2374 if (bswap && !fndecl)
2375 continue;
2376
2377 if (bswap_replace (stmt, &gsi, src, fndecl, bswap_type, load_type,
2378 &n, bswap))
2379 changed = true;
2380 }
2381 }
2382
2383 statistics_counter_event (fun, "16-bit nop implementations found",
2384 nop_stats.found_16bit);
2385 statistics_counter_event (fun, "32-bit nop implementations found",
2386 nop_stats.found_32bit);
2387 statistics_counter_event (fun, "64-bit nop implementations found",
2388 nop_stats.found_64bit);
2389 statistics_counter_event (fun, "16-bit bswap implementations found",
2390 bswap_stats.found_16bit);
2391 statistics_counter_event (fun, "32-bit bswap implementations found",
2392 bswap_stats.found_32bit);
2393 statistics_counter_event (fun, "64-bit bswap implementations found",
2394 bswap_stats.found_64bit);
2395
2396 return (changed ? TODO_update_ssa : 0);
2397 }
2398
2399 } // anon namespace
2400
2401 gimple_opt_pass *
2402 make_pass_optimize_bswap (gcc::context *ctxt)
2403 {
2404 return new pass_optimize_bswap (ctxt);
2405 }
2406
2407 /* Return true if stmt is a type conversion operation that can be stripped
2408 when used in a widening multiply operation. */
2409 static bool
2410 widening_mult_conversion_strippable_p (tree result_type, gimple stmt)
2411 {
2412 enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
2413
2414 if (TREE_CODE (result_type) == INTEGER_TYPE)
2415 {
2416 tree op_type;
2417 tree inner_op_type;
2418
2419 if (!CONVERT_EXPR_CODE_P (rhs_code))
2420 return false;
2421
2422 op_type = TREE_TYPE (gimple_assign_lhs (stmt));
2423
2424 /* If the type of OP has the same precision as the result, then
2425 we can strip this conversion. The multiply operation will be
2426 selected to create the correct extension as a by-product. */
2427 if (TYPE_PRECISION (result_type) == TYPE_PRECISION (op_type))
2428 return true;
2429
2430 /* We can also strip a conversion if it preserves the signed-ness of
2431 the operation and doesn't narrow the range. */
2432 inner_op_type = TREE_TYPE (gimple_assign_rhs1 (stmt));
2433
2434 /* If the inner-most type is unsigned, then we can strip any
2435 intermediate widening operation. If it's signed, then the
2436 intermediate widening operation must also be signed. */
2437 if ((TYPE_UNSIGNED (inner_op_type)
2438 || TYPE_UNSIGNED (op_type) == TYPE_UNSIGNED (inner_op_type))
2439 && TYPE_PRECISION (op_type) > TYPE_PRECISION (inner_op_type))
2440 return true;
2441
2442 return false;
2443 }
2444
2445 return rhs_code == FIXED_CONVERT_EXPR;
2446 }
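
/* For instance (hypothetical snippet, assuming a 32-bit int and a
   64-bit long long), in

     long long f (int a, int b) { return (long long) a * b; }

   the conversion of A to long long has the same precision as the
   multiplication's result, so it is strippable: the widening multiply
   selected later recreates the extension as a by-product.  */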
2447
2448 /* Return true if RHS is a suitable operand for a widening multiplication,
2449 assuming a target type of TYPE.
2450 There are two cases:
2451
2452 - RHS makes some value at least twice as wide. Store that value
2453 in *NEW_RHS_OUT if so, and store its type in *TYPE_OUT.
2454
2455 - RHS is an integer constant. Store that value in *NEW_RHS_OUT if so,
2456 but leave *TYPE_OUT untouched. */
2457
2458 static bool
2459 is_widening_mult_rhs_p (tree type, tree rhs, tree *type_out,
2460 tree *new_rhs_out)
2461 {
2462 gimple stmt;
2463 tree type1, rhs1;
2464
2465 if (TREE_CODE (rhs) == SSA_NAME)
2466 {
2467 stmt = SSA_NAME_DEF_STMT (rhs);
2468 if (is_gimple_assign (stmt))
2469 {
2470 if (! widening_mult_conversion_strippable_p (type, stmt))
2471 rhs1 = rhs;
2472 else
2473 {
2474 rhs1 = gimple_assign_rhs1 (stmt);
2475
2476 if (TREE_CODE (rhs1) == INTEGER_CST)
2477 {
2478 *new_rhs_out = rhs1;
2479 *type_out = NULL;
2480 return true;
2481 }
2482 }
2483 }
2484 else
2485 rhs1 = rhs;
2486
2487 type1 = TREE_TYPE (rhs1);
2488
2489 if (TREE_CODE (type1) != TREE_CODE (type)
2490 || TYPE_PRECISION (type1) * 2 > TYPE_PRECISION (type))
2491 return false;
2492
2493 *new_rhs_out = rhs1;
2494 *type_out = type1;
2495 return true;
2496 }
2497
2498 if (TREE_CODE (rhs) == INTEGER_CST)
2499 {
2500 *new_rhs_out = rhs;
2501 *type_out = NULL;
2502 return true;
2503 }
2504
2505 return false;
2506 }
2507
2508 /* Return true if STMT performs a widening multiplication, assuming the
2509 output type is TYPE. If so, store the unwidened types of the operands
2510 in *TYPE1_OUT and *TYPE2_OUT respectively. Also fill *RHS1_OUT and
2511 *RHS2_OUT such that converting those operands to types *TYPE1_OUT
2512 and *TYPE2_OUT would give the operands of the multiplication. */
2513
2514 static bool
2515 is_widening_mult_p (gimple stmt,
2516 tree *type1_out, tree *rhs1_out,
2517 tree *type2_out, tree *rhs2_out)
2518 {
2519 tree type = TREE_TYPE (gimple_assign_lhs (stmt));
2520
2521 if (TREE_CODE (type) != INTEGER_TYPE
2522 && TREE_CODE (type) != FIXED_POINT_TYPE)
2523 return false;
2524
2525 if (!is_widening_mult_rhs_p (type, gimple_assign_rhs1 (stmt), type1_out,
2526 rhs1_out))
2527 return false;
2528
2529 if (!is_widening_mult_rhs_p (type, gimple_assign_rhs2 (stmt), type2_out,
2530 rhs2_out))
2531 return false;
2532
2533 if (*type1_out == NULL)
2534 {
2535 if (*type2_out == NULL || !int_fits_type_p (*rhs1_out, *type2_out))
2536 return false;
2537 *type1_out = *type2_out;
2538 }
2539
2540 if (*type2_out == NULL)
2541 {
2542 if (!int_fits_type_p (*rhs2_out, *type1_out))
2543 return false;
2544 *type2_out = *type1_out;
2545 }
2546
2547 /* Ensure that the larger of the two operands comes first. */
2548 if (TYPE_PRECISION (*type1_out) < TYPE_PRECISION (*type2_out))
2549 {
2550 tree tmp;
2551 tmp = *type1_out;
2552 *type1_out = *type2_out;
2553 *type2_out = tmp;
2554 tmp = *rhs1_out;
2555 *rhs1_out = *rhs2_out;
2556 *rhs2_out = tmp;
2557 }
2558
2559 return true;
2560 }
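
/* A small example of the constant case (hypothetical snippet, with I
   of type int):

     long long r = (long long) i * 100;

   The first operand gives *TYPE1_OUT == int; the second operand is an
   INTEGER_CST which leaves *TYPE2_OUT NULL, and since 100 fits in int
   the code above fills *TYPE2_OUT in from *TYPE1_OUT.  */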
2561
2562 /* Process a single gimple statement STMT, which has a MULT_EXPR as
2563 its rhs, and try to convert it into a WIDEN_MULT_EXPR. The return
2564 value is true iff we converted the statement. */
2565
2566 static bool
2567 convert_mult_to_widen (gimple stmt, gimple_stmt_iterator *gsi)
2568 {
2569 tree lhs, rhs1, rhs2, type, type1, type2;
2570 enum insn_code handler;
2571 enum machine_mode to_mode, from_mode, actual_mode;
2572 optab op;
2573 int actual_precision;
2574 location_t loc = gimple_location (stmt);
2575 bool from_unsigned1, from_unsigned2;
2576
2577 lhs = gimple_assign_lhs (stmt);
2578 type = TREE_TYPE (lhs);
2579 if (TREE_CODE (type) != INTEGER_TYPE)
2580 return false;
2581
2582 if (!is_widening_mult_p (stmt, &type1, &rhs1, &type2, &rhs2))
2583 return false;
2584
2585 to_mode = TYPE_MODE (type);
2586 from_mode = TYPE_MODE (type1);
2587 from_unsigned1 = TYPE_UNSIGNED (type1);
2588 from_unsigned2 = TYPE_UNSIGNED (type2);
2589
2590 if (from_unsigned1 && from_unsigned2)
2591 op = umul_widen_optab;
2592 else if (!from_unsigned1 && !from_unsigned2)
2593 op = smul_widen_optab;
2594 else
2595 op = usmul_widen_optab;
2596
2597 handler = find_widening_optab_handler_and_mode (op, to_mode, from_mode,
2598 0, &actual_mode);
2599
2600 if (handler == CODE_FOR_nothing)
2601 {
2602 if (op != smul_widen_optab)
2603 {
2604 /* We can use a signed multiply with unsigned types as long as
2605 there is a wider mode to use, or it is the smaller of the two
2606 types that is unsigned. Note that type1 >= type2, always. */
2607 if ((TYPE_UNSIGNED (type1)
2608 && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
2609 || (TYPE_UNSIGNED (type2)
2610 && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
2611 {
2612 from_mode = GET_MODE_WIDER_MODE (from_mode);
2613 if (GET_MODE_SIZE (to_mode) <= GET_MODE_SIZE (from_mode))
2614 return false;
2615 }
2616
2617 op = smul_widen_optab;
2618 handler = find_widening_optab_handler_and_mode (op, to_mode,
2619 from_mode, 0,
2620 &actual_mode);
2621
2622 if (handler == CODE_FOR_nothing)
2623 return false;
2624
2625 from_unsigned1 = from_unsigned2 = false;
2626 }
2627 else
2628 return false;
2629 }
2630
2631 /* Ensure that the inputs to the handler are in the correct precision
2632 for the opcode. This will be the full mode size. */
2633 actual_precision = GET_MODE_PRECISION (actual_mode);
2634 if (2 * actual_precision > TYPE_PRECISION (type))
2635 return false;
2636 if (actual_precision != TYPE_PRECISION (type1)
2637 || from_unsigned1 != TYPE_UNSIGNED (type1))
2638 rhs1 = build_and_insert_cast (gsi, loc,
2639 build_nonstandard_integer_type
2640 (actual_precision, from_unsigned1), rhs1);
2641 if (actual_precision != TYPE_PRECISION (type2)
2642 || from_unsigned2 != TYPE_UNSIGNED (type2))
2643 rhs2 = build_and_insert_cast (gsi, loc,
2644 build_nonstandard_integer_type
2645 (actual_precision, from_unsigned2), rhs2);
2646
2647 /* Handle constants. */
2648 if (TREE_CODE (rhs1) == INTEGER_CST)
2649 rhs1 = fold_convert (type1, rhs1);
2650 if (TREE_CODE (rhs2) == INTEGER_CST)
2651 rhs2 = fold_convert (type2, rhs2);
2652
2653 gimple_assign_set_rhs1 (stmt, rhs1);
2654 gimple_assign_set_rhs2 (stmt, rhs2);
2655 gimple_assign_set_rhs_code (stmt, WIDEN_MULT_EXPR);
2656 update_stmt (stmt);
2657 widen_mul_stats.widen_mults_inserted++;
2658 return true;
2659 }
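
/* Putting the above together (sketch only, for a target whose
   smul_widen_optab provides a 32x32->64 multiply), the gimple

     _1 = (long long int) a_2;
     _3 = (long long int) b_4;
     r_5 = _1 * _3;

   is rewritten so that r_5 is computed by a WIDEN_MULT_EXPR directly
   on a_2 and b_4, with casts inserted only when the operand precision
   does not match what the optab handler expects.  */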
2660
2661 /* Process a single gimple statement STMT, which is found at the
2662 iterator GSI and has either a PLUS_EXPR or a MINUS_EXPR as its
2663 rhs (given by CODE), and try to convert it into a
2664 WIDEN_MULT_PLUS_EXPR or a WIDEN_MULT_MINUS_EXPR. The return value
2665 is true iff we converted the statement. */
2666
2667 static bool
2668 convert_plusminus_to_widen (gimple_stmt_iterator *gsi, gimple stmt,
2669 enum tree_code code)
2670 {
2671 gimple rhs1_stmt = NULL, rhs2_stmt = NULL;
2672 gimple conv1_stmt = NULL, conv2_stmt = NULL, conv_stmt;
2673 tree type, type1, type2, optype;
2674 tree lhs, rhs1, rhs2, mult_rhs1, mult_rhs2, add_rhs;
2675 enum tree_code rhs1_code = ERROR_MARK, rhs2_code = ERROR_MARK;
2676 optab this_optab;
2677 enum tree_code wmult_code;
2678 enum insn_code handler;
2679 enum machine_mode to_mode, from_mode, actual_mode;
2680 location_t loc = gimple_location (stmt);
2681 int actual_precision;
2682 bool from_unsigned1, from_unsigned2;
2683
2684 lhs = gimple_assign_lhs (stmt);
2685 type = TREE_TYPE (lhs);
2686 if (TREE_CODE (type) != INTEGER_TYPE
2687 && TREE_CODE (type) != FIXED_POINT_TYPE)
2688 return false;
2689
2690 if (code == MINUS_EXPR)
2691 wmult_code = WIDEN_MULT_MINUS_EXPR;
2692 else
2693 wmult_code = WIDEN_MULT_PLUS_EXPR;
2694
2695 rhs1 = gimple_assign_rhs1 (stmt);
2696 rhs2 = gimple_assign_rhs2 (stmt);
2697
2698 if (TREE_CODE (rhs1) == SSA_NAME)
2699 {
2700 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
2701 if (is_gimple_assign (rhs1_stmt))
2702 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
2703 }
2704
2705 if (TREE_CODE (rhs2) == SSA_NAME)
2706 {
2707 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
2708 if (is_gimple_assign (rhs2_stmt))
2709 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
2710 }
2711
2712 /* Allow for one conversion statement between the multiply
2713 and addition/subtraction statement. If there is more than
2714 one conversion then we assume they would invalidate this
2715 transformation. If that's not the case then they should have
2716 been folded before now. */
2717 if (CONVERT_EXPR_CODE_P (rhs1_code))
2718 {
2719 conv1_stmt = rhs1_stmt;
2720 rhs1 = gimple_assign_rhs1 (rhs1_stmt);
2721 if (TREE_CODE (rhs1) == SSA_NAME)
2722 {
2723 rhs1_stmt = SSA_NAME_DEF_STMT (rhs1);
2724 if (is_gimple_assign (rhs1_stmt))
2725 rhs1_code = gimple_assign_rhs_code (rhs1_stmt);
2726 }
2727 else
2728 return false;
2729 }
2730 if (CONVERT_EXPR_CODE_P (rhs2_code))
2731 {
2732 conv2_stmt = rhs2_stmt;
2733 rhs2 = gimple_assign_rhs1 (rhs2_stmt);
2734 if (TREE_CODE (rhs2) == SSA_NAME)
2735 {
2736 rhs2_stmt = SSA_NAME_DEF_STMT (rhs2);
2737 if (is_gimple_assign (rhs2_stmt))
2738 rhs2_code = gimple_assign_rhs_code (rhs2_stmt);
2739 }
2740 else
2741 return false;
2742 }
2743
2744 /* If the code is already WIDEN_MULT_EXPR then it would seem unnecessary
2745 to call is_widening_mult_p, but we still need the returned rhs values.
2746
2747 It might also appear that it would be sufficient to use the existing
2748 operands of the widening multiply, but that would limit the choice of
2749 multiply-and-accumulate instructions.
2750
2751 If the widened-multiplication result has more than one use, it is
2752 probably wiser not to do the conversion. */
2753 if (code == PLUS_EXPR
2754 && (rhs1_code == MULT_EXPR || rhs1_code == WIDEN_MULT_EXPR))
2755 {
2756 if (!has_single_use (rhs1)
2757 || !is_widening_mult_p (rhs1_stmt, &type1, &mult_rhs1,
2758 &type2, &mult_rhs2))
2759 return false;
2760 add_rhs = rhs2;
2761 conv_stmt = conv1_stmt;
2762 }
2763 else if (rhs2_code == MULT_EXPR || rhs2_code == WIDEN_MULT_EXPR)
2764 {
2765 if (!has_single_use (rhs2)
2766 || !is_widening_mult_p (rhs2_stmt, &type1, &mult_rhs1,
2767 &type2, &mult_rhs2))
2768 return false;
2769 add_rhs = rhs1;
2770 conv_stmt = conv2_stmt;
2771 }
2772 else
2773 return false;
2774
2775 to_mode = TYPE_MODE (type);
2776 from_mode = TYPE_MODE (type1);
2777 from_unsigned1 = TYPE_UNSIGNED (type1);
2778 from_unsigned2 = TYPE_UNSIGNED (type2);
2779 optype = type1;
2780
2781 /* There's no such thing as a mixed sign madd yet, so use a wider mode. */
2782 if (from_unsigned1 != from_unsigned2)
2783 {
2784 if (!INTEGRAL_TYPE_P (type))
2785 return false;
2786 /* We can use a signed multiply with unsigned types as long as
2787 there is a wider mode to use, or it is the smaller of the two
2788 types that is unsigned. Note that type1 >= type2, always. */
2789 if ((from_unsigned1
2790 && TYPE_PRECISION (type1) == GET_MODE_PRECISION (from_mode))
2791 || (from_unsigned2
2792 && TYPE_PRECISION (type2) == GET_MODE_PRECISION (from_mode)))
2793 {
2794 from_mode = GET_MODE_WIDER_MODE (from_mode);
2795 if (GET_MODE_SIZE (from_mode) >= GET_MODE_SIZE (to_mode))
2796 return false;
2797 }
2798
2799 from_unsigned1 = from_unsigned2 = false;
2800 optype = build_nonstandard_integer_type (GET_MODE_PRECISION (from_mode),
2801 false);
2802 }
2803
2804 /* If there was a conversion between the multiply and addition
2805 then we need to make sure it fits a multiply-and-accumulate.
2806 There should be a single mode change which does not change the
2807 value. */
2808 if (conv_stmt)
2809 {
2810 /* We use the original, unmodified data types for this. */
2811 tree from_type = TREE_TYPE (gimple_assign_rhs1 (conv_stmt));
2812 tree to_type = TREE_TYPE (gimple_assign_lhs (conv_stmt));
2813 int data_size = TYPE_PRECISION (type1) + TYPE_PRECISION (type2);
2814 bool is_unsigned = TYPE_UNSIGNED (type1) && TYPE_UNSIGNED (type2);
2815
2816 if (TYPE_PRECISION (from_type) > TYPE_PRECISION (to_type))
2817 {
2818 /* Conversion is a truncate. */
2819 if (TYPE_PRECISION (to_type) < data_size)
2820 return false;
2821 }
2822 else if (TYPE_PRECISION (from_type) < TYPE_PRECISION (to_type))
2823 {
2824 /* Conversion is an extend. Check it's the right sort. */
2825 if (TYPE_UNSIGNED (from_type) != is_unsigned
2826 && !(is_unsigned && TYPE_PRECISION (from_type) > data_size))
2827 return false;
2828 }
2829 /* else convert is a no-op for our purposes. */
2830 }
2831
2832 /* Verify that the machine can perform a widening multiply
2833 accumulate in this mode/signedness combination, otherwise
2834 this transformation is likely to pessimize code. */
2835 this_optab = optab_for_tree_code (wmult_code, optype, optab_default);
2836 handler = find_widening_optab_handler_and_mode (this_optab, to_mode,
2837 from_mode, 0, &actual_mode);
2838
2839 if (handler == CODE_FOR_nothing)
2840 return false;
2841
2842 /* Ensure that the inputs to the handler are in the correct precision
2843 for the opcode. This will be the full mode size. */
2844 actual_precision = GET_MODE_PRECISION (actual_mode);
2845 if (actual_precision != TYPE_PRECISION (type1)
2846 || from_unsigned1 != TYPE_UNSIGNED (type1))
2847 mult_rhs1 = build_and_insert_cast (gsi, loc,
2848 build_nonstandard_integer_type
2849 (actual_precision, from_unsigned1),
2850 mult_rhs1);
2851 if (actual_precision != TYPE_PRECISION (type2)
2852 || from_unsigned2 != TYPE_UNSIGNED (type2))
2853 mult_rhs2 = build_and_insert_cast (gsi, loc,
2854 build_nonstandard_integer_type
2855 (actual_precision, from_unsigned2),
2856 mult_rhs2);
2857
2858 if (!useless_type_conversion_p (type, TREE_TYPE (add_rhs)))
2859 add_rhs = build_and_insert_cast (gsi, loc, type, add_rhs);
2860
2861 /* Handle constants. */
2862 if (TREE_CODE (mult_rhs1) == INTEGER_CST)
2863 mult_rhs1 = fold_convert (type1, mult_rhs1);
2864 if (TREE_CODE (mult_rhs2) == INTEGER_CST)
2865 mult_rhs2 = fold_convert (type2, mult_rhs2);
2866
2867 gimple_assign_set_rhs_with_ops_1 (gsi, wmult_code, mult_rhs1, mult_rhs2,
2868 add_rhs);
2869 update_stmt (gsi_stmt (*gsi));
2870 widen_mul_stats.maccs_inserted++;
2871 return true;
2872 }
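
/* For example (illustrative SSA names, on a target providing a
   WIDEN_MULT_PLUS_EXPR handler), the pair

     _1 = a_2 w* b_3;
     acc_4 = _1 + acc_5;

   is collapsed above into acc_4 = WIDEN_MULT_PLUS_EXPR <a_2, b_3,
   acc_5>; the MINUS_EXPR case is handled the same way through
   WIDEN_MULT_MINUS_EXPR.  */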
2873
2874 /* Combine the multiplication at MUL_STMT with operands MULOP1 and MULOP2
2875 with uses in additions and subtractions to form fused multiply-add
2876 operations. Returns true if successful and MUL_STMT should be removed. */
2877
2878 static bool
2879 convert_mult_to_fma (gimple mul_stmt, tree op1, tree op2)
2880 {
2881 tree mul_result = gimple_get_lhs (mul_stmt);
2882 tree type = TREE_TYPE (mul_result);
2883 gimple use_stmt, neguse_stmt, fma_stmt;
2884 use_operand_p use_p;
2885 imm_use_iterator imm_iter;
2886
2887 if (FLOAT_TYPE_P (type)
2888 && flag_fp_contract_mode == FP_CONTRACT_OFF)
2889 return false;
2890
2891 /* We don't want to do bitfield reduction ops. */
2892 if (INTEGRAL_TYPE_P (type)
2893 && (TYPE_PRECISION (type)
2894 != GET_MODE_PRECISION (TYPE_MODE (type))))
2895 return false;
2896
2897 /* If the target doesn't support it, don't generate it. We assume that
2898 if fma isn't available then fms, fnma or fnms are not either. */
2899 if (optab_handler (fma_optab, TYPE_MODE (type)) == CODE_FOR_nothing)
2900 return false;
2901
2902 /* If the multiplication has zero uses, it is kept around probably because
2903 of -fnon-call-exceptions. Don't optimize it away in that case,
2904 it is DCE's job. */
2905 if (has_zero_uses (mul_result))
2906 return false;
2907
2908 /* Make sure that the multiplication statement becomes dead after
2909 the transformation, i.e. that all uses are transformed to FMAs.
2910 This means we assume that an FMA operation has the same cost
2911 as an addition. */
2912 FOR_EACH_IMM_USE_FAST (use_p, imm_iter, mul_result)
2913 {
2914 enum tree_code use_code;
2915 tree result = mul_result;
2916 bool negate_p = false;
2917
2918 use_stmt = USE_STMT (use_p);
2919
2920 if (is_gimple_debug (use_stmt))
2921 continue;
2922
2923 /* For now restrict this operation to single basic blocks. In theory
2924 we would want to support sinking the multiplication in
2925 m = a*b;
2926 if ()
2927 ma = m + c;
2928 else
2929 d = m;
2930 to form a fma in the then block and sink the multiplication to the
2931 else block. */
2932 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
2933 return false;
2934
2935 if (!is_gimple_assign (use_stmt))
2936 return false;
2937
2938 use_code = gimple_assign_rhs_code (use_stmt);
2939
2940 /* A negate on the multiplication leads to FNMA. */
2941 if (use_code == NEGATE_EXPR)
2942 {
2943 ssa_op_iter iter;
2944 use_operand_p usep;
2945
2946 result = gimple_assign_lhs (use_stmt);
2947
2948 /* Make sure the negate statement becomes dead with this
2949 single transformation. */
2950 if (!single_imm_use (gimple_assign_lhs (use_stmt),
2951 &use_p, &neguse_stmt))
2952 return false;
2953
2954 /* Make sure the multiplication isn't also used on that stmt. */
2955 FOR_EACH_PHI_OR_STMT_USE (usep, neguse_stmt, iter, SSA_OP_USE)
2956 if (USE_FROM_PTR (usep) == mul_result)
2957 return false;
2958
2959 /* Re-validate. */
2960 use_stmt = neguse_stmt;
2961 if (gimple_bb (use_stmt) != gimple_bb (mul_stmt))
2962 return false;
2963 if (!is_gimple_assign (use_stmt))
2964 return false;
2965
2966 use_code = gimple_assign_rhs_code (use_stmt);
2967 negate_p = true;
2968 }
2969
2970 switch (use_code)
2971 {
2972 case MINUS_EXPR:
2973 if (gimple_assign_rhs2 (use_stmt) == result)
2974 negate_p = !negate_p;
2975 break;
2976 case PLUS_EXPR:
2977 break;
2978 default:
2979 /* FMA can only be formed from PLUS and MINUS. */
2980 return false;
2981 }
2982
2983 /* If the subtrahend (gimple_assign_rhs2 (use_stmt)) is computed
2984 by a MULT_EXPR that we'll visit later, we might be able to
2985 get a more profitable match with fnma.
2986 OTOH, if we don't, a negate / fma pair likely has lower latency
2987 than a mult / subtract pair. */
2988 if (use_code == MINUS_EXPR && !negate_p
2989 && gimple_assign_rhs1 (use_stmt) == result
2990 && optab_handler (fms_optab, TYPE_MODE (type)) == CODE_FOR_nothing
2991 && optab_handler (fnma_optab, TYPE_MODE (type)) != CODE_FOR_nothing)
2992 {
2993 tree rhs2 = gimple_assign_rhs2 (use_stmt);
2994
2995 if (TREE_CODE (rhs2) == SSA_NAME)
2996 {
2997 gimple stmt2 = SSA_NAME_DEF_STMT (rhs2);
2998 if (has_single_use (rhs2)
2999 && is_gimple_assign (stmt2)
3000 && gimple_assign_rhs_code (stmt2) == MULT_EXPR)
3001 return false;
3002 }
3003 }
3004
3005 /* We can't handle a * b + a * b. */
3006 if (gimple_assign_rhs1 (use_stmt) == gimple_assign_rhs2 (use_stmt))
3007 return false;
3008
3009 /* While it is possible to validate whether or not the exact form
3010 that we've recognized is available in the backend, the assumption
3011 is that the transformation is never a loss. For instance, suppose
3012 the target only has the plain FMA pattern available. Consider
3013 a*b-c -> fma(a,b,-c): we've exchanged MUL+SUB for FMA+NEG, which
3014 is still two operations. Consider -(a*b)-c -> fma(-a,b,-c): we
3015 still have 3 operations, but in the FMA form the two NEGs are
3016 independent and could be run in parallel. */
3017 }
3018
3019 FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, mul_result)
3020 {
3021 gimple_stmt_iterator gsi = gsi_for_stmt (use_stmt);
3022 enum tree_code use_code;
3023 tree addop, mulop1 = op1, result = mul_result;
3024 bool negate_p = false;
3025
3026 if (is_gimple_debug (use_stmt))
3027 continue;
3028
3029 use_code = gimple_assign_rhs_code (use_stmt);
3030 if (use_code == NEGATE_EXPR)
3031 {
3032 result = gimple_assign_lhs (use_stmt);
3033 single_imm_use (gimple_assign_lhs (use_stmt), &use_p, &neguse_stmt);
3034 gsi_remove (&gsi, true);
3035 release_defs (use_stmt);
3036
3037 use_stmt = neguse_stmt;
3038 gsi = gsi_for_stmt (use_stmt);
3039 use_code = gimple_assign_rhs_code (use_stmt);
3040 negate_p = true;
3041 }
3042
3043 if (gimple_assign_rhs1 (use_stmt) == result)
3044 {
3045 addop = gimple_assign_rhs2 (use_stmt);
3046 /* a * b - c -> a * b + (-c) */
3047 if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
3048 addop = force_gimple_operand_gsi (&gsi,
3049 build1 (NEGATE_EXPR,
3050 type, addop),
3051 true, NULL_TREE, true,
3052 GSI_SAME_STMT);
3053 }
3054 else
3055 {
3056 addop = gimple_assign_rhs1 (use_stmt);
3057 /* a - b * c -> (-b) * c + a */
3058 if (gimple_assign_rhs_code (use_stmt) == MINUS_EXPR)
3059 negate_p = !negate_p;
3060 }
3061
3062 if (negate_p)
3063 mulop1 = force_gimple_operand_gsi (&gsi,
3064 build1 (NEGATE_EXPR,
3065 type, mulop1),
3066 true, NULL_TREE, true,
3067 GSI_SAME_STMT);
3068
3069 fma_stmt = gimple_build_assign_with_ops (FMA_EXPR,
3070 gimple_assign_lhs (use_stmt),
3071 mulop1, op2,
3072 addop);
3073 gsi_replace (&gsi, fma_stmt, true);
3074 widen_mul_stats.fmas_inserted++;
3075 }
3076
3077 return true;
3078 }
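
/* As an illustration (hypothetical SSA names, double operands, and a
   target providing fma_optab), the pair

     t_1 = a_2 * b_3;
     x_4 = t_1 + c_5;

   becomes x_4 = FMA_EXPR <a_2, b_3, c_5>, while a use t_1 - c_5 first
   negates the addend, per the a * b - c -> a * b + (-c) rewrite
   above.  */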
3079
3080 /* Find integer multiplications where the operands are extended from
3081 smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
3082 where appropriate. */
3083
3084 namespace {
3085
3086 const pass_data pass_data_optimize_widening_mul =
3087 {
3088 GIMPLE_PASS, /* type */
3089 "widening_mul", /* name */
3090 OPTGROUP_NONE, /* optinfo_flags */
3091 true, /* has_execute */
3092 TV_NONE, /* tv_id */
3093 PROP_ssa, /* properties_required */
3094 0, /* properties_provided */
3095 0, /* properties_destroyed */
3096 0, /* todo_flags_start */
3097 TODO_update_ssa, /* todo_flags_finish */
3098 };
3099
3100 class pass_optimize_widening_mul : public gimple_opt_pass
3101 {
3102 public:
3103 pass_optimize_widening_mul (gcc::context *ctxt)
3104 : gimple_opt_pass (pass_data_optimize_widening_mul, ctxt)
3105 {}
3106
3107 /* opt_pass methods: */
3108 virtual bool gate (function *)
3109 {
3110 return flag_expensive_optimizations && optimize;
3111 }
3112
3113 virtual unsigned int execute (function *);
3114
3115 }; // class pass_optimize_widening_mul
3116
3117 unsigned int
3118 pass_optimize_widening_mul::execute (function *fun)
3119 {
3120 basic_block bb;
3121 bool cfg_changed = false;
3122
3123 memset (&widen_mul_stats, 0, sizeof (widen_mul_stats));
3124
3125 FOR_EACH_BB_FN (bb, fun)
3126 {
3127 gimple_stmt_iterator gsi;
3128
3129 for (gsi = gsi_after_labels (bb); !gsi_end_p (gsi);)
3130 {
3131 gimple stmt = gsi_stmt (gsi);
3132 enum tree_code code;
3133
3134 if (is_gimple_assign (stmt))
3135 {
3136 code = gimple_assign_rhs_code (stmt);
3137 switch (code)
3138 {
3139 case MULT_EXPR:
3140 if (!convert_mult_to_widen (stmt, &gsi)
3141 && convert_mult_to_fma (stmt,
3142 gimple_assign_rhs1 (stmt),
3143 gimple_assign_rhs2 (stmt)))
3144 {
3145 gsi_remove (&gsi, true);
3146 release_defs (stmt);
3147 continue;
3148 }
3149 break;
3150
3151 case PLUS_EXPR:
3152 case MINUS_EXPR:
3153 convert_plusminus_to_widen (&gsi, stmt, code);
3154 break;
3155
3156 default:;
3157 }
3158 }
3159 else if (is_gimple_call (stmt)
3160 && gimple_call_lhs (stmt))
3161 {
3162 tree fndecl = gimple_call_fndecl (stmt);
3163 if (fndecl
3164 && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
3165 {
3166 switch (DECL_FUNCTION_CODE (fndecl))
3167 {
3168 case BUILT_IN_POWF:
3169 case BUILT_IN_POW:
3170 case BUILT_IN_POWL:
3171 if (TREE_CODE (gimple_call_arg (stmt, 1)) == REAL_CST
3172 && REAL_VALUES_EQUAL
3173 (TREE_REAL_CST (gimple_call_arg (stmt, 1)),
3174 dconst2)
3175 && convert_mult_to_fma (stmt,
3176 gimple_call_arg (stmt, 0),
3177 gimple_call_arg (stmt, 0)))
3178 {
3179 unlink_stmt_vdef (stmt);
3180 if (gsi_remove (&gsi, true)
3181 && gimple_purge_dead_eh_edges (bb))
3182 cfg_changed = true;
3183 release_defs (stmt);
3184 continue;
3185 }
3186 break;
3187
3188 default:;
3189 }
3190 }
3191 }
3192 gsi_next (&gsi);
3193 }
3194 }
3195
3196 statistics_counter_event (fun, "widening multiplications inserted",
3197 widen_mul_stats.widen_mults_inserted);
3198 statistics_counter_event (fun, "widening maccs inserted",
3199 widen_mul_stats.maccs_inserted);
3200 statistics_counter_event (fun, "fused multiply-adds inserted",
3201 widen_mul_stats.fmas_inserted);
3202
3203 return cfg_changed ? TODO_cleanup_cfg : 0;
3204 }
3205
3206 } // anon namespace
3207
3208 gimple_opt_pass *
3209 make_pass_optimize_widening_mul (gcc::context *ctxt)
3210 {
3211 return new pass_optimize_widening_mul (ctxt);
3212 }