}
/* Expand a complex multiplication or division to a libcall to the c99
- compliant routines. */
+ compliant routines. TYPE is the complex type of the operation.
+ If INPLACE_P is true, replace the assignment statement at GSI with
+ the libcall and return NULL_TREE; else insert the call before GSI,
+ assign its result to a new output variable and return that
+ variable. */
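+
+/* Illustrative note (not part of the original patch): with INPLACE_P
+ false, the caller receives the SSA name holding the complex result
+ and unpacks it itself, e.g.
+
+ tree res = expand_complex_libcall (gsi, type, ar, ai, br, bi,
+ MULT_EXPR, false);
+ tree res_r = gimplify_build1 (gsi, REALPART_EXPR, TREE_TYPE (type), res);
+ tree res_i = gimplify_build1 (gsi, IMAGPART_EXPR, TREE_TYPE (type), res);
+
+ which is exactly how the out-of-line path in
+ expand_complex_multiplication below uses it. */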
-static void
-expand_complex_libcall (gimple_stmt_iterator *gsi, tree ar, tree ai,
- tree br, tree bi, enum tree_code code)
+static tree
+expand_complex_libcall (gimple_stmt_iterator *gsi, tree type, tree ar, tree ai,
+ tree br, tree bi, enum tree_code code, bool inplace_p)
{
machine_mode mode;
enum built_in_function bcode;
- tree fn, type, lhs;
- gimple *old_stmt;
+ tree fn, lhs;
gcall *stmt;
- old_stmt = gsi_stmt (*gsi);
- lhs = gimple_assign_lhs (old_stmt);
- type = TREE_TYPE (lhs);
-
mode = TYPE_MODE (type);
gcc_assert (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT);

/* Find the right builtin function. */
switch (code)
{
case MULT_EXPR:
bcode = ((enum built_in_function)
(BUILT_IN_COMPLEX_MUL_MIN + mode - MIN_MODE_COMPLEX_FLOAT));
break;
case RDIV_EXPR:
bcode = ((enum built_in_function)
(BUILT_IN_COMPLEX_DIV_MIN + mode - MIN_MODE_COMPLEX_FLOAT));
break;
default:
gcc_unreachable ();
}
fn = builtin_decl_explicit (bcode);
stmt = gimple_build_call (fn, 4, ar, ai, br, bi);
- gimple_call_set_lhs (stmt, lhs);
- update_stmt (stmt);
- gsi_replace (gsi, stmt, false);
- if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
- gimple_purge_dead_eh_edges (gsi_bb (*gsi));
- if (gimple_in_ssa_p (cfun))
+ if (inplace_p)
{
+ gimple *old_stmt = gsi_stmt (*gsi);
+ lhs = gimple_assign_lhs (old_stmt);
+ gimple_call_set_lhs (stmt, lhs);
+ update_stmt (stmt);
+ gsi_replace (gsi, stmt, false);
+
+ if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
+ gimple_purge_dead_eh_edges (gsi_bb (*gsi));
+
type = TREE_TYPE (type);
update_complex_components (gsi, stmt,
- build1 (REALPART_EXPR, type, lhs),
- build1 (IMAGPART_EXPR, type, lhs));
+ build1 (REALPART_EXPR, type, lhs),
+ build1 (IMAGPART_EXPR, type, lhs));
SSA_NAME_DEF_STMT (lhs) = stmt;
+ return NULL_TREE;
}
+
+ lhs = make_ssa_name (create_tmp_var (type), stmt);
+ gimple_call_set_lhs (stmt, lhs);
+
+ update_stmt (stmt);
+ gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
+ return lhs;
+}
+
+/* Perform a complex multiplication on two complex operands A, B represented
+ by AR, AI, BR, BI of type TYPE.
+ The operation we want is: a * b = (ar*br - ai*bi) + i(ar*bi + br*ai).
+ Insert the GIMPLE statements into GSI. Store the real and imaginary
+ components of the result into RR and RI. */
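+
+/* A worked instance of the formula, purely for illustration:
+ (1 + 2i) * (3 + 4i) = (1*3 - 2*4) + i(1*4 + 3*2) = -5 + 10i. */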
+
+static void
+expand_complex_multiplication_components (gimple_stmt_iterator *gsi,
+ tree type, tree ar, tree ai,
+ tree br, tree bi,
+ tree *rr, tree *ri)
+{
+ tree t1, t2, t3, t4;
+
+ t1 = gimplify_build2 (gsi, MULT_EXPR, type, ar, br);
+ t2 = gimplify_build2 (gsi, MULT_EXPR, type, ai, bi);
+ t3 = gimplify_build2 (gsi, MULT_EXPR, type, ar, bi);
+
+ /* Avoid expanding redundant multiplication for the common
+ case of squaring a complex number. */
+ if (ar == br && ai == bi)
+ t4 = t3;
+ else
+ t4 = gimplify_build2 (gsi, MULT_EXPR, type, ai, br);
+
+ *rr = gimplify_build2 (gsi, MINUS_EXPR, type, t1, t2);
+ *ri = gimplify_build2 (gsi, PLUS_EXPR, type, t3, t4);
+}
+
/* Expand complex multiplication to scalars:
 a * b = (ar*br - ai*bi) + i(ar*bi + br*ai)
*/
static void
-expand_complex_multiplication (gimple_stmt_iterator *gsi, tree inner_type,
+expand_complex_multiplication (gimple_stmt_iterator *gsi, tree type,
tree ar, tree ai, tree br, tree bi,
complex_lattice_t al, complex_lattice_t bl)
{
tree rr, ri;
+ tree inner_type = TREE_TYPE (type);
if (al < bl)
{
std::swap (al, bl);
std::swap (ar, br);
std::swap (ai, bi);
}

switch (PAIR (al, bl))
{
case PAIR (VARYING, VARYING):
if (flag_complex_method == 2 && SCALAR_FLOAT_TYPE_P (inner_type))
{
- expand_complex_libcall (gsi, ar, ai, br, bi, MULT_EXPR);
- return;
- }
- else
- {
- tree t1, t2, t3, t4;
-
- t1 = gimplify_build2 (gsi, MULT_EXPR, inner_type, ar, br);
- t2 = gimplify_build2 (gsi, MULT_EXPR, inner_type, ai, bi);
- t3 = gimplify_build2 (gsi, MULT_EXPR, inner_type, ar, bi);
-
- /* Avoid expanding redundant multiplication for the common
- case of squaring a complex number. */
- if (ar == br && ai == bi)
- t4 = t3;
- else
- t4 = gimplify_build2 (gsi, MULT_EXPR, inner_type, ai, br);
+ /* If optimizing for size or not at all, just do a libcall.
+ Same if there are exception-handling edges or signaling NaNs. */
+ if (optimize == 0 || optimize_bb_for_size_p (gsi_bb (*gsi))
+ || stmt_can_throw_internal (gsi_stmt (*gsi))
+ || flag_signaling_nans)
+ {
+ expand_complex_libcall (gsi, type, ar, ai, br, bi,
+ MULT_EXPR, true);
+ return;
+ }
- rr = gimplify_build2 (gsi, MINUS_EXPR, inner_type, t1, t2);
- ri = gimplify_build2 (gsi, PLUS_EXPR, inner_type, t3, t4);
+ /* Else, expand x = a * b into
+ x = (ar*br - ai*bi) + i(ar*bi + br*ai);
+ if (isunordered (__real__ x, __imag__ x))
+ x = __muldc3 (a, b); */
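+
+ /* Explanatory note, not in the original comment: the inline
+ expansion thus runs on the hot path, and __muldc3 is only called
+ when either component of the inline result is NaN, the case in
+ which C99 Annex G may require recovering an infinite result. */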
+
+ tree tmpr, tmpi;
+ expand_complex_multiplication_components (gsi, inner_type, ar, ai,
+ br, bi, &tmpr, &tmpi);
+
+ gimple *check
+ = gimple_build_cond (UNORDERED_EXPR, tmpr, tmpi,
+ NULL_TREE, NULL_TREE);
+
+ basic_block orig_bb = gsi_bb (*gsi);
+ /* We want to keep track of the original complex multiplication
+ statement as we're going to modify it later in
+ update_complex_assignment. Make sure that insert_cond_bb leaves
+ that statement in the join block. */
+ gsi_prev (gsi);
+ basic_block cond_bb
+ = insert_cond_bb (gsi_bb (*gsi), gsi_stmt (*gsi), check,
+ profile_probability::very_unlikely ());
+
+ gimple_stmt_iterator cond_bb_gsi = gsi_last_bb (cond_bb);
+ gsi_insert_after (&cond_bb_gsi, gimple_build_nop (), GSI_NEW_STMT);
+
+ tree libcall_res
+ = expand_complex_libcall (&cond_bb_gsi, type, ar, ai, br,
+ bi, MULT_EXPR, false);
+ tree cond_real = gimplify_build1 (&cond_bb_gsi, REALPART_EXPR,
+ inner_type, libcall_res);
+ tree cond_imag = gimplify_build1 (&cond_bb_gsi, IMAGPART_EXPR,
+ inner_type, libcall_res);
+
+ basic_block join_bb = single_succ_edge (cond_bb)->dest;
+ *gsi = gsi_start_nondebug_after_labels_bb (join_bb);
+
+ /* We have a conditional block with some assignments in cond_bb.
+ Wire up the PHIs to wrap up. */
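+ /* Illustration (not from the original patch) of the resulting CFG:
+
+ orig_bb: tmpr = ...; tmpi = ...;
+ if (UNORDERED (tmpr, tmpi)) goto cond_bb; else goto join_bb;
+ cond_bb: res = __muldc3 (ar, ai, br, bi);
+ cond_real = REALPART_EXPR <res>; cond_imag = IMAGPART_EXPR <res>;
+ join_bb: rr = PHI <tmpr (orig_bb), cond_real (cond_bb)>
+ ri = PHI <tmpi (orig_bb), cond_imag (cond_bb)> */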
+ rr = make_ssa_name (inner_type);
+ ri = make_ssa_name (inner_type);
+ edge cond_to_join = single_succ_edge (cond_bb);
+ edge orig_to_join = find_edge (orig_bb, join_bb);
+
+ gphi *real_phi = create_phi_node (rr, gsi_bb (*gsi));
+ add_phi_arg (real_phi, cond_real, cond_to_join,
+ UNKNOWN_LOCATION);
+ add_phi_arg (real_phi, tmpr, orig_to_join, UNKNOWN_LOCATION);
+
+ gphi *imag_phi = create_phi_node (ri, gsi_bb (*gsi));
+ add_phi_arg (imag_phi, cond_imag, cond_to_join,
+ UNKNOWN_LOCATION);
+ add_phi_arg (imag_phi, tmpi, orig_to_join, UNKNOWN_LOCATION);
}
+ else
+ /* If we are not worrying about NaNs, expand to
+ (ar*br - ai*bi) + i(ar*bi + br*ai) directly. */
+ expand_complex_multiplication_components (gsi, inner_type, ar, ai,
+ br, bi, &rr, &ri);
break;
default:
/* Expand complex division to scalars. */
static void
-expand_complex_division (gimple_stmt_iterator *gsi, tree inner_type,
+expand_complex_division (gimple_stmt_iterator *gsi, tree type,
tree ar, tree ai, tree br, tree bi,
enum tree_code code,
complex_lattice_t al, complex_lattice_t bl)
{
tree rr, ri;
+ tree inner_type = TREE_TYPE (type);
switch (PAIR (al, bl))
{
case PAIR (ONLY_REAL, ONLY_REAL):
case 2:
if (SCALAR_FLOAT_TYPE_P (inner_type))
{
- expand_complex_libcall (gsi, ar, ai, br, bi, code);
+ expand_complex_libcall (gsi, type, ar, ai, br, bi, code, true);
break;
}
/* FALLTHRU */
break;
case MULT_EXPR:
- expand_complex_multiplication (gsi, inner_type, ar, ai, br, bi, al, bl);
+ expand_complex_multiplication (gsi, type, ar, ai, br, bi, al, bl);
break;
case TRUNC_DIV_EXPR:
case FLOOR_DIV_EXPR:
case ROUND_DIV_EXPR:
case RDIV_EXPR:
- expand_complex_division (gsi, inner_type, ar, ai, br, bi, code, al, bl);
+ expand_complex_division (gsi, type, ar, ai, br, bi, code, al, bl);
break;
case NEGATE_EXPR: