-2019-05-16 Sebastian Huber <sebastian.huber@embedded-brains.de>
-
- * config/arm/t-rtems: Replace ARMv7-M multilibs with Cortex-M
- multilibs.
-
-2019-05-16 Richard Biener <rguenther@suse.de>
-
- PR tree-optimization/90424
- * tree-ssa.c (non_rewritable_lvalue_p): Handle inserts from
- aligned subvectors.
- (execute_update_addresses_taken): Likewise.
- * tree-cfg.c (verify_gimple_assign_ternary): Likewise.
-
-2019-05-16 Richard Biener <rguenther@suse.de>
-
- * gimple-pretty-print.c (dump_ternary_rhs): Dump BIT_INSERT_EXPR
- as __BIT_INSERT with -gimple.
+2019-05-16 Jun Ma <JunMa@linux.alibaba.com>
+
+ PR tree-optimization/90106
+ * tree-call-cdce.c (shrink_wrap_one_built_in_call_with_conds): Add a
+ new parameter for the new internal function call, and move that call
+ to a new basic block.
+ (use_internal_fn): Pass the internal function call to
+ shrink_wrap_one_built_in_call_with_conds.
2019-05-15 Jakub Jelinek <jakub@redhat.com>
y = sqrt (x);
==>
- y = IFN_SQRT (x);
if (__builtin_isless (x, 0))
- sqrt (x);
-
+ y = sqrt (x);
+ else
+ y = IFN_SQRT (x);
In the vast majority of cases we should then never need to call sqrt.
Note that library functions are not supposed to clear errno to zero without
}
/* Shrink-wrap BI_CALL so that it is only called when one of the NCONDS
- conditions in CONDS is false. */
+ conditions in CONDS is false.  Also, when BI_NEWCALL is non-null, move it
+ to a new basic block; it is called only while all of the CONDS are true. */
static void
shrink_wrap_one_built_in_call_with_conds (gcall *bi_call, vec <gimple *> conds,
- unsigned int nconds)
+ unsigned int nconds,
+ gcall *bi_newcall = NULL)
{
gimple_stmt_iterator bi_call_bsi;
- basic_block bi_call_bb, join_tgt_bb, guard_bb;
+ basic_block bi_call_bb, bi_newcall_bb, join_tgt_bb, guard_bb;
edge join_tgt_in_edge_from_call, join_tgt_in_edge_fall_thru;
edge bi_call_in_edge0, guard_bb_in_edge;
unsigned tn_cond_stmts;
gimple *cond_expr_start;
/* The cfg we want to create looks like this:
-
- [guard n-1] <- guard_bb (old block)
- | \
- | [guard n-2] }
- | / \ }
- | / ... } new blocks
- | / [guard 0] }
- | / / | }
- [ call ] | <- bi_call_bb }
- | \ |
- | \ |
- | [ join ] <- join_tgt_bb (old iff call must end bb)
- |
+ [guard n-1] <- guard_bb (old block)
+ | \
+ | [guard n-2] }
+ | / \ }
+ | / ... } new blocks
+ | / [guard 0] }
+ | / / | }
+ [call] | <- bi_call_bb }
+ \ [newcall] <-bi_newcall_bb}
+ \ |
+ [join] <- join_tgt_bb (old iff call must end bb)
possible EH edges (only if [join] is old)
When [join] is new, the immediate dominators for these blocks are:
1. [guard n-1]: unchanged
2. [call]: [guard n-1]
- 3. [guard m]: [guard m+1] for 0 <= m <= n-2
- 4. [join]: [guard n-1]
+ 3. [newcall]: [guard 0]
+ 4. [guard m]: [guard m+1] for 0 <= m <= n-2
+ 5. [join]: [guard n-1]
We punt for the more complex case of [join] being old and
simply free the dominance info. We also punt on postdominators,
edges.quick_push (edge_pair (bi_call_in_edge, guard_bb_in_edge));
}
+ /* Move BI_NEWCALL to a new basic block when it is non-null. */
+ if (bi_newcall)
+ {
+ /* Get bi_newcall_bb by splitting the join_tgt_in_edge_fall_thru edge,
+ and move BI_NEWCALL to bi_newcall_bb. */
+ bi_newcall_bb = split_edge (join_tgt_in_edge_fall_thru);
+ gimple_stmt_iterator to_gsi = gsi_start_bb (bi_newcall_bb);
+ gimple_stmt_iterator from_gsi = gsi_for_stmt (bi_newcall);
+ gsi_move_before (&from_gsi, &to_gsi);
+ join_tgt_in_edge_fall_thru = EDGE_SUCC (bi_newcall_bb, 0);
+ join_tgt_bb = join_tgt_in_edge_fall_thru->dest;
+
+ tree bi_newcall_lhs = gimple_call_lhs (bi_newcall);
+ tree bi_call_lhs = gimple_call_lhs (bi_call);
+ if (!bi_call_lhs)
+ {
+ bi_call_lhs = copy_ssa_name (bi_newcall_lhs);
+ gimple_call_set_lhs (bi_call, bi_call_lhs);
+ SSA_NAME_DEF_STMT (bi_call_lhs) = bi_call;
+ }
+
+ /* Create phi node for lhs of BI_CALL and BI_NEWCALL. */
+ gphi *new_phi = create_phi_node (copy_ssa_name (bi_newcall_lhs),
+ join_tgt_bb);
+ SSA_NAME_OCCURS_IN_ABNORMAL_PHI (PHI_RESULT (new_phi))
+ = SSA_NAME_OCCURS_IN_ABNORMAL_PHI (bi_newcall_lhs);
+ add_phi_arg (new_phi, bi_call_lhs, join_tgt_in_edge_from_call,
+ gimple_location (bi_call));
+ add_phi_arg (new_phi, bi_newcall_lhs, join_tgt_in_edge_fall_thru,
+ gimple_location (bi_newcall));
+
+ /* Replace all uses of the original return value with the result of the phi node. */
+ use_operand_p use_p;
+ gimple *use_stmt;
+ imm_use_iterator iterator;
+ FOR_EACH_IMM_USE_STMT (use_stmt, iterator, bi_newcall_lhs)
+ if (use_stmt != new_phi)
+ FOR_EACH_IMM_USE_ON_STMT (use_p, iterator)
+ SET_USE (use_p, PHI_RESULT (new_phi));
+ }
+
/* Now update the probability and profile information, processing the
guards in order of execution.
unsigned nconds = 0;
auto_vec<gimple *, 12> conds;
+ bool is_arg_conds = false;
if (can_test_argument_range (call))
{
gen_shrink_wrap_conditions (call, conds, &nconds);
+ is_arg_conds = true;
gcc_assert (nconds != 0);
}
else
call = new_call;
}
}
-
- shrink_wrap_one_built_in_call_with_conds (call, conds, nconds);
+ shrink_wrap_one_built_in_call_with_conds (call, conds, nconds,
+ is_arg_conds ? new_call : NULL);
}
/* The top level function for conditional dead code shrink