replacement MEM_REF.) */
tree cand_type;
+ /* The type to be used to interpret the stride field when the stride
+ is not a constant. Normally the same as the type of the recorded
+ stride, but when the stride has been cast we need to maintain that
+ knowledge in order to make legal substitutions without losing
+ precision. When the stride is a constant, this will be sizetype.
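+
+ For example, given
+
+ short int _1;
+ _2 = (int) _1;
+ _3 = _2 * 10;
+
+ the multiply candidate records _1 as its stride but interprets
+ it as an int, so its stride_type is int. */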
+ tree stride_type;
+
/* The kind of candidate (CAND_MULT, etc.). */
enum cand_kind kind;
|| one_basis->cand_stmt == c->cand_stmt
|| !operand_equal_p (one_basis->stride, c->stride, 0)
|| !types_compatible_p (one_basis->cand_type, c->cand_type)
+ || !types_compatible_p (one_basis->stride_type, c->stride_type)
|| !dominated_by_p (CDI_DOMINATORS,
gimple_bb (c->cand_stmt),
gimple_bb (one_basis->cand_stmt)))
static slsr_cand_t
alloc_cand_and_find_basis (enum cand_kind kind, gimple *gs, tree base,
const widest_int &index, tree stride, tree ctype,
- unsigned savings)
+ tree stype, unsigned savings)
{
slsr_cand_t c = (slsr_cand_t) obstack_alloc (&cand_obstack,
sizeof (slsr_cand));
c->stride = stride;
c->index = index;
c->cand_type = ctype;
+ c->stride_type = stype;
c->kind = kind;
c->cand_num = cand_vec.length () + 1;
c->next_interp = 0;
base_type = TREE_TYPE (arg0_base);
c = alloc_cand_and_find_basis (CAND_PHI, phi, arg0_base,
- 0, integer_one_node, base_type, savings);
+ 0, integer_one_node, base_type,
+ sizetype, savings);
/* Add the candidate to the statement-candidate mapping. */
add_cand_for_stmt (phi, c);
e.g. 'B' is widened from an 'int' in order to calculate
a 64-bit address. */
if (CONVERT_EXPR_P (base_in)
- && legal_cast_p_1 (base_in, TREE_OPERAND (base_in, 0)))
+ && legal_cast_p_1 (TREE_TYPE (base_in),
+ TREE_TYPE (TREE_OPERAND (base_in, 0))))
base_in = get_unwidened (base_in, NULL_TREE);
if (TREE_CODE (base_in) != SSA_NAME)
return;
c = alloc_cand_and_find_basis (CAND_REF, gs, base, index, offset,
- type, 0);
+ type, sizetype, 0);
/* Add the candidate to the statement-candidate mapping. */
add_cand_for_stmt (gs, c);
create_mul_ssa_cand (gimple *gs, tree base_in, tree stride_in, bool speed)
{
tree base = NULL_TREE, stride = NULL_TREE, ctype = NULL_TREE;
+ tree stype = NULL_TREE;
widest_int index;
unsigned savings = 0;
slsr_cand_t c;
index = base_cand->index;
stride = stride_in;
ctype = base_cand->cand_type;
+ stype = TREE_TYPE (stride_in);
if (has_single_use (base_in))
savings = (base_cand->dead_savings
+ stmt_cost (base_cand->cand_stmt, speed));
index = base_cand->index * wi::to_widest (base_cand->stride);
stride = stride_in;
ctype = base_cand->cand_type;
+ stype = TREE_TYPE (stride_in);
if (has_single_use (base_in))
savings = (base_cand->dead_savings
+ stmt_cost (base_cand->cand_stmt, speed));
index = 0;
stride = stride_in;
ctype = TREE_TYPE (base_in);
+ stype = TREE_TYPE (stride_in);
}
c = alloc_cand_and_find_basis (CAND_MULT, gs, base, index, stride,
- ctype, savings);
+ ctype, stype, savings);
return c;
}
}
c = alloc_cand_and_find_basis (CAND_MULT, gs, base, index, stride,
- ctype, savings);
+ ctype, sizetype, savings);
return c;
}
create_add_ssa_cand (gimple *gs, tree base_in, tree addend_in,
bool subtract_p, bool speed)
{
- tree base = NULL_TREE, stride = NULL_TREE, ctype = NULL;
+ tree base = NULL_TREE, stride = NULL_TREE, ctype = NULL_TREE;
+ tree stype = NULL_TREE;
widest_int index;
unsigned savings = 0;
slsr_cand_t c;
index = -index;
stride = addend_cand->base_expr;
ctype = TREE_TYPE (base_in);
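+ /* The stride is the multiply's base expression, but it is
+ interpreted in the type of the multiply's result. */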
+ stype = addend_cand->cand_type;
if (has_single_use (addend_in))
savings = (addend_cand->dead_savings
+ stmt_cost (addend_cand->cand_stmt, speed));
index = subtract_p ? -1 : 1;
stride = addend_in;
ctype = base_cand->cand_type;
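+ /* A constant stride is interpreted in sizetype; a variable
+ stride in its own type. */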
+ stype = (TREE_CODE (addend_in) == INTEGER_CST ? sizetype
+ : TREE_TYPE (addend_in));
if (has_single_use (base_in))
savings = (base_cand->dead_savings
+ stmt_cost (base_cand->cand_stmt, speed));
index = -index;
stride = subtrahend_cand->base_expr;
ctype = TREE_TYPE (base_in);
+ stype = subtrahend_cand->cand_type;
if (has_single_use (addend_in))
savings = (subtrahend_cand->dead_savings
+ stmt_cost (subtrahend_cand->cand_stmt, speed));
index = subtract_p ? -1 : 1;
stride = addend_in;
ctype = TREE_TYPE (base_in);
+ stype = (TREE_CODE (addend_in) == INTEGER_CST ? sizetype
+ : TREE_TYPE (addend_in));
}
c = alloc_cand_and_find_basis (CAND_ADD, gs, base, index, stride,
- ctype, savings);
+ ctype, stype, savings);
return c;
}
{
enum cand_kind kind = CAND_ADD;
tree base = NULL_TREE, stride = NULL_TREE, ctype = NULL_TREE;
+ tree stype = NULL_TREE;
widest_int index, multiple;
unsigned savings = 0;
slsr_cand_t c;
index = base_cand->index + multiple;
stride = base_cand->stride;
ctype = base_cand->cand_type;
+ stype = base_cand->stride_type;
if (has_single_use (base_in))
savings = (base_cand->dead_savings
+ stmt_cost (base_cand->cand_stmt, speed));
index = index_in;
stride = integer_one_node;
ctype = TREE_TYPE (base_in);
+ stype = sizetype;
}
c = alloc_cand_and_find_basis (kind, gs, base, index, stride,
- ctype, savings);
+ ctype, stype, savings);
return c;
}
for more details. */
static bool
-legal_cast_p_1 (tree lhs, tree rhs)
+legal_cast_p_1 (tree lhs_type, tree rhs_type)
{
- tree lhs_type, rhs_type;
unsigned lhs_size, rhs_size;
bool lhs_wraps, rhs_wraps;
- lhs_type = TREE_TYPE (lhs);
- rhs_type = TREE_TYPE (rhs);
lhs_size = TYPE_PRECISION (lhs_type);
rhs_size = TYPE_PRECISION (rhs_type);
lhs_wraps = ANY_INTEGRAL_TYPE_P (lhs_type) && TYPE_OVERFLOW_WRAPS (lhs_type);
|| !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (gs)))
return false;
- return legal_cast_p_1 (gimple_assign_lhs (gs), rhs);
+ return legal_cast_p_1 (TREE_TYPE (gimple_assign_lhs (gs)), TREE_TYPE (rhs));
}
/* Given GS which is a cast to a scalar integer type, determine whether
c = alloc_cand_and_find_basis (base_cand->kind, gs,
base_cand->base_expr,
base_cand->index, base_cand->stride,
- ctype, savings);
+ ctype, base_cand->stride_type,
+ savings);
if (base_cand->next_interp)
base_cand = lookup_cand (base_cand->next_interp);
else
The first of these is somewhat arbitrary, but the choice of
1 for the stride simplifies the logic for propagating casts
into their uses. */
- c = alloc_cand_and_find_basis (CAND_ADD, gs, rhs1,
- 0, integer_one_node, ctype, 0);
- c2 = alloc_cand_and_find_basis (CAND_MULT, gs, rhs1,
- 0, integer_one_node, ctype, 0);
+ c = alloc_cand_and_find_basis (CAND_ADD, gs, rhs1, 0,
+ integer_one_node, ctype, sizetype, 0);
+ c2 = alloc_cand_and_find_basis (CAND_MULT, gs, rhs1, 0,
+ integer_one_node, ctype, sizetype, 0);
c->next_interp = c2->cand_num;
}
c = alloc_cand_and_find_basis (base_cand->kind, gs,
base_cand->base_expr,
base_cand->index, base_cand->stride,
- base_cand->cand_type, savings);
+ base_cand->cand_type,
+ base_cand->stride_type, savings);
if (base_cand->next_interp)
base_cand = lookup_cand (base_cand->next_interp);
else
The first of these is somewhat arbitrary, but the choice of
1 for the stride simplifies the logic for propagating casts
into their uses. */
- c = alloc_cand_and_find_basis (CAND_ADD, gs, rhs1,
- 0, integer_one_node, TREE_TYPE (rhs1), 0);
- c2 = alloc_cand_and_find_basis (CAND_MULT, gs, rhs1,
- 0, integer_one_node, TREE_TYPE (rhs1), 0);
+ c = alloc_cand_and_find_basis (CAND_ADD, gs, rhs1, 0,
+ integer_one_node, TREE_TYPE (rhs1),
+ sizetype, 0);
+ c2 = alloc_cand_and_find_basis (CAND_MULT, gs, rhs1, 0,
+ integer_one_node, TREE_TYPE (rhs1),
+ sizetype, 0);
c->next_interp = c2->cand_num;
}
fputs (" + ", dump_file);
print_decs (c->index, dump_file);
fputs (") * ", dump_file);
+ if (TREE_CODE (c->stride) != INTEGER_CST
+ && c->stride_type != TREE_TYPE (c->stride))
+ {
+ fputs ("(", dump_file);
+ print_generic_expr (dump_file, c->stride_type, 0);
+ fputs (")", dump_file);
+ }
print_generic_expr (dump_file, c->stride, 0);
fputs (" : ", dump_file);
break;
fputs (" + (", dump_file);
print_decs (c->index, dump_file);
fputs (" * ", dump_file);
+ if (TREE_CODE (c->stride) != INTEGER_CST
+ && c->stride_type != TREE_TYPE (c->stride))
+ {
+ fputs ("(", dump_file);
+ print_generic_expr (dump_file, c->stride_type, 0);
+ fputs (")", dump_file);
+ }
print_generic_expr (dump_file, c->stride, 0);
fputs (") : ", dump_file);
break;
basic_block insert_bb;
gimple_stmt_iterator gsi;
tree lhs, basis_type;
- gassign *new_stmt;
+ gassign *new_stmt, *cast_stmt = NULL;
/* If the add candidate along this incoming edge has the same
index as C's hidden basis, the hidden basis represents this
new_stmt = gimple_build_assign (lhs, code, basis_name,
incr_vec[i].initializer);
}
- else if (increment == 1)
- new_stmt = gimple_build_assign (lhs, plus_code, basis_name, c->stride);
- else if (increment == -1)
- new_stmt = gimple_build_assign (lhs, MINUS_EXPR, basis_name,
- c->stride);
- else
- gcc_unreachable ();
+ else
+ {
+ tree stride;
+
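+ /* If the stride is interpreted in a type other than its own,
+ cast it to the recorded stride type first. */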
+ if (!types_compatible_p (TREE_TYPE (c->stride), c->stride_type))
+ {
+ tree cast_stride = make_temp_ssa_name (c->stride_type, NULL,
+ "slsr");
+ cast_stmt = gimple_build_assign (cast_stride, NOP_EXPR,
+ c->stride);
+ stride = cast_stride;
+ }
+ else
+ stride = c->stride;
+
+ if (increment == 1)
+ new_stmt = gimple_build_assign (lhs, plus_code, basis_name, stride);
+ else if (increment == -1)
+ new_stmt = gimple_build_assign (lhs, MINUS_EXPR, basis_name, stride);
+ else
+ gcc_unreachable ();
+ }
}
insert_bb = single_succ_p (e->src) ? e->src : split_edge (e);
gsi = gsi_last_bb (insert_bb);
if (!gsi_end_p (gsi) && is_ctrl_stmt (gsi_stmt (gsi)))
- gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT);
+ {
+ gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT);
+ if (cast_stmt)
+ {
+ gsi_insert_before (&gsi, cast_stmt, GSI_SAME_STMT);
+ gimple_set_location (cast_stmt, loc);
+ }
+ }
else
- gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
+ {
+ if (cast_stmt)
+ {
+ gsi_insert_after (&gsi, cast_stmt, GSI_NEW_STMT);
+ gimple_set_location (cast_stmt, loc);
+ }
+ gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
+ }
gimple_set_location (new_stmt, loc);
if (dump_file && (dump_flags & TDF_DETAILS))
{
+ if (cast_stmt)
+ {
+ fprintf (dump_file, "Inserting cast in block %d: ",
+ insert_bb->index);
+ print_gimple_stmt (dump_file, cast_stmt, 0, 0);
+ }
fprintf (dump_file, "Inserting in block %d: ", insert_bb->index);
print_gimple_stmt (dump_file, new_stmt, 0, 0);
}
&& !POINTER_TYPE_P (first_dep->cand_type)))
incr_vec[i].cost = COST_NEUTRAL;
- /* FORNOW: If we need to add an initializer, give up if a cast from
- the candidate's type to its stride's type can lose precision.
- This could eventually be handled better by expressly retaining the
- result of a cast to a wider type in the stride. Example:
+ /* If we need to add an initializer, give up if a cast from the
+ candidate's type to its recorded stride type can lose precision.
+ Because the recorded stride type already reflects any widening
+ cast of the stride, a stride that has been widened will not
+ trip this test. Example:
short int _1;
_2 = (int) _1;
_3 = _2 * 10;
- _4 = x + _3; ADD: x + (10 * _1) : int
+ _4 = x + _3; ADD: x + (10 * (int)_1) : int
_5 = _2 * 15;
- _6 = x + _3; ADD: x + (15 * _1) : int
-
- Right now replacing _6 would cause insertion of an initializer
- of the form "short int T = _1 * 5;" followed by a cast to
- int, which could overflow incorrectly. Had we recorded _2 or
- (int)_1 as the stride, this wouldn't happen. However, doing
- this breaks other opportunities, so this will require some
- care. */
+ _6 = x + _5; ADD: x + (15 * (int)_1) : int
+
+ Although the stride is initially a short int, the stride
+ recorded for the analysis has been widened to an int, and the
+ same widening is applied when the initializer is built.
else if (!incr_vec[i].initializer
&& TREE_CODE (first_dep->stride) != INTEGER_CST
- && !legal_cast_p_1 (first_dep->stride,
- gimple_assign_lhs (first_dep->cand_stmt)))
-
+ && !legal_cast_p_1 (first_dep->stride_type,
+ TREE_TYPE (gimple_assign_lhs
+ (first_dep->cand_stmt))))
incr_vec[i].cost = COST_INFINITE;
/* If we need to add an initializer, make sure we don't introduce
a multiply by a pointer type, which can happen in certain cast
- scenarios. FIXME: When cleaning up these cast issues, we can
- afford to introduce the multiply provided we cast out to an
- unsigned int of appropriate size. */
+ scenarios. */
else if (!incr_vec[i].initializer
&& TREE_CODE (first_dep->stride) != INTEGER_CST
- && POINTER_TYPE_P (TREE_TYPE (first_dep->stride)))
-
+ && POINTER_TYPE_P (first_dep->stride_type))
incr_vec[i].cost = COST_INFINITE;
/* For any other increment, if this is a multiply candidate, we
basic_block bb;
slsr_cand_t where = NULL;
gassign *init_stmt;
- tree stride_type, new_name, incr_tree;
+ gassign *cast_stmt = NULL;
+ tree new_name, incr_tree, init_stride;
widest_int incr = incr_vec[i].incr;
if (!profitable_increment_p (i)
that block, the earliest one will be returned in WHERE. */
bb = nearest_common_dominator_for_cands (c, incr, &where);
+ /* If the nominal stride has a different type than the recorded
+ stride type, build a cast from the nominal stride to that type. */
+ if (!types_compatible_p (TREE_TYPE (c->stride), c->stride_type))
+ {
+ init_stride = make_temp_ssa_name (c->stride_type, NULL, "slsr");
+ cast_stmt = gimple_build_assign (init_stride, NOP_EXPR, c->stride);
+ }
+ else
+ init_stride = c->stride;
+
/* Create a new SSA name to hold the initializer's value. */
- stride_type = TREE_TYPE (c->stride);
- new_name = make_temp_ssa_name (stride_type, NULL, "slsr");
+ new_name = make_temp_ssa_name (c->stride_type, NULL, "slsr");
incr_vec[i].initializer = new_name;
/* Create the initializer and insert it in the latest possible
dominating position. */
- incr_tree = wide_int_to_tree (stride_type, incr);
+ incr_tree = wide_int_to_tree (c->stride_type, incr);
init_stmt = gimple_build_assign (new_name, MULT_EXPR,
- c->stride, incr_tree);
+ init_stride, incr_tree);
if (where)
{
gimple_stmt_iterator gsi = gsi_for_stmt (where->cand_stmt);
+ location_t loc = gimple_location (where->cand_stmt);
+
+ if (cast_stmt)
+ {
+ gsi_insert_before (&gsi, cast_stmt, GSI_SAME_STMT);
+ gimple_set_location (cast_stmt, loc);
+ }
+
gsi_insert_before (&gsi, init_stmt, GSI_SAME_STMT);
- gimple_set_location (init_stmt, gimple_location (where->cand_stmt));
+ gimple_set_location (init_stmt, loc);
}
else
{
gimple_stmt_iterator gsi = gsi_last_bb (bb);
gimple *basis_stmt = lookup_cand (c->basis)->cand_stmt;
+ location_t loc = gimple_location (basis_stmt);
if (!gsi_end_p (gsi) && is_ctrl_stmt (gsi_stmt (gsi)))
- gsi_insert_before (&gsi, init_stmt, GSI_SAME_STMT);
+ {
+ if (cast_stmt)
+ {
+ gsi_insert_before (&gsi, cast_stmt, GSI_SAME_STMT);
+ gimple_set_location (cast_stmt, loc);
+ }
+ gsi_insert_before (&gsi, init_stmt, GSI_SAME_STMT);
+ }
else
- gsi_insert_after (&gsi, init_stmt, GSI_SAME_STMT);
+ {
+ if (cast_stmt)
+ {
+ gsi_insert_after (&gsi, cast_stmt, GSI_NEW_STMT);
+ gimple_set_location (cast_stmt, loc);
+ }
+ gsi_insert_after (&gsi, init_stmt, GSI_SAME_STMT);
+ }
- gimple_set_location (init_stmt, gimple_location (basis_stmt));
+ gimple_set_location (init_stmt, loc);
}
if (dump_file && (dump_flags & TDF_DETAILS))
{
+ if (cast_stmt)
+ {
+ fputs ("Inserting stride cast: ", dump_file);
+ print_gimple_stmt (dump_file, cast_stmt, 0, 0);
+ }
fputs ("Inserting initializer: ", dump_file);
print_gimple_stmt (dump_file, init_stmt, 0, 0);
}