+2017-12-20 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * tree-affine.h (aff_tree::offset): Change from widest_int
+ to poly_widest_int.
+ (wide_int_ext_for_comb): Delete.
+ (aff_combination_const, aff_comb_cannot_overlap_p): Take the
+ constants as poly_widest_int rather than widest_int.
+ (aff_combination_constant_multiple_p): Return the multiplier
+ as a poly_widest_int.
+ (aff_combination_zero_p, aff_combination_singleton_var_p): Handle
+ polynomial offsets.
+ * tree-affine.c (wide_int_ext_for_comb): Make original widest_int
+ version static and add an overload for poly_widest_int.
+ (aff_combination_const, aff_combination_add_cst)
+ (wide_int_constant_multiple_p, aff_comb_cannot_overlap_p): Take
+ the constants as poly_widest_int rather than widest_int.
+ (tree_to_aff_combination): Generalize INTEGER_CST case to
+ poly_int_tree_p.
+ (aff_combination_to_tree): Track offsets as poly_widest_ints.
+ (aff_combination_add_product, aff_combination_mult): Handle
+ polynomial offsets.
+ (aff_combination_constant_multiple_p): Return the multiplier
+ as a poly_widest_int.
+ * tree-predcom.c (determine_offset): Return the offset as a
+ poly_widest_int.
+ (split_data_refs_to_components, suitable_component_p): Update
+ accordingly.
+ (valid_initializer_p): Update call to
+ aff_combination_constant_multiple_p.
+ * tree-ssa-address.c (addr_to_parts): Handle polynomial offsets.
+ * tree-ssa-loop-ivopts.c (get_address_cost_ainc): Take the step
+ as a poly_int64 rather than a HOST_WIDE_INT.
+ (get_address_cost): Handle polynomial offsets.
+ (iv_elimination_compare_lt): Likewise.
+ (rewrite_use_nonlinear_expr): Likewise.
+
2017-12-20 Richard Sandiford <richard.sandiford@linaro.org>
Alan Hayward <alan.hayward@arm.com>
David Sherwood <david.sherwood@arm.com>
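
(A quick orientation before the hunks: throughout the patch, scalar comparisons
such as "offset == 0" or wi::neg_p (offset) become the poly-int predicates
known_eq, maybe_ne, known_lt and so on.  The standalone mock below only
illustrates their semantics for a value A + B*N with a single nonnegative
runtime indeterminate N; it is not the real poly-int.h implementation, the
predicate names merely mirror the real ones and the poly2 type is invented
for the example.)

/* Illustration only: a compile-time view of the value A + B*N, where N is
   a runtime quantity that is known to be nonnegative.  */
struct poly2
{
  long a;	/* constant term */
  long b;	/* coefficient of the runtime indeterminate N */
};

/* The values are equal for every possible N.  */
static bool
known_eq (poly2 p, poly2 q)
{
  return p.a == q.a && p.b == q.b;
}

/* The values might differ for some N; this is what replaces "!=" on
   plain widest_ints.  */
static bool
maybe_ne (poly2 p, poly2 q)
{
  return !known_eq (p, q);
}

/* P is less than Q for every nonnegative N.  */
static bool
known_lt (poly2 p, poly2 q)
{
  return p.a < q.a && p.b <= q.b;
}

/* P is less than Q for at least one nonnegative N.  */
static bool
maybe_lt (poly2 p, poly2 q)
{
  return p.a < q.a || p.b < q.b;
}
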
/* Extends CST as appropriate for an affine combination of type TYPE. */
-widest_int
+static widest_int
wide_int_ext_for_comb (const widest_int &cst, tree type)
{
return wi::sext (cst, TYPE_PRECISION (type));
}
+/* Likewise for polynomial offsets. */
+
+static poly_widest_int
+wide_int_ext_for_comb (const poly_widest_int &cst, tree type)
+{
+ return wi::sext (cst, TYPE_PRECISION (type));
+}
+
/* Initializes affine combination COMB so that its value is zero in TYPE. */
static void
/* Sets COMB to CST. */
void
-aff_combination_const (aff_tree *comb, tree type, const widest_int &cst)
+aff_combination_const (aff_tree *comb, tree type, const poly_widest_int &cst)
{
aff_combination_zero (comb, type);
comb->offset = wide_int_ext_for_comb (cst, comb->type);
/* Adds CST to C. */
static void
-aff_combination_add_cst (aff_tree *c, const widest_int &cst)
+aff_combination_add_cst (aff_tree *c, const poly_widest_int &cst)
{
c->offset = wide_int_ext_for_comb (c->offset + cst, c->type);
}
code = TREE_CODE (expr);
switch (code)
{
- case INTEGER_CST:
- aff_combination_const (comb, type, wi::to_widest (expr));
- return;
-
case POINTER_PLUS_EXPR:
tree_to_aff_combination (TREE_OPERAND (expr, 0), type, comb);
tree_to_aff_combination (TREE_OPERAND (expr, 1), sizetype, &tmp);
break;
default:
- break;
+ {
+ if (poly_int_tree_p (expr))
+ {
+ aff_combination_const (comb, type, wi::to_poly_widest (expr));
+ return;
+ }
+ break;
+ }
}
aff_combination_elt (comb, type, expr);
{
tree type = comb->type, base = NULL_TREE, expr = NULL_TREE;
unsigned i;
- widest_int off, sgn;
+ poly_widest_int off;
+ int sgn;
gcc_assert (comb->n == MAX_AFF_ELTS || comb->rest == NULL_TREE);
/* Ensure that we get x - 1, not x + (-1) or x + 0xff..f if x is
unsigned. */
- if (wi::neg_p (comb->offset))
+ if (known_lt (comb->offset, 0))
{
off = -comb->offset;
sgn = -1;
}
if (val)
- aff_combination_add_elt (r, val, coef * c->offset);
+ {
+ if (c->offset.is_constant ())
+ /* Access coeffs[0] directly, for efficiency. */
+ aff_combination_add_elt (r, val, coef * c->offset.coeffs[0]);
+ else
+ {
+ /* c->offset is polynomial, so multiply VAL rather than COEF
+ by it. */
+ tree offset = wide_int_to_tree (TREE_TYPE (val), c->offset);
+ val = fold_build2 (MULT_EXPR, TREE_TYPE (val), val, offset);
+ aff_combination_add_elt (r, val, coef);
+ }
+ }
else
aff_combination_add_cst (r, coef * c->offset);
}
aff_combination_add_product (c1, c2->elts[i].coef, c2->elts[i].val, r);
if (c2->rest)
aff_combination_add_product (c1, 1, c2->rest, r);
- aff_combination_add_product (c1, c2->offset, NULL, r);
+ if (c2->offset.is_constant ())
+ /* Access coeffs[0] directly, for efficiency. */
+ aff_combination_add_product (c1, c2->offset.coeffs[0], NULL, r);
+ else
+ {
+ /* c2->offset is polynomial, so do the multiplication in tree form. */
+ tree offset = wide_int_to_tree (c2->type, c2->offset);
+ aff_combination_add_product (c1, 1, offset, r);
+ }
}
/* Returns the element of COMB whose value is VAL, or NULL if no such
is set to true. */
static bool
-wide_int_constant_multiple_p (const widest_int &val, const widest_int &div,
- bool *mult_set, widest_int *mult)
+wide_int_constant_multiple_p (const poly_widest_int &val,
+ const poly_widest_int &div,
+ bool *mult_set, poly_widest_int *mult)
{
- widest_int rem, cst;
+ poly_widest_int rem, cst;
- if (val == 0)
+ if (known_eq (val, 0))
{
- if (*mult_set && *mult != 0)
+ if (*mult_set && maybe_ne (*mult, 0))
return false;
*mult_set = true;
*mult = 0;
return true;
}
- if (div == 0)
+ if (maybe_eq (div, 0))
return false;
- if (!wi::multiple_of_p (val, div, SIGNED, &cst))
+ if (!multiple_p (val, div, &cst))
return false;
- if (*mult_set && *mult != cst)
+ if (*mult_set && maybe_ne (*mult, cst))
return false;
*mult_set = true;
bool
aff_combination_constant_multiple_p (aff_tree *val, aff_tree *div,
- widest_int *mult)
+ poly_widest_int *mult)
{
bool mult_set = false;
unsigned i;
- if (val->n == 0 && val->offset == 0)
+ if (val->n == 0 && known_eq (val->offset, 0))
{
*mult = 0;
return true;
size SIZE2 at position DIFF cannot overlap. */
bool
-aff_comb_cannot_overlap_p (aff_tree *diff, const widest_int &size1,
- const widest_int &size2)
+aff_comb_cannot_overlap_p (aff_tree *diff, const poly_widest_int &size1,
+ const poly_widest_int &size2)
{
/* Unless the difference is a constant, we fail. */
if (diff->n != 0)
return false;
- if (wi::neg_p (diff->offset))
+ if (!ordered_p (diff->offset, 0))
+ return false;
+
+ if (maybe_lt (diff->offset, 0))
{
/* The second object is before the first one; we succeed if the last
element of the second object is before the start of the first one. */
- return wi::neg_p (diff->offset + size2 - 1);
+ return known_le (diff->offset + size2, 0);
}
else
{
/* We succeed if the second object starts after the first one ends. */
- return size1 <= diff->offset;
+ return known_le (size1, diff->offset);
}
}
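
(The new ordered_p bail-out above simply answers "might overlap" whenever the
sign of the polynomial offset cannot be decided at compile time; once the sign
is known, the interval reasoning is unchanged.  A minimal scalar sketch of
that reasoning, using plain integers and a hypothetical helper rather than the
aff_tree code itself:)

/* Hypothetical scalar illustration of the test in aff_comb_cannot_overlap_p:
   the second object starts DIFF bytes after the first, and the objects
   occupy SIZE1 and SIZE2 bytes respectively.  */
static bool
cannot_overlap (long diff, long size1, long size2)
{
  if (diff < 0)
    /* The second object starts before the first one; it cannot overlap
       if it also ends before the first one starts.  */
    return diff + size2 <= 0;
  else
    /* Otherwise it cannot overlap if it starts at or after the point
       where the first object ends.  */
    return size1 <= diff;
}
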
tree type;
/* Constant offset. */
- widest_int offset;
+ poly_widest_int offset;
/* Number of elements of the combination. */
unsigned n;
struct name_expansion;
-widest_int wide_int_ext_for_comb (const widest_int &, aff_tree *);
-void aff_combination_const (aff_tree *, tree, const widest_int &);
+void aff_combination_const (aff_tree *, tree, const poly_widest_int &);
void aff_combination_elt (aff_tree *, tree, tree);
void aff_combination_scale (aff_tree *, const widest_int &);
void aff_combination_mult (aff_tree *, aff_tree *, aff_tree *);
void tree_to_aff_combination (tree, tree, aff_tree *);
tree aff_combination_to_tree (aff_tree *);
void unshare_aff_combination (aff_tree *);
-bool aff_combination_constant_multiple_p (aff_tree *, aff_tree *, widest_int *);
+bool aff_combination_constant_multiple_p (aff_tree *, aff_tree *,
+ poly_widest_int *);
void aff_combination_expand (aff_tree *, hash_map<tree, name_expansion *> **);
void tree_to_aff_combination_expand (tree, tree, aff_tree *,
hash_map<tree, name_expansion *> **);
tree get_inner_reference_aff (tree, aff_tree *, widest_int *);
void free_affine_expand_cache (hash_map<tree, name_expansion *> **);
-bool aff_comb_cannot_overlap_p (aff_tree *, const widest_int &,
- const widest_int &);
+bool aff_comb_cannot_overlap_p (aff_tree *, const poly_widest_int &,
+ const poly_widest_int &);
/* Debugging functions. */
void debug_aff (aff_tree *);
if (!aff)
return true;
- if (aff->n == 0 && aff->offset == 0)
+ if (aff->n == 0 && known_eq (aff->offset, 0))
return true;
return false;
aff_combination_singleton_var_p (aff_tree *aff)
{
return (aff->n == 1
- && aff->offset == 0
+ && known_eq (aff->offset, 0)
&& (aff->elts[0].coef == 1 || aff->elts[0].coef == -1));
}
#endif /* GCC_TREE_AFFINE_H */
static bool
determine_offset (struct data_reference *a, struct data_reference *b,
- widest_int *off)
+ poly_widest_int *off)
{
aff_tree diff, baseb, step;
tree typea, typeb;
FOR_EACH_VEC_ELT (depends, i, ddr)
{
- widest_int dummy_off;
+ poly_widest_int dummy_off;
if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
continue;
for (i = 1; comp->refs.iterate (i, &a); i++)
{
- if (!determine_offset (first->ref, a->ref, &a->offset))
+ /* Polynomial offsets are no use, since we need to know the
+ gap between iteration numbers at compile time. */
+ poly_widest_int offset;
+ if (!determine_offset (first->ref, a->ref, &offset)
+ || !offset.is_constant (&a->offset))
return false;
enum ref_step_type a_step;
unsigned distance, struct data_reference *root)
{
aff_tree diff, base, step;
- widest_int off;
+ poly_widest_int off;
/* Both REF and ROOT must be accessing the same object. */
if (!operand_equal_p (DR_BASE_ADDRESS (ref), DR_BASE_ADDRESS (root), 0))
if (!aff_combination_constant_multiple_p (&diff, &step, &off))
return false;
- if (off != distance)
+ if (maybe_ne (off, distance))
return false;
return true;
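
(As the comment in split_data_refs_to_components above says, predictive
commoning needs the gap between references as a compile-time constant, so a
polynomial offset is rejected via is_constant.  In terms of a two-coefficient
value A + B*N, this mock illustrates the idea; it is not the real poly_int
API.)

/* Illustration only: a value A + B*N with a nonnegative runtime N.  */
struct poly2 { long a, b; };

/* Roughly what offset.is_constant (&a->offset) does above: succeed only
   when the value does not depend on N, and then hand back the constant.  */
static bool
is_constant (const poly2 &p, long *value)
{
  if (p.b != 0)
    return false;	/* The gap depends on a runtime quantity; reject.  */
  *value = p.a;
  return true;
}
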
parts->index = NULL_TREE;
parts->step = NULL_TREE;
- if (addr->offset != 0)
+ if (maybe_ne (addr->offset, 0))
parts->offset = wide_int_to_tree (sizetype, addr->offset);
else
parts->offset = NULL_TREE;
};
static comp_cost
-get_address_cost_ainc (HOST_WIDE_INT ainc_step, HOST_WIDE_INT ainc_offset,
+get_address_cost_ainc (poly_int64 ainc_step, poly_int64 ainc_offset,
machine_mode addr_mode, machine_mode mem_mode,
addr_space_t as, bool speed)
{
}
HOST_WIDE_INT msize = GET_MODE_SIZE (mem_mode);
- if (ainc_offset == 0 && msize == ainc_step)
+ if (known_eq (ainc_offset, 0) && known_eq (msize, ainc_step))
return comp_cost (data->costs[AINC_POST_INC], 0);
- if (ainc_offset == 0 && msize == -ainc_step)
+ if (known_eq (ainc_offset, 0) && known_eq (msize, -ainc_step))
return comp_cost (data->costs[AINC_POST_DEC], 0);
- if (ainc_offset == msize && msize == ainc_step)
+ if (known_eq (ainc_offset, msize) && known_eq (msize, ainc_step))
return comp_cost (data->costs[AINC_PRE_INC], 0);
- if (ainc_offset == -msize && msize == -ainc_step)
+ if (known_eq (ainc_offset, -msize) && known_eq (msize, -ainc_step))
return comp_cost (data->costs[AINC_PRE_DEC], 0);
return infinite_cost;
}
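
(The four known_eq tests above match the candidate's step and the left-over
constant offset against the classic auto-increment addressing forms.  With
compile-time constants the mapping looks like the hypothetical helper below;
the AINC_* names follow the cost-table indices used above, everything else is
invented for the sketch.)

/* Hypothetical scalar sketch of the classification in get_address_cost_ainc:
   STEP is the induction step, OFFSET the remaining constant displacement,
   MSIZE the size of the accessed mode.  */
enum ainc_form { AINC_NONE, AINC_POST_INC, AINC_POST_DEC,
		 AINC_PRE_INC, AINC_PRE_DEC };

static ainc_form
classify_ainc (long step, long offset, long msize)
{
  if (offset == 0 && msize == step)
    return AINC_POST_INC;	/* Like *p++ on the accessed mode.  */
  if (offset == 0 && msize == -step)
    return AINC_POST_DEC;	/* Like *p--.  */
  if (offset == msize && msize == step)
    return AINC_PRE_INC;	/* Like *++p.  */
  if (offset == -msize && msize == -step)
    return AINC_PRE_DEC;	/* Like *--p.  */
  return AINC_NONE;
}
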
if (ok_with_ratio_p || ok_without_ratio_p)
{
- if (aff_inv->offset != 0)
+ if (maybe_ne (aff_inv->offset, 0))
{
parts.offset = wide_int_to_tree (sizetype, aff_inv->offset);
/* Addressing mode "base + index [<< scale] + offset". */
}
else
{
- if (can_autoinc && ratio == 1 && cst_and_fits_in_hwi (cand->iv->step))
+ poly_int64 ainc_step;
+ if (can_autoinc
+ && ratio == 1
+ && ptrdiff_tree_p (cand->iv->step, &ainc_step))
{
- HOST_WIDE_INT ainc_step = int_cst_value (cand->iv->step);
- HOST_WIDE_INT ainc_offset = (aff_inv->offset).to_shwi ();
+ poly_int64 ainc_offset = (aff_inv->offset).force_shwi ();
if (stmt_after_increment (data->current_loop, cand, use->stmt))
ainc_offset += ainc_step;
aff_combination_scale (&tmpa, -1);
aff_combination_add (&tmpb, &tmpa);
aff_combination_add (&tmpb, &nit);
- if (tmpb.n != 0 || tmpb.offset != 1)
+ if (tmpb.n != 0 || maybe_ne (tmpb.offset, 1))
return false;
/* Finally, check that CAND->IV->BASE - CAND->IV->STEP * A does not
unshare_aff_combination (&aff_var);
/* Prefer the CSE opportunity over the loop invariant by adding the offset
last, so that iv_uses with different offsets can be CSEed. */
- widest_int offset = aff_inv.offset;
+ poly_widest_int offset = aff_inv.offset;
aff_inv.offset = 0;
gimple_seq stmt_list = NULL, seq = NULL;