+2018-01-03 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * tree.h (TYPE_VECTOR_SUBPARTS): Turn into a function and handle
+ polynomial numbers of units.
+ (SET_TYPE_VECTOR_SUBPARTS): Likewise.
+ (valid_vector_subparts_p): New function.
+ (build_vector_type): Remove temporary shim and take the number
+ of units as a poly_uint64 rather than an int.
+ (build_opaque_vector_type): Take the number of units as a
+ poly_uint64 rather than an int.
+ * tree.c (build_vector_from_ctor): Handle polynomial
+ TYPE_VECTOR_SUBPARTS.
+ (type_hash_canon_hash, type_cache_hasher::equal): Likewise.
+ (uniform_vector_p, vector_type_mode, build_vector): Likewise.
+	(build_vector_from_val): Use VEC_DUPLICATE_EXPR when duplicating
+	a non-constant operand into a variable-length vector.
+	(make_vector_type): Remove temporary to_constant ().
+ (build_vector_type, build_opaque_vector_type): Take the number of
+ units as a poly_uint64 rather than an int.
+ (check_vector_cst): Handle polynomial TYPE_VECTOR_SUBPARTS and
+ VECTOR_CST_NELTS.
+ * cfgexpand.c (expand_debug_expr): Likewise.
+ * expr.c (count_type_elements, categorize_ctor_elements_1): Likewise.
+ (store_constructor, expand_expr_real_1): Likewise.
+ (const_scalar_mask_from_tree): Likewise.
+ * fold-const-call.c (fold_const_reduction): Likewise.
+ * fold-const.c (const_binop, const_unop, fold_convert_const): Likewise.
+ (operand_equal_p, fold_vec_perm, fold_ternary_loc): Likewise.
+ (native_encode_vector, vec_cst_ctor_to_array): Likewise.
+ (fold_relational_const): Likewise.
+ (native_interpret_vector): Likewise. Change the size from an
+ int to an unsigned int.
+ * gimple-fold.c (gimple_fold_stmt_to_constant_1): Handle polynomial
+ TYPE_VECTOR_SUBPARTS.
+ (gimple_fold_indirect_ref, gimple_build_vector): Likewise.
+ (gimple_build_vector_from_val): Use VEC_DUPLICATE_EXPR when
+ duplicating a non-constant operand into a variable-length vector.
+ * hsa-brig.c (hsa_op_immed::emit_to_buffer): Handle polynomial
+ TYPE_VECTOR_SUBPARTS and VECTOR_CST_NELTS.
+ * ipa-icf.c (sem_variable::equals): Likewise.
+ * match.pd: Likewise.
+ * omp-simd-clone.c (simd_clone_subparts): Likewise.
+ * print-tree.c (print_node): Likewise.
+ * stor-layout.c (layout_type): Likewise.
+ * targhooks.c (default_builtin_vectorization_cost): Likewise.
+ * tree-cfg.c (verify_gimple_comparison): Likewise.
+ (verify_gimple_assign_binary): Likewise.
+ (verify_gimple_assign_ternary): Likewise.
+ (verify_gimple_assign_single): Likewise.
+ * tree-pretty-print.c (dump_generic_node): Likewise.
+ * tree-ssa-forwprop.c (simplify_vector_constructor): Likewise.
+ (simplify_bitfield_ref, is_combined_permutation_identity): Likewise.
+ * tree-vect-data-refs.c (vect_permute_store_chain): Likewise.
+ (vect_grouped_load_supported, vect_permute_load_chain): Likewise.
+ (vect_shift_permute_load_chain): Likewise.
+ * tree-vect-generic.c (nunits_for_known_piecewise_op): Likewise.
+ (expand_vector_condition, optimize_vector_constructor): Likewise.
+ (lower_vec_perm, get_compute_type): Likewise.
+ * tree-vect-loop.c (vect_determine_vectorization_factor): Likewise.
+ (get_initial_defs_for_reduction, vect_transform_loop): Likewise.
+ * tree-vect-patterns.c (vect_recog_bool_pattern): Likewise.
+ (vect_recog_mask_conversion_pattern): Likewise.
+ * tree-vect-slp.c (vect_supported_load_permutation_p): Likewise.
+ (vect_get_constant_vectors, vect_transform_slp_perm_load): Likewise.
+ * tree-vect-stmts.c (perm_mask_for_reverse): Likewise.
+ (get_group_load_store_type, vectorizable_mask_load_store): Likewise.
+ (vectorizable_bswap, simd_clone_subparts, vectorizable_assignment)
+ (vectorizable_shift, vectorizable_operation, vectorizable_store)
+ (vectorizable_load, vect_is_simple_cond, vectorizable_comparison)
+ (supportable_widening_operation): Likewise.
+ (supportable_narrowing_operation): Likewise.
+ * tree-vector-builder.c (tree_vector_builder::binary_encoded_nelts):
+ Likewise.
+ * varasm.c (output_constant): Likewise.
+
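(Note for readers new to poly-int: the mechanical rule applied throughout
is that == becomes known_eq, != becomes maybe_ne, and </<= become
known_lt/known_le, while code that genuinely needs a compile-time element
count asks for one explicitly.  A minimal sketch, using the real
poly-int.h routines but with invented example_* names:

    /* Illustrative sketch only.  T1 and T2 are vector types.  */
    static bool
    example_same_nunits_p (tree t1, tree t2)
    {
      /* "Equal for all runtime vector lengths".  */
      return known_eq (TYPE_VECTOR_SUBPARTS (t1),
		       TYPE_VECTOR_SUBPARTS (t2));
    }

    /* Illustrative sketch only: callers that need a constant count
       either bail out gracefully via is_constant ...  */
    static tree
    example_count_as_tree (tree type)
    {
      unsigned HOST_WIDE_INT nelts;
      if (!TYPE_VECTOR_SUBPARTS (type).is_constant (&nelts))
	return NULL_TREE;
      return size_int (nelts);
    }

... or assert that one must exist, via
TYPE_VECTOR_SUBPARTS (type).to_constant (), as the front ends that never
create variable-length vectors do below.)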
2018-01-03 Richard Sandiford <richard.sandiford@linaro.org>
Alan Hayward <alan.hayward@arm.com>
David Sherwood <david.sherwood@arm.com>
+2018-01-03 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * gcc-interface/utils.c (gnat_types_compatible_p): Handle
+ polynomial TYPE_VECTOR_SUBPARTS.
+
2018-01-03 Richard Sandiford <richard.sandiford@linaro.org>
Alan Hayward <alan.hayward@arm.com>
David Sherwood <david.sherwood@arm.com>
/* Vector types are also compatible if they have the same number of subparts
and the same form of (scalar) element type. */
if (code == VECTOR_TYPE
- && TYPE_VECTOR_SUBPARTS (t1) == TYPE_VECTOR_SUBPARTS (t2)
+ && known_eq (TYPE_VECTOR_SUBPARTS (t1), TYPE_VECTOR_SUBPARTS (t2))
&& TREE_CODE (TREE_TYPE (t1)) == TREE_CODE (TREE_TYPE (t2))
&& TYPE_PRECISION (TREE_TYPE (t1)) == TYPE_PRECISION (TREE_TYPE (t2)))
return 1;
+2018-01-03 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * brigfrontend/brig-to-generic.cc (get_unsigned_int_type): Handle
+ polynomial TYPE_VECTOR_SUBPARTS.
+ * brigfrontend/brig-util.h (gccbrig_type_vector_subparts): Likewise.
+
2018-01-03 Jakub Jelinek <jakub@redhat.com>
Update copyright years.
{
size_t esize
= int_size_in_bytes (TREE_TYPE (original_type)) * BITS_PER_UNIT;
- size_t ecount = TYPE_VECTOR_SUBPARTS (original_type);
+ poly_uint64 ecount = TYPE_VECTOR_SUBPARTS (original_type);
return build_vector_type (build_nonstandard_integer_type (esize, true),
ecount);
}
inline unsigned HOST_WIDE_INT
gccbrig_type_vector_subparts (const_tree type)
{
- return TYPE_VECTOR_SUBPARTS (type);
+ return TYPE_VECTOR_SUBPARTS (type).to_constant ();
}
#endif
+2018-01-03 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * c-common.c (vector_types_convertible_p, c_build_vec_perm_expr)
+ (convert_vector_to_array_for_subscript): Handle polynomial
+ TYPE_VECTOR_SUBPARTS.
+ (c_common_type_for_mode): Check valid_vector_subparts_p.
+ * c-pretty-print.c (pp_c_initializer_list): Handle polynomial
+ VECTOR_CST_NELTS.
+
2018-01-03 Jakub Jelinek <jakub@redhat.com>
Update copyright years.
convertible_lax =
(tree_int_cst_equal (TYPE_SIZE (t1), TYPE_SIZE (t2))
- && (TREE_CODE (TREE_TYPE (t1)) != REAL_TYPE ||
- TYPE_VECTOR_SUBPARTS (t1) == TYPE_VECTOR_SUBPARTS (t2))
+ && (TREE_CODE (TREE_TYPE (t1)) != REAL_TYPE
+ || known_eq (TYPE_VECTOR_SUBPARTS (t1),
+ TYPE_VECTOR_SUBPARTS (t2)))
&& (INTEGRAL_TYPE_P (TREE_TYPE (t1))
== INTEGRAL_TYPE_P (TREE_TYPE (t2))));
if (!convertible_lax || flag_lax_vector_conversions)
return convertible_lax;
- if (TYPE_VECTOR_SUBPARTS (t1) == TYPE_VECTOR_SUBPARTS (t2)
+ if (known_eq (TYPE_VECTOR_SUBPARTS (t1), TYPE_VECTOR_SUBPARTS (t2))
&& lang_hooks.types_compatible_p (TREE_TYPE (t1), TREE_TYPE (t2)))
return true;
return error_mark_node;
}
- if (TYPE_VECTOR_SUBPARTS (TREE_TYPE (v0))
- != TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask))
- && TYPE_VECTOR_SUBPARTS (TREE_TYPE (v1))
- != TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask)))
+ if (maybe_ne (TYPE_VECTOR_SUBPARTS (TREE_TYPE (v0)),
+ TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask)))
+ && maybe_ne (TYPE_VECTOR_SUBPARTS (TREE_TYPE (v1)),
+ TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask))))
{
if (complain)
error_at (loc, "__builtin_shuffle number of elements of the "
if (inner_type != NULL_TREE)
return build_complex_type (inner_type);
}
- else if (VECTOR_MODE_P (mode))
+ else if (VECTOR_MODE_P (mode)
+ && valid_vector_subparts_p (GET_MODE_NUNITS (mode)))
{
machine_mode inner_mode = GET_MODE_INNER (mode);
tree inner_type = c_common_type_for_mode (inner_mode, unsignedp);
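(The new valid_vector_subparts_p guard, here and in the Fortran, Go and
LTO hooks below, stops the type_for_mode hooks from building a vector
type whose unit count TYPE_VECTOR_SUBPARTS cannot represent, since the
count is now stored in a compressed log2-based form.  A rough sketch of
the shape of the check, assuming the constant coefficient must be a
power of 2; this is not the committed implementation:

    static inline bool
    sketch_valid_vector_subparts_p (poly_uint64 subparts)
    {
      unsigned HOST_WIDE_INT coeff0 = subparts.coeffs[0];
      if (!pow2p_hwi (coeff0))
	return false;
      /* With two coefficients (e.g. SVE), the runtime part must be
	 absent or match the constant part.  */
      if (NUM_POLY_INT_COEFFS == 2)
	{
	  unsigned HOST_WIDE_INT coeff1 = subparts.coeffs[1];
	  return coeff1 == 0 || coeff1 == coeff0;
	}
      return true;
    })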
if (TREE_CODE (index) == INTEGER_CST)
if (!tree_fits_uhwi_p (index)
- || tree_to_uhwi (index) >= TYPE_VECTOR_SUBPARTS (type))
+ || maybe_ge (tree_to_uhwi (index), TYPE_VECTOR_SUBPARTS (type)))
warning_at (loc, OPT_Warray_bounds, "index value is out of bound");
/* We are building an ARRAY_REF so mark the vector as addressable
case VECTOR_TYPE:
if (TREE_CODE (e) == VECTOR_CST)
{
- unsigned i;
- for (i = 0; i < VECTOR_CST_NELTS (e); ++i)
+ /* We don't create variable-length VECTOR_CSTs. */
+ unsigned int nunits = VECTOR_CST_NELTS (e).to_constant ();
+ for (unsigned int i = 0; i < nunits; ++i)
{
if (i > 0)
pp_separate_with (pp, ',');
+2018-01-03 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * c-typeck.c (comptypes_internal, build_binary_op): Handle polynomial
+ TYPE_VECTOR_SUBPARTS.
+
2018-01-03 Jakub Jelinek <jakub@redhat.com>
Update copyright years.
break;
case VECTOR_TYPE:
- val = (TYPE_VECTOR_SUBPARTS (t1) == TYPE_VECTOR_SUBPARTS (t2)
+ val = (known_eq (TYPE_VECTOR_SUBPARTS (t1), TYPE_VECTOR_SUBPARTS (t2))
&& comptypes_internal (TREE_TYPE (t1), TREE_TYPE (t2),
enum_and_int_p, different_types_p));
break;
if (code0 == VECTOR_TYPE && code1 == VECTOR_TYPE
&& TREE_CODE (TREE_TYPE (type0)) == INTEGER_TYPE
&& TREE_CODE (TREE_TYPE (type1)) == INTEGER_TYPE
- && TYPE_VECTOR_SUBPARTS (type0) == TYPE_VECTOR_SUBPARTS (type1))
+ && known_eq (TYPE_VECTOR_SUBPARTS (type0),
+ TYPE_VECTOR_SUBPARTS (type1)))
{
result_type = type0;
converted = 1;
if (code0 == VECTOR_TYPE && code1 == VECTOR_TYPE
&& TREE_CODE (TREE_TYPE (type0)) == INTEGER_TYPE
&& TREE_CODE (TREE_TYPE (type1)) == INTEGER_TYPE
- && TYPE_VECTOR_SUBPARTS (type0) == TYPE_VECTOR_SUBPARTS (type1))
+ && known_eq (TYPE_VECTOR_SUBPARTS (type0),
+ TYPE_VECTOR_SUBPARTS (type1)))
{
result_type = type0;
converted = 1;
return error_mark_node;
}
- if (TYPE_VECTOR_SUBPARTS (type0) != TYPE_VECTOR_SUBPARTS (type1))
+ if (maybe_ne (TYPE_VECTOR_SUBPARTS (type0),
+ TYPE_VECTOR_SUBPARTS (type1)))
{
error_at (location, "comparing vectors with different "
"number of elements");
return error_mark_node;
}
- if (TYPE_VECTOR_SUBPARTS (type0) != TYPE_VECTOR_SUBPARTS (type1))
+ if (maybe_ne (TYPE_VECTOR_SUBPARTS (type0),
+ TYPE_VECTOR_SUBPARTS (type1)))
{
error_at (location, "comparing vectors with different "
"number of elements");
case VECTOR_CST:
{
- unsigned i, nelts;
+ unsigned HOST_WIDE_INT i, nelts;
+
+ if (!VECTOR_CST_NELTS (exp).is_constant (&nelts))
+ return NULL;
- nelts = VECTOR_CST_NELTS (exp);
op0 = gen_rtx_CONCATN (mode, rtvec_alloc (nelts));
for (i = 0; i < nelts; ++i)
else if (TREE_CODE (TREE_TYPE (exp)) == VECTOR_TYPE)
{
unsigned i;
+ unsigned HOST_WIDE_INT nelts;
tree val;
- op0 = gen_rtx_CONCATN
- (mode, rtvec_alloc (TYPE_VECTOR_SUBPARTS (TREE_TYPE (exp))));
+ if (!TYPE_VECTOR_SUBPARTS (TREE_TYPE (exp)).is_constant (&nelts))
+ goto flag_unsupported;
+
+ op0 = gen_rtx_CONCATN (mode, rtvec_alloc (nelts));
FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (exp), i, val)
{
XVECEXP (op0, 0, i) = op1;
}
- if (i < TYPE_VECTOR_SUBPARTS (TREE_TYPE (exp)))
+ if (i < nelts)
{
op1 = expand_debug_expr
(build_zero_cst (TREE_TYPE (TREE_TYPE (exp))));
if (!op1)
return NULL;
- for (; i < TYPE_VECTOR_SUBPARTS (TREE_TYPE (exp)); i++)
+ for (; i < nelts; i++)
XVECEXP (op0, 0, i) = op1;
}
+2018-01-03 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * constexpr.c (cxx_eval_array_reference): Handle polynomial
+ VECTOR_CST_NELTS.
+ (cxx_fold_indirect_ref): Handle polynomial TYPE_VECTOR_SUBPARTS.
+ * call.c (build_conditional_expr_1): Likewise.
+ * decl.c (cp_finish_decomp): Likewise.
+ * mangle.c (write_type): Likewise.
+ * typeck.c (structural_comptypes): Likewise.
+ (cp_build_binary_op): Likewise.
+ * typeck2.c (process_init_constructor_array): Likewise.
+
2018-01-03 Jakub Jelinek <jakub@redhat.com>
PR c++/83555
}
if (!same_type_p (arg2_type, arg3_type)
- || TYPE_VECTOR_SUBPARTS (arg1_type)
- != TYPE_VECTOR_SUBPARTS (arg2_type)
+ || maybe_ne (TYPE_VECTOR_SUBPARTS (arg1_type),
+ TYPE_VECTOR_SUBPARTS (arg2_type))
|| TYPE_SIZE (arg1_type) != TYPE_SIZE (arg2_type))
{
if (complain & tf_error)
len = (unsigned) TREE_STRING_LENGTH (ary) / elem_nchars;
}
else if (TREE_CODE (ary) == VECTOR_CST)
- len = VECTOR_CST_NELTS (ary);
+ /* We don't create variable-length VECTOR_CSTs. */
+ len = VECTOR_CST_NELTS (ary).to_constant ();
else
{
/* We can't do anything with other tree codes, so use
unsigned HOST_WIDE_INT indexi = offset * BITS_PER_UNIT;
tree index = bitsize_int (indexi);
- if (offset / part_widthi < TYPE_VECTOR_SUBPARTS (op00type))
+ if (known_lt (offset / part_widthi,
+ TYPE_VECTOR_SUBPARTS (op00type)))
return fold_build3_loc (loc,
BIT_FIELD_REF, type, op00,
part_width, index);
}
else if (TREE_CODE (type) == VECTOR_TYPE)
{
- eltscnt = TYPE_VECTOR_SUBPARTS (type);
+ if (!TYPE_VECTOR_SUBPARTS (type).is_constant (&eltscnt))
+ {
+ error_at (loc, "cannot decompose variable length vector %qT", type);
+ goto error_out;
+ }
if (count != eltscnt)
goto cnt_mismatch;
eltype = cp_build_qualified_type (TREE_TYPE (type), TYPE_QUALS (type));
write_string ("Dv");
/* Non-constant vector size would be encoded with
_ expression, but we don't support that yet. */
- write_unsigned_number (TYPE_VECTOR_SUBPARTS (type));
+ write_unsigned_number (TYPE_VECTOR_SUBPARTS (type)
+ .to_constant ());
write_char ('_');
}
else
break;
case VECTOR_TYPE:
- if (TYPE_VECTOR_SUBPARTS (t1) != TYPE_VECTOR_SUBPARTS (t2)
+ if (maybe_ne (TYPE_VECTOR_SUBPARTS (t1), TYPE_VECTOR_SUBPARTS (t2))
|| !same_type_p (TREE_TYPE (t1), TREE_TYPE (t2)))
return false;
break;
converted = 1;
}
else if (code0 == VECTOR_TYPE && code1 == VECTOR_TYPE
- && TREE_CODE (TREE_TYPE (type0)) == INTEGER_TYPE
- && TREE_CODE (TREE_TYPE (type1)) == INTEGER_TYPE
- && TYPE_VECTOR_SUBPARTS (type0) == TYPE_VECTOR_SUBPARTS (type1))
+ && TREE_CODE (TREE_TYPE (type0)) == INTEGER_TYPE
+ && TREE_CODE (TREE_TYPE (type1)) == INTEGER_TYPE
+ && known_eq (TYPE_VECTOR_SUBPARTS (type0),
+ TYPE_VECTOR_SUBPARTS (type1)))
{
result_type = type0;
converted = 1;
converted = 1;
}
else if (code0 == VECTOR_TYPE && code1 == VECTOR_TYPE
- && TREE_CODE (TREE_TYPE (type0)) == INTEGER_TYPE
- && TREE_CODE (TREE_TYPE (type1)) == INTEGER_TYPE
- && TYPE_VECTOR_SUBPARTS (type0) == TYPE_VECTOR_SUBPARTS (type1))
+ && TREE_CODE (TREE_TYPE (type0)) == INTEGER_TYPE
+ && TREE_CODE (TREE_TYPE (type1)) == INTEGER_TYPE
+ && known_eq (TYPE_VECTOR_SUBPARTS (type0),
+ TYPE_VECTOR_SUBPARTS (type1)))
{
result_type = type0;
converted = 1;
return error_mark_node;
}
- if (TYPE_VECTOR_SUBPARTS (type0) != TYPE_VECTOR_SUBPARTS (type1))
+ if (maybe_ne (TYPE_VECTOR_SUBPARTS (type0),
+ TYPE_VECTOR_SUBPARTS (type1)))
{
if (complain & tf_error)
{
}
else
/* Vectors are like simple fixed-size arrays. */
- len = TYPE_VECTOR_SUBPARTS (type);
+ unbounded = !TYPE_VECTOR_SUBPARTS (type).is_constant (&len);
/* There must not be more initializers than needed. */
if (!unbounded && vec_safe_length (v) > len)
return 2;
case VECTOR_TYPE:
- return TYPE_VECTOR_SUBPARTS (type);
+ {
+ unsigned HOST_WIDE_INT nelts;
+ if (TYPE_VECTOR_SUBPARTS (type).is_constant (&nelts))
+ return nelts;
+ else
+ return -1;
+ }
case INTEGER_TYPE:
case REAL_TYPE:
case VECTOR_CST:
{
- unsigned i;
- for (i = 0; i < VECTOR_CST_NELTS (value); ++i)
+ /* We can only construct constant-length vectors using
+ CONSTRUCTOR. */
+ unsigned int nunits = VECTOR_CST_NELTS (value).to_constant ();
+ for (unsigned int i = 0; i < nunits; ++i)
{
tree v = VECTOR_CST_ELT (value, i);
if (!initializer_zerop (v))
HOST_WIDE_INT bitsize;
HOST_WIDE_INT bitpos;
rtvec vector = NULL;
- unsigned n_elts;
+ poly_uint64 n_elts;
+ unsigned HOST_WIDE_INT const_n_elts;
alias_set_type alias;
bool vec_vec_init_p = false;
machine_mode mode = GET_MODE (target);
}
n_elts = TYPE_VECTOR_SUBPARTS (type);
- if (REG_P (target) && VECTOR_MODE_P (mode))
+ if (REG_P (target)
+ && VECTOR_MODE_P (mode)
+ && n_elts.is_constant (&const_n_elts))
{
machine_mode emode = eltmode;
== VECTOR_TYPE))
{
tree etype = TREE_TYPE (CONSTRUCTOR_ELT (exp, 0)->value);
- gcc_assert (CONSTRUCTOR_NELTS (exp) * TYPE_VECTOR_SUBPARTS (etype)
- == n_elts);
+ gcc_assert (known_eq (CONSTRUCTOR_NELTS (exp)
+ * TYPE_VECTOR_SUBPARTS (etype),
+ n_elts));
emode = TYPE_MODE (etype);
}
icode = convert_optab_handler (vec_init_optab, mode, emode);
if (icode != CODE_FOR_nothing)
{
- unsigned int i, n = n_elts;
+ unsigned int i, n = const_n_elts;
if (emode != eltmode)
{
/* Clear the entire vector first if there are any missing elements,
or if the incidence of zero elements is >= 75%. */
- need_to_clear = (count < n_elts || 4 * zero_count >= 3 * count);
+ need_to_clear = (maybe_lt (count, n_elts)
+ || 4 * zero_count >= 3 * count);
}
if (need_to_clear && maybe_gt (size, 0) && !vector)
if (!tmp)
{
vec<constructor_elt, va_gc> *v;
- unsigned i;
- vec_alloc (v, VECTOR_CST_NELTS (exp));
- for (i = 0; i < VECTOR_CST_NELTS (exp); ++i)
+ /* Constructors need to be fixed-length. FIXME. */
+ unsigned int nunits = VECTOR_CST_NELTS (exp).to_constant ();
+ vec_alloc (v, nunits);
+ for (unsigned int i = 0; i < nunits; ++i)
CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, VECTOR_CST_ELT (exp, i));
tmp = build_constructor (type, v);
}
{
wide_int res = wi::zero (GET_MODE_PRECISION (mode));
tree elt;
- unsigned i;
- for (i = 0; i < VECTOR_CST_NELTS (exp); ++i)
+ /* The result has a fixed number of bits so the input must too. */
+ unsigned int nunits = VECTOR_CST_NELTS (exp).to_constant ();
+ for (unsigned int i = 0; i < nunits; ++i)
{
elt = VECTOR_CST_ELT (exp, i);
gcc_assert (TREE_CODE (elt) == INTEGER_CST);
static tree
fold_const_reduction (tree type, tree arg, tree_code code)
{
- if (TREE_CODE (arg) != VECTOR_CST)
+ unsigned HOST_WIDE_INT nelts;
+ if (TREE_CODE (arg) != VECTOR_CST
+ || !VECTOR_CST_NELTS (arg).is_constant (&nelts))
return NULL_TREE;
tree res = VECTOR_CST_ELT (arg, 0);
- unsigned int nelts = VECTOR_CST_NELTS (arg);
- for (unsigned int i = 1; i < nelts; i++)
+ for (unsigned HOST_WIDE_INT i = 1; i < nelts; i++)
{
res = const_binop (code, type, res, VECTOR_CST_ELT (arg, i));
if (res == NULL_TREE || !CONSTANT_CLASS_P (res))
if (TREE_CODE (arg1) == VECTOR_CST
&& TREE_CODE (arg2) == VECTOR_CST
- && (TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg1))
- == TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg2))))
+ && known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg1)),
+ TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg2))))
{
tree type = TREE_TYPE (arg1);
bool step_ok_p;
case VEC_PACK_TRUNC_EXPR:
case VEC_PACK_FIX_TRUNC_EXPR:
{
- unsigned int out_nelts, in_nelts, i;
+	unsigned HOST_WIDE_INT out_nelts, in_nelts, i;
if (TREE_CODE (arg1) != VECTOR_CST
|| TREE_CODE (arg2) != VECTOR_CST)
return NULL_TREE;
- in_nelts = VECTOR_CST_NELTS (arg1);
+ if (!VECTOR_CST_NELTS (arg1).is_constant (&in_nelts))
+ return NULL_TREE;
+
out_nelts = in_nelts * 2;
- gcc_assert (in_nelts == VECTOR_CST_NELTS (arg2)
- && out_nelts == TYPE_VECTOR_SUBPARTS (type));
+ gcc_assert (known_eq (in_nelts, VECTOR_CST_NELTS (arg2))
+ && known_eq (out_nelts, TYPE_VECTOR_SUBPARTS (type)));
tree_vector_builder elts (type, out_nelts, 1);
for (i = 0; i < out_nelts; i++)
case VEC_WIDEN_MULT_EVEN_EXPR:
case VEC_WIDEN_MULT_ODD_EXPR:
{
- unsigned int out_nelts, in_nelts, out, ofs, scale;
+ unsigned HOST_WIDE_INT out_nelts, in_nelts, out, ofs, scale;
if (TREE_CODE (arg1) != VECTOR_CST || TREE_CODE (arg2) != VECTOR_CST)
return NULL_TREE;
- in_nelts = VECTOR_CST_NELTS (arg1);
+ if (!VECTOR_CST_NELTS (arg1).is_constant (&in_nelts))
+ return NULL_TREE;
out_nelts = in_nelts / 2;
- gcc_assert (in_nelts == VECTOR_CST_NELTS (arg2)
- && out_nelts == TYPE_VECTOR_SUBPARTS (type));
+ gcc_assert (known_eq (in_nelts, VECTOR_CST_NELTS (arg2))
+ && known_eq (out_nelts, TYPE_VECTOR_SUBPARTS (type)));
if (code == VEC_WIDEN_MULT_LO_EXPR)
scale = 0, ofs = BYTES_BIG_ENDIAN ? out_nelts : 0;
case VEC_UNPACK_FLOAT_LO_EXPR:
case VEC_UNPACK_FLOAT_HI_EXPR:
{
- unsigned int out_nelts, in_nelts, i;
+ unsigned HOST_WIDE_INT out_nelts, in_nelts, i;
enum tree_code subcode;
if (TREE_CODE (arg0) != VECTOR_CST)
return NULL_TREE;
- in_nelts = VECTOR_CST_NELTS (arg0);
+ if (!VECTOR_CST_NELTS (arg0).is_constant (&in_nelts))
+ return NULL_TREE;
out_nelts = in_nelts / 2;
- gcc_assert (out_nelts == TYPE_VECTOR_SUBPARTS (type));
+ gcc_assert (known_eq (out_nelts, TYPE_VECTOR_SUBPARTS (type)));
unsigned int offset = 0;
if ((!BYTES_BIG_ENDIAN) ^ (code == VEC_UNPACK_LO_EXPR
else if (TREE_CODE (type) == VECTOR_TYPE)
{
if (TREE_CODE (arg1) == VECTOR_CST
- && TYPE_VECTOR_SUBPARTS (type) == VECTOR_CST_NELTS (arg1))
+ && known_eq (TYPE_VECTOR_SUBPARTS (type), VECTOR_CST_NELTS (arg1)))
{
tree elttype = TREE_TYPE (type);
tree arg1_elttype = TREE_TYPE (TREE_TYPE (arg1));
We only tested element precision and modes to match.
Vectors may be BLKmode and thus also check that the number of
parts match. */
- if (TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0))
- != TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg1)))
+ if (maybe_ne (TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0)),
+ TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg1))))
return 0;
vec<constructor_elt, va_gc> *v0 = CONSTRUCTOR_ELTS (arg0);
static int
native_encode_vector (const_tree expr, unsigned char *ptr, int len, int off)
{
- unsigned i, count;
+ unsigned HOST_WIDE_INT i, count;
int size, offset;
tree itype, elem;
offset = 0;
- count = VECTOR_CST_NELTS (expr);
+ if (!VECTOR_CST_NELTS (expr).is_constant (&count))
+ return 0;
itype = TREE_TYPE (TREE_TYPE (expr));
size = GET_MODE_SIZE (SCALAR_TYPE_MODE (itype));
for (i = 0; i < count; i++)
If the buffer cannot be interpreted, return NULL_TREE. */
static tree
-native_interpret_vector (tree type, const unsigned char *ptr, int len)
+native_interpret_vector (tree type, const unsigned char *ptr, unsigned int len)
{
tree etype, elem;
- int i, size, count;
+ unsigned int i, size;
+ unsigned HOST_WIDE_INT count;
etype = TREE_TYPE (type);
size = GET_MODE_SIZE (SCALAR_TYPE_MODE (etype));
- count = TYPE_VECTOR_SUBPARTS (type);
- if (size * count > len)
+ if (!TYPE_VECTOR_SUBPARTS (type).is_constant (&count)
+ || size * count > len)
return NULL_TREE;
tree_vector_builder elements (type, count, 1);
static bool
vec_cst_ctor_to_array (tree arg, unsigned int nelts, tree *elts)
{
- unsigned int i;
+ unsigned HOST_WIDE_INT i, nunits;
- if (TREE_CODE (arg) == VECTOR_CST)
+ if (TREE_CODE (arg) == VECTOR_CST
+ && VECTOR_CST_NELTS (arg).is_constant (&nunits))
{
- for (i = 0; i < VECTOR_CST_NELTS (arg); ++i)
+ for (i = 0; i < nunits; ++i)
elts[i] = VECTOR_CST_ELT (arg, i);
}
else if (TREE_CODE (arg) == CONSTRUCTOR)
if (!sel.length ().is_constant (&nelts))
return NULL_TREE;
- gcc_assert (TYPE_VECTOR_SUBPARTS (type) == nelts
- && TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0)) == nelts
- && TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg1)) == nelts);
+ gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (type), nelts)
+ && known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0)), nelts)
+ && known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg1)), nelts));
if (TREE_TYPE (TREE_TYPE (arg0)) != TREE_TYPE (type)
|| TREE_TYPE (TREE_TYPE (arg1)) != TREE_TYPE (type))
return NULL_TREE;
}
else if (TREE_CODE (arg0) == VECTOR_CST)
{
+ unsigned HOST_WIDE_INT nelts;
if ((TREE_CODE (arg1) == VECTOR_CST
|| TREE_CODE (arg1) == CONSTRUCTOR)
&& (TREE_CODE (arg2) == VECTOR_CST
- || TREE_CODE (arg2) == CONSTRUCTOR))
+ || TREE_CODE (arg2) == CONSTRUCTOR)
+ && TYPE_VECTOR_SUBPARTS (type).is_constant (&nelts))
{
- unsigned int nelts = VECTOR_CST_NELTS (arg0), i;
- gcc_assert (nelts == TYPE_VECTOR_SUBPARTS (type));
vec_perm_builder sel (nelts, nelts, 1);
- for (i = 0; i < nelts; i++)
+ for (unsigned int i = 0; i < nelts; i++)
{
tree val = VECTOR_CST_ELT (arg0, i);
if (integer_all_onesp (val))
if (n != 0
&& (idx % width) == 0
&& (n % width) == 0
- && ((idx + n) / width) <= TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0)))
+ && known_le ((idx + n) / width,
+ TYPE_VECTOR_SUBPARTS (TREE_TYPE (arg0))))
{
idx = idx / width;
n = n / width;
return NULL_TREE;
/* Create a vec_perm_indices for the integer vector. */
- unsigned int nelts = TYPE_VECTOR_SUBPARTS (type);
+ poly_uint64 nelts = TYPE_VECTOR_SUBPARTS (type);
bool single_arg = (op0 == op1);
vec_perm_indices sel (builder, single_arg ? 1 : 2, nelts);
if (bitpos % elsize == 0)
{
unsigned k = bitpos / elsize;
+ unsigned HOST_WIDE_INT nelts;
if (operand_equal_p (VECTOR_CST_ELT (arg0, k), arg1, 0))
return arg0;
- else
+ else if (VECTOR_CST_NELTS (arg0).is_constant (&nelts))
{
- unsigned int nelts = VECTOR_CST_NELTS (arg0);
tree_vector_builder elts (type, nelts, 1);
elts.quick_grow (nelts);
- for (unsigned int i = 0; i < nelts; ++i)
+ for (unsigned HOST_WIDE_INT i = 0; i < nelts; ++i)
elts[i] = (i == k ? arg1 : VECTOR_CST_ELT (arg0, i));
return elts.build ();
}
{
/* Have vector comparison with scalar boolean result. */
gcc_assert ((code == EQ_EXPR || code == NE_EXPR)
- && VECTOR_CST_NELTS (op0) == VECTOR_CST_NELTS (op1));
- for (unsigned i = 0; i < VECTOR_CST_NELTS (op0); i++)
+ && known_eq (VECTOR_CST_NELTS (op0),
+ VECTOR_CST_NELTS (op1)));
+ unsigned HOST_WIDE_INT nunits;
+ if (!VECTOR_CST_NELTS (op0).is_constant (&nunits))
+ return NULL_TREE;
+ for (unsigned i = 0; i < nunits; i++)
{
tree elem0 = VECTOR_CST_ELT (op0, i);
tree elem1 = VECTOR_CST_ELT (op1, i);
+2018-01-03 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * trans-types.c (gfc_type_for_mode): Check valid_vector_subparts_p.
+
2018-01-03 Thomas Koenig <tkoenig@gcc.gnu.org>
PR fortran/83664
tree type = gfc_type_for_size (GET_MODE_PRECISION (int_mode), unsignedp);
return type != NULL_TREE && mode == TYPE_MODE (type) ? type : NULL_TREE;
}
- else if (VECTOR_MODE_P (mode))
+ else if (VECTOR_MODE_P (mode)
+ && valid_vector_subparts_p (GET_MODE_NUNITS (mode)))
{
machine_mode inner_mode = GET_MODE_INNER (mode);
tree inner_type = gfc_type_for_mode (inner_mode, unsignedp);
}
else if (TREE_CODE (rhs) == CONSTRUCTOR
&& TREE_CODE (TREE_TYPE (rhs)) == VECTOR_TYPE
- && (CONSTRUCTOR_NELTS (rhs)
- == TYPE_VECTOR_SUBPARTS (TREE_TYPE (rhs))))
+ && known_eq (CONSTRUCTOR_NELTS (rhs),
+ TYPE_VECTOR_SUBPARTS (TREE_TYPE (rhs))))
{
unsigned i, nelts;
tree val;
- nelts = TYPE_VECTOR_SUBPARTS (TREE_TYPE (rhs));
+ nelts = CONSTRUCTOR_NELTS (rhs);
tree_vector_builder vec (TREE_TYPE (rhs), nelts, 1);
FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (rhs), i, val)
{
= tree_to_shwi (part_width) / BITS_PER_UNIT;
unsigned HOST_WIDE_INT indexi = offset * BITS_PER_UNIT;
tree index = bitsize_int (indexi);
- if (offset / part_widthi
- < TYPE_VECTOR_SUBPARTS (TREE_TYPE (addrtype)))
+ if (known_lt (offset / part_widthi,
+ TYPE_VECTOR_SUBPARTS (TREE_TYPE (addrtype))))
return fold_build3 (BIT_FIELD_REF, type, TREE_OPERAND (addr, 0),
part_width, index);
}
gimple_build_vector_from_val (gimple_seq *seq, location_t loc, tree type,
tree op)
{
+ if (!TYPE_VECTOR_SUBPARTS (type).is_constant ()
+ && !CONSTANT_CLASS_P (op))
+ return gimple_build (seq, loc, VEC_DUPLICATE_EXPR, type, op);
+
tree res, vec = build_vector_from_val (type, op);
if (is_gimple_val (vec))
return vec;
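(A hypothetical caller, to show the effect of the new early-out; seq,
loc, vectype and op here are placeholders rather than code from the
patch:

    gimple_seq seq = NULL;
    tree dup = gimple_build_vector_from_val (&seq, loc, vectype, op);
    /* For a fixed-length VECTYPE or a constant OP this behaves as
       before; for a variable-length VECTYPE and a non-constant OP, DUP
       is now defined by a single VEC_DUPLICATE_EXPR statement appended
       to SEQ rather than an element-by-element CONSTRUCTOR.  */
)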
if (!TREE_CONSTANT ((*builder)[i]))
{
tree type = builder->type ();
- unsigned int nelts = TYPE_VECTOR_SUBPARTS (type);
+ unsigned int nelts = TYPE_VECTOR_SUBPARTS (type).to_constant ();
vec<constructor_elt, va_gc> *v;
vec_alloc (v, nelts);
for (i = 0; i < nelts; ++i)
+2018-01-03 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * go-lang.c (go_langhook_type_for_mode): Check valid_vector_subparts_p.
+
2018-01-03 Jakub Jelinek <jakub@redhat.com>
Update copyright years.
make sense for the middle-end to ask the frontend for a type
which the frontend does not support. However, at least for now
it is required. See PR 46805. */
- if (VECTOR_MODE_P (mode))
+ if (VECTOR_MODE_P (mode)
+ && valid_vector_subparts_p (GET_MODE_NUNITS (mode)))
{
tree inner;
if (TREE_CODE (m_tree_value) == VECTOR_CST)
{
- int i, num = VECTOR_CST_NELTS (m_tree_value);
+ /* Variable-length vectors aren't supported. */
+ int i, num = VECTOR_CST_NELTS (m_tree_value).to_constant ();
for (i = 0; i < num; i++)
{
tree v = VECTOR_CST_ELT (m_tree_value, i);
&TREE_REAL_CST (t2)));
case VECTOR_CST:
{
- if (VECTOR_CST_NELTS (t1) != VECTOR_CST_NELTS (t2))
- return return_false_with_msg ("VECTOR_CST nelts mismatch");
+ if (maybe_ne (VECTOR_CST_NELTS (t1), VECTOR_CST_NELTS (t2)))
+ return return_false_with_msg ("VECTOR_CST nelts mismatch");
unsigned int count
= tree_vector_builder::binary_encoded_nelts (t1, t2);
+2018-01-03 Richard Sandiford <richard.sandiford@linaro.org>
+ Alan Hayward <alan.hayward@arm.com>
+ David Sherwood <david.sherwood@arm.com>
+
+ * lto-lang.c (lto_type_for_mode): Check valid_vector_subparts_p.
+ * lto.c (hash_canonical_type): Handle polynomial TYPE_VECTOR_SUBPARTS.
+
2018-01-03 Jakub Jelinek <jakub@redhat.com>
Update copyright years.
if (inner_type != NULL_TREE)
return build_complex_type (inner_type);
}
- else if (VECTOR_MODE_P (mode))
+ else if (VECTOR_MODE_P (mode)
+ && valid_vector_subparts_p (GET_MODE_NUNITS (mode)))
{
machine_mode inner_mode = GET_MODE_INNER (mode);
tree inner_type = lto_type_for_mode (inner_mode, unsigned_p);
if (VECTOR_TYPE_P (type))
{
- hstate.add_int (TYPE_VECTOR_SUBPARTS (type));
+ hstate.add_poly_int (TYPE_VECTOR_SUBPARTS (type));
hstate.add_int (TYPE_UNSIGNED (type));
}
(match (nop_convert @0)
(view_convert @0)
(if (VECTOR_TYPE_P (type) && VECTOR_TYPE_P (TREE_TYPE (@0))
- && TYPE_VECTOR_SUBPARTS (type) == TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0))
+ && known_eq (TYPE_VECTOR_SUBPARTS (type),
+ TYPE_VECTOR_SUBPARTS (TREE_TYPE (@0)))
&& tree_nop_conversion_p (TREE_TYPE (type), TREE_TYPE (TREE_TYPE (@0))))))
/* This one has to be last, or it shadows the others. */
(match (nop_convert @0)
(simplify
(plus:c @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
(if (VECTOR_TYPE_P (type)
- && TYPE_VECTOR_SUBPARTS (type) == TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1))
+ && known_eq (TYPE_VECTOR_SUBPARTS (type),
+ TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
&& (TYPE_MODE (TREE_TYPE (type))
== TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
(minus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
(simplify
(minus @3 (view_convert? (vec_cond:s @0 integer_each_onep@1 integer_zerop@2)))
(if (VECTOR_TYPE_P (type)
- && TYPE_VECTOR_SUBPARTS (type) == TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1))
+ && known_eq (TYPE_VECTOR_SUBPARTS (type),
+ TYPE_VECTOR_SUBPARTS (TREE_TYPE (@1)))
&& (TYPE_MODE (TREE_TYPE (type))
== TYPE_MODE (TREE_TYPE (TREE_TYPE (@1)))))
(plus @3 (view_convert (vec_cond @0 (negate @1) @2)))))
(if (n != 0
&& (idx % width) == 0
&& (n % width) == 0
- && ((idx + n) / width) <= TYPE_VECTOR_SUBPARTS (TREE_TYPE (ctor)))
+ && known_le ((idx + n) / width,
+ TYPE_VECTOR_SUBPARTS (TREE_TYPE (ctor))))
(with
{
idx = idx / width;
static unsigned HOST_WIDE_INT
simd_clone_subparts (tree vectype)
{
- return TYPE_VECTOR_SUBPARTS (vectype);
+ return TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
}
/* Allocate a fresh `simd_clone' and return it. NARGS is the number
else if (code == ARRAY_TYPE)
print_node (file, "domain", TYPE_DOMAIN (node), indent + 4);
else if (code == VECTOR_TYPE)
- fprintf (file, " nunits:%d", (int) TYPE_VECTOR_SUBPARTS (node));
+ {
+ fprintf (file, " nunits:");
+ print_dec (TYPE_VECTOR_SUBPARTS (node), file);
+ }
else if (code == RECORD_TYPE
|| code == UNION_TYPE
|| code == QUAL_UNION_TYPE)
case VECTOR_TYPE:
{
- int nunits = TYPE_VECTOR_SUBPARTS (type);
+ poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (type);
tree innertype = TREE_TYPE (type);
- gcc_assert (!(nunits & (nunits - 1)));
-
/* Find an appropriate mode for the vector type. */
if (TYPE_MODE (type) == VOIDmode)
SET_TYPE_MODE (type,
return 3;
case vec_construct:
- return TYPE_VECTOR_SUBPARTS (vectype) - 1;
+ return estimated_poly_value (TYPE_VECTOR_SUBPARTS (vectype)) - 1;
default:
gcc_unreachable ();
return true;
}
- if (TYPE_VECTOR_SUBPARTS (type) != TYPE_VECTOR_SUBPARTS (op0_type))
+ if (maybe_ne (TYPE_VECTOR_SUBPARTS (type),
+ TYPE_VECTOR_SUBPARTS (op0_type)))
{
error ("invalid vector comparison resulting type");
debug_generic_expr (type);
if (VECTOR_BOOLEAN_TYPE_P (lhs_type)
&& VECTOR_BOOLEAN_TYPE_P (rhs1_type)
&& types_compatible_p (rhs1_type, rhs2_type)
- && (TYPE_VECTOR_SUBPARTS (lhs_type)
- == 2 * TYPE_VECTOR_SUBPARTS (rhs1_type)))
+ && known_eq (TYPE_VECTOR_SUBPARTS (lhs_type),
+ 2 * TYPE_VECTOR_SUBPARTS (rhs1_type)))
return false;
/* Fallthru. */
case VEC_COND_EXPR:
if (!VECTOR_BOOLEAN_TYPE_P (rhs1_type)
- || TYPE_VECTOR_SUBPARTS (rhs1_type)
- != TYPE_VECTOR_SUBPARTS (lhs_type))
+ || maybe_ne (TYPE_VECTOR_SUBPARTS (rhs1_type),
+ TYPE_VECTOR_SUBPARTS (lhs_type)))
{
error ("the first argument of a VEC_COND_EXPR must be of a "
"boolean vector type of the same number of elements "
return true;
}
- if (TYPE_VECTOR_SUBPARTS (rhs1_type) != TYPE_VECTOR_SUBPARTS (rhs2_type)
- || TYPE_VECTOR_SUBPARTS (rhs2_type)
- != TYPE_VECTOR_SUBPARTS (rhs3_type)
- || TYPE_VECTOR_SUBPARTS (rhs3_type)
- != TYPE_VECTOR_SUBPARTS (lhs_type))
+ if (maybe_ne (TYPE_VECTOR_SUBPARTS (rhs1_type),
+ TYPE_VECTOR_SUBPARTS (rhs2_type))
+ || maybe_ne (TYPE_VECTOR_SUBPARTS (rhs2_type),
+ TYPE_VECTOR_SUBPARTS (rhs3_type))
+ || maybe_ne (TYPE_VECTOR_SUBPARTS (rhs3_type),
+ TYPE_VECTOR_SUBPARTS (lhs_type)))
{
error ("vectors with different element number found "
"in vector permute expression");
debug_generic_stmt (rhs1);
return true;
}
- else if (CONSTRUCTOR_NELTS (rhs1)
- * TYPE_VECTOR_SUBPARTS (elt_t)
- != TYPE_VECTOR_SUBPARTS (rhs1_type))
+ else if (maybe_ne (CONSTRUCTOR_NELTS (rhs1)
+ * TYPE_VECTOR_SUBPARTS (elt_t),
+ TYPE_VECTOR_SUBPARTS (rhs1_type)))
{
error ("incorrect number of vector CONSTRUCTOR"
" elements");
debug_generic_stmt (rhs1);
return true;
}
- else if (CONSTRUCTOR_NELTS (rhs1)
- > TYPE_VECTOR_SUBPARTS (rhs1_type))
+ else if (maybe_gt (CONSTRUCTOR_NELTS (rhs1),
+ TYPE_VECTOR_SUBPARTS (rhs1_type)))
{
error ("incorrect number of vector CONSTRUCTOR elements");
debug_generic_stmt (rhs1);
{
unsigned i;
pp_string (pp, "{ ");
- for (i = 0; i < VECTOR_CST_NELTS (node); ++i)
+ unsigned HOST_WIDE_INT nunits;
+ if (!VECTOR_CST_NELTS (node).is_constant (&nunits))
+ nunits = vector_cst_encoded_nelts (node);
+ for (i = 0; i < nunits; ++i)
{
if (i != 0)
pp_string (pp, ", ");
dump_generic_node (pp, VECTOR_CST_ELT (node, i),
spc, flags, false);
}
+ if (!VECTOR_CST_NELTS (node).is_constant ())
+ pp_string (pp, ", ...");
pp_string (pp, " }");
}
break;
&& constant_multiple_p (bit_field_offset (op), size, &idx))
{
tree p, m, tem;
- unsigned nelts;
+ unsigned HOST_WIDE_INT nelts;
m = gimple_assign_rhs3 (def_stmt);
- if (TREE_CODE (m) != VECTOR_CST)
+ if (TREE_CODE (m) != VECTOR_CST
+ || !VECTOR_CST_NELTS (m).is_constant (&nelts))
return false;
- nelts = VECTOR_CST_NELTS (m);
idx = TREE_INT_CST_LOW (VECTOR_CST_ELT (m, idx));
idx %= 2 * nelts;
if (idx < nelts)
is_combined_permutation_identity (tree mask1, tree mask2)
{
tree mask;
- unsigned int nelts, i, j;
+ unsigned HOST_WIDE_INT nelts, i, j;
bool maybe_identity1 = true;
bool maybe_identity2 = true;
mask = fold_ternary (VEC_PERM_EXPR, TREE_TYPE (mask1), mask1, mask1, mask2);
gcc_assert (TREE_CODE (mask) == VECTOR_CST);
- nelts = VECTOR_CST_NELTS (mask);
+ if (!VECTOR_CST_NELTS (mask).is_constant (&nelts))
+ return 0;
for (i = 0; i < nelts; i++)
{
tree val = VECTOR_CST_ELT (mask, i);
gimple *stmt = gsi_stmt (*gsi);
gimple *def_stmt;
tree op, op2, orig, type, elem_type;
- unsigned elem_size, nelts, i;
+ unsigned elem_size, i;
+ unsigned HOST_WIDE_INT nelts;
enum tree_code code, conv_code;
constructor_elt *elt;
bool maybe_ident;
type = TREE_TYPE (op);
gcc_checking_assert (TREE_CODE (type) == VECTOR_TYPE);
- nelts = TYPE_VECTOR_SUBPARTS (type);
+ if (!TYPE_VECTOR_SUBPARTS (type).is_constant (&nelts))
+ return false;
elem_type = TREE_TYPE (type);
elem_size = TREE_INT_CST_LOW (TYPE_SIZE (elem_type));
return false;
if (! VECTOR_TYPE_P (TREE_TYPE (orig))
- || (TYPE_VECTOR_SUBPARTS (type)
- != TYPE_VECTOR_SUBPARTS (TREE_TYPE (orig))))
+ || maybe_ne (TYPE_VECTOR_SUBPARTS (type),
+ TYPE_VECTOR_SUBPARTS (TREE_TYPE (orig))))
return false;
tree tem;
if (length == 3)
{
/* vect_grouped_store_supported ensures that this is constant. */
- unsigned int nelt = TYPE_VECTOR_SUBPARTS (vectype);
+ unsigned int nelt = TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
unsigned int j0 = 0, j1 = 0, j2 = 0;
vec_perm_builder sel (nelt, nelt, 1);
gcc_assert (pow2p_hwi (length));
/* The encoding has 2 interleaved stepped patterns. */
- unsigned int nelt = TYPE_VECTOR_SUBPARTS (vectype);
+ poly_uint64 nelt = TYPE_VECTOR_SUBPARTS (vectype);
vec_perm_builder sel (nelt, 2, 3);
sel.quick_grow (6);
for (i = 0; i < 3; i++)
perm_mask_high = vect_gen_perm_mask_checked (vectype, indices);
for (i = 0; i < 6; i++)
- sel[i] += nelt / 2;
+ sel[i] += exact_div (nelt, 2);
indices.new_vector (sel, 2, nelt);
perm_mask_low = vect_gen_perm_mask_checked (vectype, indices);
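(Worked example with illustrative values: for nelt = 8 the six encoded
selector elements are { 0, 8, 1, 9, 2, 10 }, which the encoding extends
to the full mask { 0, 8, 1, 9, 2, 10, 3, 11 } for perm_mask_high; adding
exact_div (nelt, 2) = 4 to each element gives { 4, 12, 5, 13, 6, 14, 7, 15 }
for perm_mask_low.  The same two stepped patterns describe the interleave
for any nelt, which is why six encoded elements are enough even when nelt
is a runtime value.)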
that leaves unused vector loads around punt - we at least create
very sub-optimal code in that case (and blow up memory,
see PR65518). */
- if (single_element_p && count > TYPE_VECTOR_SUBPARTS (vectype))
+ if (single_element_p && maybe_gt (count, TYPE_VECTOR_SUBPARTS (vectype)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
if (length == 3)
{
/* vect_grouped_load_supported ensures that this is constant. */
- unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype);
+ unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
unsigned int k;
vec_perm_builder sel (nelt, nelt, 1);
gcc_assert (pow2p_hwi (length));
/* The encoding has a single stepped pattern. */
- unsigned int nelt = TYPE_VECTOR_SUBPARTS (vectype);
+ poly_uint64 nelt = TYPE_VECTOR_SUBPARTS (vectype);
vec_perm_builder sel (nelt, 1, 3);
sel.quick_grow (3);
for (i = 0; i < 3; ++i)
tree vectype = STMT_VINFO_VECTYPE (vinfo_for_stmt (stmt));
unsigned int i;
- unsigned nelt = TYPE_VECTOR_SUBPARTS (vectype);
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- unsigned HOST_WIDE_INT vf;
- if (!LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
+ unsigned HOST_WIDE_INT nelt, vf;
+ if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nelt)
+ || !LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
/* Not supported for variable-length vectors. */
return false;
static unsigned int
nunits_for_known_piecewise_op (const_tree type)
{
- return TYPE_VECTOR_SUBPARTS (type);
+ return TYPE_VECTOR_SUBPARTS (type).to_constant ();
}
/* Return true if TYPE1 has more elements than TYPE2, where either
Similarly for vbfld_10 instead of x_2 < y_3. */
if (VECTOR_BOOLEAN_TYPE_P (type)
&& SCALAR_INT_MODE_P (TYPE_MODE (type))
- && (GET_MODE_BITSIZE (TYPE_MODE (type))
- < (TYPE_VECTOR_SUBPARTS (type)
- * GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (type)))))
+ && known_lt (GET_MODE_BITSIZE (TYPE_MODE (type)),
+ TYPE_VECTOR_SUBPARTS (type)
+ * GET_MODE_BITSIZE (SCALAR_TYPE_MODE (TREE_TYPE (type))))
&& (a_is_comparison
? useless_type_conversion_p (type, TREE_TYPE (a))
: expand_vec_cmp_expr_p (TREE_TYPE (a1), type, TREE_CODE (a))))
tree lhs = gimple_assign_lhs (stmt);
tree rhs = gimple_assign_rhs1 (stmt);
tree type = TREE_TYPE (rhs);
- unsigned int i, j, nelts = TYPE_VECTOR_SUBPARTS (type);
+ unsigned int i, j;
+ unsigned HOST_WIDE_INT nelts;
bool all_same = true;
constructor_elt *elt;
gimple *g;
tree base = NULL_TREE;
optab op;
- if (nelts <= 2 || CONSTRUCTOR_NELTS (rhs) != nelts)
+ if (!TYPE_VECTOR_SUBPARTS (type).is_constant (&nelts)
+ || nelts <= 2
+ || CONSTRUCTOR_NELTS (rhs) != nelts)
return;
op = optab_for_tree_code (PLUS_EXPR, type, optab_default);
if (op == unknown_optab
tree mask_type = TREE_TYPE (mask);
tree vect_elt_type = TREE_TYPE (vect_type);
tree mask_elt_type = TREE_TYPE (mask_type);
- unsigned int elements = TYPE_VECTOR_SUBPARTS (vect_type);
+ unsigned HOST_WIDE_INT elements;
vec<constructor_elt, va_gc> *v;
tree constr, t, si, i_val;
tree vec0tmp = NULL_TREE, vec1tmp = NULL_TREE, masktmp = NULL_TREE;
location_t loc = gimple_location (gsi_stmt (*gsi));
unsigned i;
+ if (!TYPE_VECTOR_SUBPARTS (vect_type).is_constant (&elements))
+ return;
+
if (TREE_CODE (mask) == SSA_NAME)
{
gimple *def_stmt = SSA_NAME_DEF_STMT (mask);
&& TREE_CODE (vec1) == VECTOR_CST
&& initializer_zerop (vec1)
&& maybe_ne (indices[0], 0)
- && known_lt (indices[0], elements))
+ && known_lt (poly_uint64 (indices[0]), elements))
{
bool ok_p = indices.series_p (0, 1, indices[0], 1);
if (!ok_p)
{
for (i = 1; i < elements; ++i)
{
- poly_int64 expected = i + indices[0];
+ poly_uint64 actual = indices[i];
+ poly_uint64 expected = i + indices[0];
/* Indices into the second vector are all equivalent. */
- if (maybe_lt (indices[i], elements)
- ? maybe_ne (indices[i], expected)
+ if (maybe_lt (actual, elements)
+ ? maybe_ne (actual, expected)
: maybe_lt (expected, elements))
break;
}
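(Illustratively, for 4-element vectors this recognizes a mask such as
{ 1, 2, 3, 4 } as a whole-vector shift: the last three elements of the
first vector followed by one element of the all-zero second vector.
The fallback loop also accepts { 1, 2, 3, 7 }, since every index into
the zero vector selects the same value.)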
= type_for_widest_vector_mode (TREE_TYPE (type), op);
if (vector_compute_type != NULL_TREE
&& subparts_gt (compute_type, vector_compute_type)
- && TYPE_VECTOR_SUBPARTS (vector_compute_type) > 1
+ && maybe_ne (TYPE_VECTOR_SUBPARTS (vector_compute_type), 1U)
&& (optab_handler (op, TYPE_MODE (vector_compute_type))
!= CODE_FOR_nothing))
compute_type = vector_compute_type;
}
if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "nunits = " HOST_WIDE_INT_PRINT_DEC "\n",
- TYPE_VECTOR_SUBPARTS (vectype));
+ {
+ dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
+ dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (vectype));
+ dump_printf (MSG_NOTE, "\n");
+ }
vect_update_max_nunits (&vectorization_factor, vectype);
}
}
if (dump_enabled_p ())
- dump_printf_loc (MSG_NOTE, vect_location,
- "nunits = " HOST_WIDE_INT_PRINT_DEC "\n",
- TYPE_VECTOR_SUBPARTS (vf_vectype));
+ {
+ dump_printf_loc (MSG_NOTE, vect_location, "nunits = ");
+ dump_dec (MSG_NOTE, TYPE_VECTOR_SUBPARTS (vf_vectype));
+ dump_printf (MSG_NOTE, "\n");
+ }
vect_update_max_nunits (&vectorization_factor, vf_vectype);
if (!mask_type)
mask_type = vectype;
- else if (TYPE_VECTOR_SUBPARTS (mask_type)
- != TYPE_VECTOR_SUBPARTS (vectype))
+ else if (maybe_ne (TYPE_VECTOR_SUBPARTS (mask_type),
+ TYPE_VECTOR_SUBPARTS (vectype)))
{
if (dump_enabled_p ())
{
scalar_type = TREE_TYPE (vector_type);
/* vectorizable_reduction has already rejected SLP reductions on
variable-length vectors. */
- nunits = TYPE_VECTOR_SUBPARTS (vector_type);
+ nunits = TYPE_VECTOR_SUBPARTS (vector_type).to_constant ();
gcc_assert (STMT_VINFO_DEF_TYPE (stmt_vinfo) == vect_reduction_def);
if (STMT_VINFO_VECTYPE (stmt_info))
{
- unsigned int nunits
- = (unsigned int)
- TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
+ poly_uint64 nunits
+ = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (stmt_info));
if (!STMT_SLP_TYPE (stmt_info)
&& maybe_ne (nunits, vf)
&& dump_enabled_p ())
vectorized matches the vector type of the result in
size and number of elements. */
unsigned prec
- = wi::udiv_trunc (wi::to_wide (TYPE_SIZE (vectype)),
- TYPE_VECTOR_SUBPARTS (vectype)).to_uhwi ();
+ = vector_element_size (tree_to_poly_uint64 (TYPE_SIZE (vectype)),
+ TYPE_VECTOR_SUBPARTS (vectype));
+
tree type
= build_nonstandard_integer_type (prec,
TYPE_UNSIGNED (TREE_TYPE (var)));
vectype2 = get_mask_type_for_scalar_type (rhs1_type);
if (!vectype1 || !vectype2
- || TYPE_VECTOR_SUBPARTS (vectype1) == TYPE_VECTOR_SUBPARTS (vectype2))
+ || known_eq (TYPE_VECTOR_SUBPARTS (vectype1),
+ TYPE_VECTOR_SUBPARTS (vectype2)))
return NULL;
tmp = build_mask_conversion (rhs1, vectype1, stmt_vinfo, vinfo);
vectype2 = get_mask_type_for_scalar_type (rhs1_type);
if (!vectype1 || !vectype2
- || TYPE_VECTOR_SUBPARTS (vectype1) == TYPE_VECTOR_SUBPARTS (vectype2))
+ || known_eq (TYPE_VECTOR_SUBPARTS (vectype1),
+ TYPE_VECTOR_SUBPARTS (vectype2)))
return NULL;
  /* If rhs1 is invariant and we can promote it leave the COND_EXPR
     in place, we can handle it in vectorizable_condition.  This avoids
     unnecessary promotion stmts and increased vectorization factor.  */
if (COMPARISON_CLASS_P (rhs1)
&& INTEGRAL_TYPE_P (rhs1_type)
- && TYPE_VECTOR_SUBPARTS (vectype1) < TYPE_VECTOR_SUBPARTS (vectype2))
+      && known_lt (TYPE_VECTOR_SUBPARTS (vectype1),
+ TYPE_VECTOR_SUBPARTS (vectype2)))
{
gimple *dummy;
enum vect_def_type dt;
stmt_vec_info group_info
= vinfo_for_stmt (SLP_TREE_SCALAR_STMTS (node)[0]);
group_info = vinfo_for_stmt (GROUP_FIRST_ELEMENT (group_info));
- unsigned nunits
- = TYPE_VECTOR_SUBPARTS (STMT_VINFO_VECTYPE (group_info));
+ unsigned HOST_WIDE_INT nunits;
unsigned k, maxk = 0;
FOR_EACH_VEC_ELT (SLP_TREE_LOAD_PERMUTATION (node), j, k)
if (k > maxk)
maxk = k;
/* In BB vectorization we may not actually use a loaded vector
accessing elements in excess of GROUP_SIZE. */
- if (maxk >= (GROUP_SIZE (group_info) & ~(nunits - 1)))
+ tree vectype = STMT_VINFO_VECTYPE (group_info);
+ if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nunits)
+ || maxk >= (GROUP_SIZE (group_info) & ~(nunits - 1)))
{
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
"BB vectorization with gaps at the end of "
else
vector_type = get_vectype_for_scalar_type (TREE_TYPE (op));
/* Enforced by vect_get_and_check_slp_defs. */
- nunits = TYPE_VECTOR_SUBPARTS (vector_type);
+ nunits = TYPE_VECTOR_SUBPARTS (vector_type).to_constant ();
if (STMT_VINFO_DATA_REF (stmt_vinfo))
{
gimple *stmt = SLP_TREE_SCALAR_STMTS (node)[0];
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
tree mask_element_type = NULL_TREE, mask_type;
- int nunits, vec_index = 0;
+ int vec_index = 0;
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
int group_size = SLP_INSTANCE_GROUP_SIZE (slp_node_instance);
- int mask_element;
+ unsigned int mask_element;
machine_mode mode;
- unsigned HOST_WIDE_INT const_vf;
+ unsigned HOST_WIDE_INT nunits, const_vf;
if (!STMT_VINFO_GROUPED_ACCESS (stmt_info))
return false;
mode = TYPE_MODE (vectype);
/* At the moment, all permutations are represented using per-element
- indices, so we can't cope with variable vectorization factors. */
- if (!vf.is_constant (&const_vf))
+ indices, so we can't cope with variable vector lengths or
+ vectorization factors. */
+ if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nunits)
+ || !vf.is_constant (&const_vf))
return false;
/* The generic VEC_PERM_EXPR code always uses an integral type of the
mask_element_type = lang_hooks.types.type_for_mode
(int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))).require (), 1);
mask_type = get_vectype_for_scalar_type (mask_element_type);
- nunits = TYPE_VECTOR_SUBPARTS (vectype);
vec_perm_builder mask (nunits, nunits, 1);
mask.quick_grow (nunits);
vec_perm_indices indices;
{c2,a3,b3,c3}. */
int vect_stmts_counter = 0;
- int index = 0;
+ unsigned int index = 0;
int first_vec_index = -1;
int second_vec_index = -1;
bool noop_p = true;
{
for (int k = 0; k < group_size; k++)
{
- int i = (SLP_TREE_LOAD_PERMUTATION (node)[k]
- + j * STMT_VINFO_GROUP_SIZE (stmt_info));
+ unsigned int i = (SLP_TREE_LOAD_PERMUTATION (node)[k]
+ + j * STMT_VINFO_GROUP_SIZE (stmt_info));
vec_index = i / nunits;
mask_element = i % nunits;
if (vec_index == first_vec_index
return false;
}
- gcc_assert (mask_element >= 0
- && mask_element < 2 * nunits);
+ gcc_assert (mask_element < 2 * nunits);
if (mask_element != index)
noop_p = false;
mask[index++] = mask_element;
static tree
perm_mask_for_reverse (tree vectype)
{
- int i, nunits;
-
- nunits = TYPE_VECTOR_SUBPARTS (vectype);
+ poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
/* The encoding has a single stepped pattern. */
vec_perm_builder sel (nunits, 1, 3);
- for (i = 0; i < 3; ++i)
+ for (int i = 0; i < 3; ++i)
sel.quick_push (nunits - 1 - i);
vec_perm_indices indices (sel, 1, nunits);
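(Worked example with illustrative values: the three encoded elements are
nunits - 1, nunits - 2 and nunits - 3, and the encoding continues the
series downwards, so for nunits = 8 the full selector is
{ 7, 6, 5, 4, 3, 2, 1, 0 }.  The same three elements describe the
reversal equally well when nunits is a runtime value such as 4 + 4x.)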
bool single_element_p = (stmt == first_stmt
&& !GROUP_NEXT_ELEMENT (stmt_info));
unsigned HOST_WIDE_INT gap = GROUP_GAP (vinfo_for_stmt (first_stmt));
- unsigned nunits = TYPE_VECTOR_SUBPARTS (vectype);
+ poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
/* True if the vectorized statements would access beyond the last
statement in the group. */
/* Try to use consecutive accesses of GROUP_SIZE elements,
separated by the stride, until we have a complete vector.
Fall back to scalar accesses if that isn't possible. */
- if (nunits % group_size == 0)
+ if (multiple_p (nunits, group_size))
*memory_access_type = VMAT_STRIDED_SLP;
else
*memory_access_type = VMAT_ELEMENTWISE;
mask_vectype = get_mask_type_for_scalar_type (TREE_TYPE (vectype));
if (!mask_vectype || !VECTOR_BOOLEAN_TYPE_P (mask_vectype)
- || TYPE_VECTOR_SUBPARTS (mask_vectype) != TYPE_VECTOR_SUBPARTS (vectype))
+ || maybe_ne (TYPE_VECTOR_SUBPARTS (mask_vectype),
+ TYPE_VECTOR_SUBPARTS (vectype)))
return false;
if (gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
{
- gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
- == TYPE_VECTOR_SUBPARTS (idxtype));
+ gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
+ TYPE_VECTOR_SUBPARTS (idxtype)));
var = vect_get_new_ssa_name (idxtype, vect_simple_var);
op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
new_stmt
mask_op = vec_mask;
if (!useless_type_conversion_p (masktype, TREE_TYPE (vec_mask)))
{
- gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op))
- == TYPE_VECTOR_SUBPARTS (masktype));
+ gcc_assert
+ (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask_op)),
+ TYPE_VECTOR_SUBPARTS (masktype)));
var = vect_get_new_ssa_name (masktype, vect_simple_var);
mask_op = build1 (VIEW_CONVERT_EXPR, masktype, mask_op);
new_stmt
if (!useless_type_conversion_p (vectype, rettype))
{
- gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
- == TYPE_VECTOR_SUBPARTS (rettype));
+ gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype),
+ TYPE_VECTOR_SUBPARTS (rettype)));
op = vect_get_new_ssa_name (rettype, vect_simple_var);
gimple_call_set_lhs (new_stmt, op);
vect_finish_stmt_generation (stmt, new_stmt, gsi);
tree op, vectype;
stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
- unsigned ncopies, nunits;
+ unsigned ncopies;
+ unsigned HOST_WIDE_INT nunits, num_bytes;
op = gimple_call_arg (stmt, 0);
vectype = STMT_VINFO_VECTYPE (stmt_info);
- nunits = TYPE_VECTOR_SUBPARTS (vectype);
+
+ if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nunits))
+ return false;
/* Multiple types in SLP are handled by creating the appropriate number of
vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
if (! char_vectype)
return false;
- unsigned int num_bytes = TYPE_VECTOR_SUBPARTS (char_vectype);
+ if (!TYPE_VECTOR_SUBPARTS (char_vectype).is_constant (&num_bytes))
+ return false;
+
unsigned word_bytes = num_bytes / nunits;
/* The encoding uses one stepped pattern for each byte in the word. */
static unsigned HOST_WIDE_INT
simd_clone_subparts (tree vectype)
{
- return TYPE_VECTOR_SUBPARTS (vectype);
+ return TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
}
/* Function vectorizable_simd_clone_call.
op = TREE_OPERAND (op, 0);
tree vectype = STMT_VINFO_VECTYPE (stmt_info);
- unsigned int nunits = TYPE_VECTOR_SUBPARTS (vectype);
+ poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
/* Multiple types in SLP are handled by creating the appropriate number of
vectorized stmts for each SLP node. Hence, NCOPIES is always 1 in
if ((CONVERT_EXPR_CODE_P (code)
|| code == VIEW_CONVERT_EXPR)
&& (!vectype_in
- || TYPE_VECTOR_SUBPARTS (vectype_in) != nunits
+ || maybe_ne (TYPE_VECTOR_SUBPARTS (vectype_in), nunits)
|| (GET_MODE_SIZE (TYPE_MODE (vectype))
!= GET_MODE_SIZE (TYPE_MODE (vectype_in)))))
return false;
int ndts = 2;
gimple *new_stmt = NULL;
stmt_vec_info prev_stmt_info;
- int nunits_in;
- int nunits_out;
+ poly_uint64 nunits_in;
+ poly_uint64 nunits_out;
tree vectype_out;
tree op1_vectype;
int ncopies;
nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
- if (nunits_out != nunits_in)
+ if (maybe_ne (nunits_out, nunits_in))
return false;
op1 = gimple_assign_rhs2 (stmt);
int ndts = 3;
gimple *new_stmt = NULL;
stmt_vec_info prev_stmt_info;
- int nunits_in;
- int nunits_out;
+ poly_uint64 nunits_in;
+ poly_uint64 nunits_out;
tree vectype_out;
int ncopies;
int j, i;
nunits_out = TYPE_VECTOR_SUBPARTS (vectype_out);
nunits_in = TYPE_VECTOR_SUBPARTS (vectype);
- if (nunits_out != nunits_in)
+ if (maybe_ne (nunits_out, nunits_in))
return false;
if (op_type == binary_op || op_type == ternary_op)
if (!useless_type_conversion_p (srctype, TREE_TYPE (src)))
{
- gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src))
- == TYPE_VECTOR_SUBPARTS (srctype));
+ gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (src)),
+ TYPE_VECTOR_SUBPARTS (srctype)));
var = vect_get_new_ssa_name (srctype, vect_simple_var);
src = build1 (VIEW_CONVERT_EXPR, srctype, src);
new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, src);
if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
{
- gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
- == TYPE_VECTOR_SUBPARTS (idxtype));
+ gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
+ TYPE_VECTOR_SUBPARTS (idxtype)));
var = vect_get_new_ssa_name (idxtype, vect_simple_var);
op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
new_stmt = gimple_build_assign (var, VIEW_CONVERT_EXPR, op);
if (!useless_type_conversion_p (idxtype, TREE_TYPE (op)))
{
- gcc_assert (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op))
- == TYPE_VECTOR_SUBPARTS (idxtype));
+ gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (TREE_TYPE (op)),
+ TYPE_VECTOR_SUBPARTS (idxtype)));
var = vect_get_new_ssa_name (idxtype, vect_simple_var);
op = build1 (VIEW_CONVERT_EXPR, idxtype, op);
new_stmt
if (!useless_type_conversion_p (vectype, rettype))
{
- gcc_assert (TYPE_VECTOR_SUBPARTS (vectype)
- == TYPE_VECTOR_SUBPARTS (rettype));
+ gcc_assert (known_eq (TYPE_VECTOR_SUBPARTS (vectype),
+ TYPE_VECTOR_SUBPARTS (rettype)));
op = vect_get_new_ssa_name (rettype, vect_simple_var);
gimple_call_set_lhs (new_stmt, op);
vect_finish_stmt_generation (stmt, new_stmt, gsi);
return false;
if (vectype1 && vectype2
- && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2))
+ && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
+ TYPE_VECTOR_SUBPARTS (vectype2)))
return false;
*comp_vectype = vectype1 ? vectype1 : vectype2;
loop_vec_info loop_vinfo = STMT_VINFO_LOOP_VINFO (stmt_info);
enum vect_def_type dts[2] = {vect_unknown_def_type, vect_unknown_def_type};
int ndts = 2;
- unsigned nunits;
+ poly_uint64 nunits;
int ncopies;
enum tree_code code, bitop1 = NOP_EXPR, bitop2 = NOP_EXPR;
stmt_vec_info prev_stmt_info = NULL;
return false;
if (vectype1 && vectype2
- && TYPE_VECTOR_SUBPARTS (vectype1) != TYPE_VECTOR_SUBPARTS (vectype2))
+ && maybe_ne (TYPE_VECTOR_SUBPARTS (vectype1),
+ TYPE_VECTOR_SUBPARTS (vectype2)))
return false;
vectype = vectype1 ? vectype1 : vectype2;
if (!vectype)
{
vectype = get_vectype_for_scalar_type (TREE_TYPE (rhs1));
- if (TYPE_VECTOR_SUBPARTS (vectype) != nunits)
+ if (maybe_ne (TYPE_VECTOR_SUBPARTS (vectype), nunits))
return false;
}
- else if (nunits != TYPE_VECTOR_SUBPARTS (vectype))
+ else if (maybe_ne (nunits, TYPE_VECTOR_SUBPARTS (vectype)))
return false;
/* Can't compare mask and non-mask types. */
vector types having the same QImode. Thus we
add additional check for elements number. */
return (!VECTOR_BOOLEAN_TYPE_P (vectype)
- || (TYPE_VECTOR_SUBPARTS (vectype) / 2
- == TYPE_VECTOR_SUBPARTS (wide_vectype)));
+ || known_eq (TYPE_VECTOR_SUBPARTS (vectype),
+ TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));
/* Check if it's a multi-step conversion that can be done using intermediate
types. */
intermediate_mode = insn_data[icode1].operand[0].mode;
if (VECTOR_BOOLEAN_TYPE_P (prev_type))
{
+ poly_uint64 intermediate_nelts
+ = exact_div (TYPE_VECTOR_SUBPARTS (prev_type), 2);
intermediate_type
- = build_truth_vector_type (TYPE_VECTOR_SUBPARTS (prev_type) / 2,
+ = build_truth_vector_type (intermediate_nelts,
current_vector_size);
if (intermediate_mode != TYPE_MODE (intermediate_type))
return false;
if (insn_data[icode1].operand[0].mode == TYPE_MODE (wide_vectype)
&& insn_data[icode2].operand[0].mode == TYPE_MODE (wide_vectype))
return (!VECTOR_BOOLEAN_TYPE_P (vectype)
- || (TYPE_VECTOR_SUBPARTS (intermediate_type) / 2
- == TYPE_VECTOR_SUBPARTS (wide_vectype)));
+ || known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type),
+ TYPE_VECTOR_SUBPARTS (wide_vectype) * 2));
prev_type = intermediate_type;
prev_mode = intermediate_mode;
vector types having the same QImode. Thus we
add additional check for elements number. */
return (!VECTOR_BOOLEAN_TYPE_P (vectype)
- || (TYPE_VECTOR_SUBPARTS (vectype) * 2
- == TYPE_VECTOR_SUBPARTS (narrow_vectype)));
+ || known_eq (TYPE_VECTOR_SUBPARTS (vectype) * 2,
+ TYPE_VECTOR_SUBPARTS (narrow_vectype)));
/* Check if it's a multi-step conversion that can be done using intermediate
types. */
if (insn_data[icode1].operand[0].mode == TYPE_MODE (narrow_vectype))
return (!VECTOR_BOOLEAN_TYPE_P (vectype)
- || (TYPE_VECTOR_SUBPARTS (intermediate_type) * 2
- == TYPE_VECTOR_SUBPARTS (narrow_vectype)));
+ || known_eq (TYPE_VECTOR_SUBPARTS (intermediate_type) * 2,
+ TYPE_VECTOR_SUBPARTS (narrow_vectype)));
prev_mode = intermediate_mode;
prev_type = intermediate_type;
unsigned int
tree_vector_builder::binary_encoded_nelts (tree t1, tree t2)
{
- unsigned int nelts = TYPE_VECTOR_SUBPARTS (TREE_TYPE (t1));
- gcc_assert (nelts == TYPE_VECTOR_SUBPARTS (TREE_TYPE (t2)));
+ poly_uint64 nelts = TYPE_VECTOR_SUBPARTS (TREE_TYPE (t1));
+ gcc_assert (known_eq (nelts, TYPE_VECTOR_SUBPARTS (TREE_TYPE (t2))));
/* See new_binary_operation for details. */
unsigned int npatterns = least_common_multiple (VECTOR_CST_NPATTERNS (t1),
VECTOR_CST_NPATTERNS (t2));
unsigned int nelts_per_pattern = MAX (VECTOR_CST_NELTS_PER_PATTERN (t1),
VECTOR_CST_NELTS_PER_PATTERN (t2));
- return MIN (npatterns * nelts_per_pattern, nelts);
+ unsigned HOST_WIDE_INT const_nelts;
+ if (nelts.is_constant (&const_nelts))
+ return MIN (npatterns * nelts_per_pattern, const_nelts);
+ return npatterns * nelts_per_pattern;
}
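The is_constant (&const_nelts) pattern used here recurs throughout the patch: when the element count is a compile-time constant, take the old fixed-length path with a plain integer; otherwise carry on computing with the poly_uint64 symbolically. An illustrative sketch of the idiom (use_fixed_path and use_variable_path are hypothetical placeholders, not GCC functions):

    poly_uint64 nelts = TYPE_VECTOR_SUBPARTS (type);
    unsigned HOST_WIDE_INT const_nelts;
    if (nelts.is_constant (&const_nelts))
      use_fixed_path (const_nelts);   /* element count known now */
    else
      use_variable_path (nelts);      /* element count known at runtime */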
/* Return a vector element with the value BASE + FACTOR * STEP. */
tree
build_vector_from_ctor (tree type, vec<constructor_elt, va_gc> *v)
{
- unsigned int nelts = TYPE_VECTOR_SUBPARTS (type);
- unsigned HOST_WIDE_INT idx;
+ unsigned HOST_WIDE_INT idx, nelts;
tree value;
+ /* We can't construct a VECTOR_CST for a variable number of elements. */
+ nelts = TYPE_VECTOR_SUBPARTS (type).to_constant ();
tree_vector_builder vec (type, nelts, 1);
FOR_EACH_CONSTRUCTOR_VALUE (v, idx, value)
{
if (TREE_CODE (value) == VECTOR_CST)
- for (unsigned i = 0; i < VECTOR_CST_NELTS (value); ++i)
- vec.quick_push (VECTOR_CST_ELT (value, i));
+ {
+ /* If NELTS is constant then this must be too. */
+ unsigned int sub_nelts = VECTOR_CST_NELTS (value).to_constant ();
+ for (unsigned i = 0; i < sub_nelts; ++i)
+ vec.quick_push (VECTOR_CST_ELT (value, i));
+ }
else
vec.quick_push (value);
}
/* Build a vector of type VECTYPE where all the elements are SCs. */
tree
build_vector_from_val (tree vectype, tree sc)
{
- int i, nunits = TYPE_VECTOR_SUBPARTS (vectype);
+ unsigned HOST_WIDE_INT i, nunits;
if (sc == error_mark_node)
return sc;
v.quick_push (sc);
return v.build ();
}
- else if (0)
+ else if (!TYPE_VECTOR_SUBPARTS (vectype).is_constant (&nunits))
return fold_build1 (VEC_DUPLICATE_EXPR, vectype, sc);
else
{
}
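The constant branch above works for any vector length because tree_vector_builder encodes a VECTOR_CST as repeating patterns rather than as a full element list: a splat needs one pattern containing one element, however many elements the vector ends up having. A sketch of that builder call (GCC internals, shown for illustration):

    /* One pattern, one element per pattern: a splat of SC that is
       valid even when TYPE_VECTOR_SUBPARTS (vectype) is variable.  */
    tree_vector_builder v (vectype, /*npatterns=*/1,
                           /*nelts_per_pattern=*/1);
    v.quick_push (sc);
    tree splat = v.build ();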
case VECTOR_TYPE:
- {
- unsigned nunits = TYPE_VECTOR_SUBPARTS (type);
- hstate.add_object (nunits);
- break;
- }
+ hstate.add_poly_int (TYPE_VECTOR_SUBPARTS (type));
+ break;
default:
break;
return 1;
case VECTOR_TYPE:
- return TYPE_VECTOR_SUBPARTS (a->type) == TYPE_VECTOR_SUBPARTS (b->type);
+ return known_eq (TYPE_VECTOR_SUBPARTS (a->type),
+ TYPE_VECTOR_SUBPARTS (b->type));
case ENUMERAL_TYPE:
if (TYPE_VALUES (a->type) != TYPE_VALUES (b->type)
t = make_node (VECTOR_TYPE);
TREE_TYPE (t) = mv_innertype;
- SET_TYPE_VECTOR_SUBPARTS (t, nunits.to_constant ()); /* Temporary */
+ SET_TYPE_VECTOR_SUBPARTS (t, nunits);
SET_TYPE_MODE (t, mode);
if (TYPE_STRUCTURAL_EQUALITY_P (mv_innertype) || in_lto_p)
a power of two. */
tree
-build_vector_type (tree innertype, int nunits)
+build_vector_type (tree innertype, poly_uint64 nunits)
{
return make_vector_type (innertype, nunits, VOIDmode);
}
/* Similarly, but builds a variant type with TYPE_VECTOR_OPAQUE set. */
tree
-build_opaque_vector_type (tree innertype, int nunits)
+build_opaque_vector_type (tree innertype, poly_uint64 nunits)
{
tree t = make_vector_type (innertype, nunits, VOIDmode);
tree cand;
uniform_vector_p (const_tree vec)
{
tree first, t;
- unsigned i;
+ unsigned HOST_WIDE_INT i, nelts;
if (vec == NULL_TREE)
return NULL_TREE;
return NULL_TREE;
}
- else if (TREE_CODE (vec) == CONSTRUCTOR)
+ else if (TREE_CODE (vec) == CONSTRUCTOR
+ && TYPE_VECTOR_SUBPARTS (TREE_TYPE (vec)).is_constant (&nelts))
{
first = error_mark_node;
if (!operand_equal_p (first, t, 0))
return NULL_TREE;
}
- if (i != TYPE_VECTOR_SUBPARTS (TREE_TYPE (vec)))
+ if (i != nelts)
return NULL_TREE;
return first;
/* For integers, try mapping it to a same-sized scalar mode. */
if (is_int_mode (TREE_TYPE (t)->type_common.mode, &innermode))
{
- unsigned int size = (TYPE_VECTOR_SUBPARTS (t)
- * GET_MODE_BITSIZE (innermode));
+ poly_int64 size = (TYPE_VECTOR_SUBPARTS (t)
+ * GET_MODE_BITSIZE (innermode));
scalar_int_mode mode;
if (int_mode_for_size (size, 0).exists (&mode)
&& have_regs_of_mode[mode])
static tree
build_vector (tree type, vec<tree> vals MEM_STAT_DECL)
{
- gcc_assert (vals.length () == TYPE_VECTOR_SUBPARTS (type));
+ gcc_assert (known_eq (vals.length (), TYPE_VECTOR_SUBPARTS (type)));
tree_vector_builder builder (type, vals.length (), 1);
builder.splice (vals);
return builder.build ();
static void
check_vector_cst (vec<tree> expected, tree actual)
{
- ASSERT_EQ (expected.length (), TYPE_VECTOR_SUBPARTS (TREE_TYPE (actual)));
+ ASSERT_KNOWN_EQ (expected.length (),
+ TYPE_VECTOR_SUBPARTS (TREE_TYPE (actual)));
for (unsigned int i = 0; i < expected.length (); ++i)
ASSERT_EQ (wi::to_wide (expected[i]),
wi::to_wide (vector_cst_elt (actual, i)));
If set in an INTEGER_TYPE, indicates a character type. */
#define TYPE_STRING_FLAG(NODE) (TYPE_CHECK (NODE)->type_common.string_flag)
-/* For a VECTOR_TYPE, this is the number of sub-parts of the vector. */
-#define TYPE_VECTOR_SUBPARTS(VECTOR_TYPE) \
- (HOST_WIDE_INT_1U \
- << VECTOR_TYPE_CHECK (VECTOR_TYPE)->type_common.precision)
-
-/* Set precision to n when we have 2^n sub-parts of the vector. */
-#define SET_TYPE_VECTOR_SUBPARTS(VECTOR_TYPE, X) \
- (VECTOR_TYPE_CHECK (VECTOR_TYPE)->type_common.precision = exact_log2 (X))
-
/* Nonzero in a VECTOR_TYPE if the frontends should not emit warnings
about missing conversions to other vector types of the same size. */
#define TYPE_VECTOR_OPAQUE(NODE) \
return !strcmp (str, IDENTIFIER_POINTER (id));
}
+/* Return the number of elements in the VECTOR_TYPE given by NODE. */
+
+inline poly_uint64
+TYPE_VECTOR_SUBPARTS (const_tree node)
+{
+ STATIC_ASSERT (NUM_POLY_INT_COEFFS <= 2);
+ unsigned int precision = VECTOR_TYPE_CHECK (node)->type_common.precision;
+ if (NUM_POLY_INT_COEFFS == 2)
+ {
+ poly_uint64 res = 0;
+ res.coeffs[0] = HOST_WIDE_INT_1U << (precision & 0xff);
+ if (precision & 0x100)
+ res.coeffs[1] = HOST_WIDE_INT_1U << (precision & 0xff);
+ return res;
+ }
+ else
+ return HOST_WIDE_INT_1U << precision;
+}
+
+/* Set the number of elements in VECTOR_TYPE NODE to SUBPARTS, which must
+ satisfy valid_vector_subparts_p. */
+
+inline void
+SET_TYPE_VECTOR_SUBPARTS (tree node, poly_uint64 subparts)
+{
+ STATIC_ASSERT (NUM_POLY_INT_COEFFS <= 2);
+ unsigned HOST_WIDE_INT coeff0 = subparts.coeffs[0];
+ int index = exact_log2 (coeff0);
+ gcc_assert (index >= 0);
+ if (NUM_POLY_INT_COEFFS == 2)
+ {
+ unsigned HOST_WIDE_INT coeff1 = subparts.coeffs[1];
+ gcc_assert (coeff1 == 0 || coeff1 == coeff0);
+ VECTOR_TYPE_CHECK (node)->type_common.precision
+ = index + (coeff1 != 0 ? 0x100 : 0);
+ }
+ else
+ VECTOR_TYPE_CHECK (node)->type_common.precision = index;
+}
+
+/* Return true if we can construct vector types with the given number
+ of subparts. */
+
+static inline bool
+valid_vector_subparts_p (poly_uint64 subparts)
+{
+ unsigned HOST_WIDE_INT coeff0 = subparts.coeffs[0];
+ if (!pow2p_hwi (coeff0))
+ return false;
+ if (NUM_POLY_INT_COEFFS == 2)
+ {
+ unsigned HOST_WIDE_INT coeff1 = subparts.coeffs[1];
+ if (coeff1 != 0 && coeff1 != coeff0)
+ return false;
+ }
+ return true;
+}
+
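The encoding used by these three functions packs log2 (coeff0) into the low bits of the 10-bit precision field and uses bit 8 as a "coeff1 equals coeff0" flag; that is enough because valid_vector_subparts_p only accepts counts of the form N or N + N*X with N a power of two. A self-contained check of the arithmetic (all names local to this sketch):

    #include <cassert>
    #include <cstdint>

    static unsigned encode (uint64_t coeff0, uint64_t coeff1)
    {
      assert (coeff0 != 0 && (coeff0 & (coeff0 - 1)) == 0); /* power of 2 */
      assert (coeff1 == 0 || coeff1 == coeff0);
      unsigned index = __builtin_ctzll (coeff0);            /* exact_log2 */
      return index + (coeff1 != 0 ? 0x100 : 0);
    }

    int main ()
    {
      assert (encode (4, 0) == 2);      /* e.g. V4SI: always 4 elements.  */
      assert (encode (4, 4) == 0x102);  /* e.g. VNx4SI: 4 + 4*X elements.  */
      return 0;
    }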
#define error_mark_node global_trees[TI_ERROR_MARK]
#define intQI_type_node global_trees[TI_INTQI_TYPE]
extern tree build_reference_type_for_mode (tree, machine_mode, bool);
extern tree build_reference_type (tree);
extern tree build_vector_type_for_mode (tree, machine_mode);
-extern tree build_vector_type (tree innertype, int nunits);
-/* Temporary. */
-inline tree
-build_vector_type (tree innertype, poly_uint64 nunits)
-{
- return build_vector_type (innertype, (int) nunits.to_constant ());
-}
+extern tree build_vector_type (tree, poly_uint64);
extern tree build_truth_vector_type (poly_uint64, poly_uint64);
extern tree build_same_sized_truth_vector_type (tree vectype);
-extern tree build_opaque_vector_type (tree innertype, int nunits);
+extern tree build_opaque_vector_type (tree, poly_uint64);
extern tree build_index_type (tree);
extern tree build_array_type (tree, tree, bool = false);
extern tree build_nonshared_array_type (tree, tree);
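With the temporary inline shim gone, callers hand the element count straight to build_vector_type; a fixed count still converts implicitly to poly_uint64, so existing call sites compile unchanged. A hedged usage sketch (other_vectype stands for any pre-existing vector type):

    tree v4si = build_vector_type (intSI_type_node, 4);
    /* A possibly variable-length count can now be forwarded as-is.  */
    tree copy = build_vector_type (intSI_type_node,
                                   TYPE_VECTOR_SUBPARTS (other_vectype));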
output_constant (VECTOR_CST_ELT (exp, 0), elt_size, align,
reverse);
thissize = elt_size;
- for (unsigned int i = 1; i < VECTOR_CST_NELTS (exp); i++)
+ /* Static constants must have a fixed size. */
+ unsigned int nunits = VECTOR_CST_NELTS (exp).to_constant ();
+ for (unsigned int i = 1; i < nunits; i++)
{
output_constant (VECTOR_CST_ELT (exp, i), elt_size, nalign,
reverse);
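to_constant () is the asserting counterpart of is_constant (): it is reserved for places like output_constant above where a variable element count would be a bug, since a static initializer cannot contain a runtime-sized vector. In terms of the earlier poly2 model:

    /* Valid only when the indeterminate coefficient is zero; the real
       poly-int version enforces this with a checking assert.  */
    static uint64_t to_constant_model (poly2 a)
    {
      assert (a.c1 == 0);
      return a.c0;
    }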