+2019-11-14 Richard Sandiford <richard.sandiford@arm.com>
+
+ * tree.h (build_truth_vector_type_for_mode): Declare.
+ * tree.c (build_truth_vector_type_for_mode): New function,
+ split out from...
+ (build_truth_vector_type): ...here.
+ (build_opaque_vector_type): Fix head comment.
+ * tree-vectorizer.h (supportable_narrowing_operation): Remove
+ vec_info parameter.
+ (vect_halve_mask_nunits): Replace vec_info parameter with the
+ mode of the new vector.
+ (vect_double_mask_nunits): Likewise.
+ * tree-vect-loop.c (vect_halve_mask_nunits): Likewise.
+ (vect_double_mask_nunits): Likewise.
+ * tree-vect-loop-manip.c: Include insn-config.h, rtl.h and recog.h.
+ (vect_maybe_permute_loop_masks): Remove loop_vinfo parameter. Update call
+ to vect_halve_mask_nunits, getting the required mode from the unpack
+ patterns.
+ (vect_set_loop_condition_masked): Update call accordingly.
+ * tree-vect-stmts.c (supportable_narrowing_operation): Remove vec_info
+ parameter and update call to vect_double_mask_nunits.
+ (vectorizable_conversion): Update call accordingly.
+ (simple_integer_narrowing): Likewise. Remove vec_info parameter.
+ (vectorizable_call): Update call accordingly.
+ (supportable_widening_operation): Update call to
+ vect_halve_mask_nunits.
+ * config/aarch64/aarch64-sve-builtins.cc (register_builtin_types):
+ Use build_truth_vector_type_for_mode instead of build_truth_vector_type.
+
2019-11-14 Richard Sandiford <richard.sandiford@arm.com>
* machmode.h (mode_for_int_vector): Delete.
tree vectype;
if (eltype == boolean_type_node)
{
- vectype = build_truth_vector_type (BYTES_PER_SVE_VECTOR,
- BYTES_PER_SVE_VECTOR);
+ vectype = build_truth_vector_type_for_mode (BYTES_PER_SVE_VECTOR,
+ VNx16BImode);
gcc_assert (TYPE_MODE (vectype) == VNx16BImode
&& TYPE_MODE (vectype) == TYPE_MODE_RAW (vectype)
&& TYPE_ALIGN (vectype) == 16
#include "stor-layout.h"
#include "optabs-query.h"
#include "vec-perm-indices.h"
+#include "insn-config.h"
+#include "rtl.h"
+#include "recog.h"
/*************************************************************************
Simple Loop Peeling Utilities
latter. Return true on success, adding any new statements to SEQ. */
static bool
-vect_maybe_permute_loop_masks (loop_vec_info loop_vinfo, gimple_seq *seq,
- rgroup_masks *dest_rgm,
+vect_maybe_permute_loop_masks (gimple_seq *seq, rgroup_masks *dest_rgm,
rgroup_masks *src_rgm)
{
tree src_masktype = src_rgm->mask_type;
tree dest_masktype = dest_rgm->mask_type;
machine_mode src_mode = TYPE_MODE (src_masktype);
+ insn_code icode1, icode2;
if (dest_rgm->max_nscalars_per_iter <= src_rgm->max_nscalars_per_iter
- && optab_handler (vec_unpacku_hi_optab, src_mode) != CODE_FOR_nothing
- && optab_handler (vec_unpacku_lo_optab, src_mode) != CODE_FOR_nothing)
+ && (icode1 = optab_handler (vec_unpacku_hi_optab,
+ src_mode)) != CODE_FOR_nothing
+ && (icode2 = optab_handler (vec_unpacku_lo_optab,
+ src_mode)) != CODE_FOR_nothing)
{
/* Unpacking the source masks gives at least as many mask bits as
we need. We can then VIEW_CONVERT any excess bits away. */
- tree unpack_masktype = vect_halve_mask_nunits (loop_vinfo, src_masktype);
+ machine_mode dest_mode = insn_data[icode1].operand[0].mode;
+ gcc_assert (dest_mode == insn_data[icode2].operand[0].mode);
+ tree unpack_masktype = vect_halve_mask_nunits (src_masktype, dest_mode);
for (unsigned int i = 0; i < dest_rgm->masks.length (); ++i)
{
tree src = src_rgm->masks[i / 2];
{
rgroup_masks *half_rgm = &(*masks)[nmasks / 2 - 1];
if (!half_rgm->masks.is_empty ()
- && vect_maybe_permute_loop_masks (loop_vinfo, &header_seq,
- rgm, half_rgm))
+ && vect_maybe_permute_loop_masks (&header_seq, rgm, half_rgm))
continue;
}
return false;
}
-/* Return a mask type with half the number of elements as TYPE. */
+/* Return a mask type with half the number of elements as OLD_TYPE,
+ given that it should have mode NEW_MODE. */
tree
-vect_halve_mask_nunits (vec_info *vinfo, tree type)
+vect_halve_mask_nunits (tree old_type, machine_mode new_mode)
{
- poly_uint64 nunits = exact_div (TYPE_VECTOR_SUBPARTS (type), 2);
- return build_truth_vector_type (nunits, vinfo->vector_size);
+ poly_uint64 nunits = exact_div (TYPE_VECTOR_SUBPARTS (old_type), 2);
+ return build_truth_vector_type_for_mode (nunits, new_mode);
}
-/* Return a mask type with twice as many elements as TYPE. */
+/* Return a mask type with twice as many elements as OLD_TYPE,
+ given that it should have mode NEW_MODE. */
tree
-vect_double_mask_nunits (vec_info *vinfo, tree type)
+vect_double_mask_nunits (tree old_type, machine_mode new_mode)
{
- poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (type) * 2;
- return build_truth_vector_type (nunits, vinfo->vector_size);
+ poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (old_type) * 2;
+ return build_truth_vector_type_for_mode (nunits, new_mode);
}
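[Not part of the patch: a minimal caller-side sketch of the new interface. The mode of the narrower or wider mask vector is now taken from the instruction that will produce it, typically via insn_data, instead of being recomputed from vinfo->vector_size. ICODE and SRC_MASK_TYPE below are assumed to be an already-validated optab handler and the current mask type.]

  /* Sketch only: derive the new mask mode from the optab's output operand
     and build the halved mask type for it.  */
  machine_mode new_mask_mode = insn_data[icode].operand[0].mode;
  tree half_mask_type = vect_halve_mask_nunits (src_mask_type, new_mask_mode);
  /* The result has half the units of SRC_MASK_TYPE and mode NEW_MASK_MODE,
     matching what the unpack instruction produces.  */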
/* Record that a fully-masked version of LOOP_VINFO would need MASKS to
*CONVERT_CODE. */
static bool
-simple_integer_narrowing (vec_info *vinfo, tree vectype_out, tree vectype_in,
+simple_integer_narrowing (tree vectype_out, tree vectype_in,
tree_code *convert_code)
{
if (!INTEGRAL_TYPE_P (TREE_TYPE (vectype_out))
tree_code code;
int multi_step_cvt = 0;
auto_vec <tree, 8> interm_types;
- if (!supportable_narrowing_operation (vinfo, NOP_EXPR, vectype_out,
- vectype_in, &code, &multi_step_cvt,
- &interm_types)
+ if (!supportable_narrowing_operation (NOP_EXPR, vectype_out, vectype_in,
+ &code, &multi_step_cvt, &interm_types)
|| multi_step_cvt)
return false;
if (cfn != CFN_LAST
&& (modifier == NONE
|| (modifier == NARROW
- && simple_integer_narrowing (vinfo, vectype_out, vectype_in,
+ && simple_integer_narrowing (vectype_out, vectype_in,
&convert_code))))
ifn = vectorizable_internal_function (cfn, callee, vectype_out,
vectype_in);
case NARROW:
gcc_assert (op_type == unary_op);
- if (supportable_narrowing_operation (vinfo, code, vectype_out,
- vectype_in, &code1, &multi_step_cvt,
+ if (supportable_narrowing_operation (code, vectype_out, vectype_in,
+ &code1, &multi_step_cvt,
&interm_types))
break;
if (!supportable_convert_operation (code, cvt_type, vectype_in,
&decl1, &codecvt1))
goto unsupported;
- if (supportable_narrowing_operation (vinfo, NOP_EXPR, vectype_out,
- cvt_type, &code1, &multi_step_cvt,
+ if (supportable_narrowing_operation (NOP_EXPR, vectype_out, cvt_type,
+ &code1, &multi_step_cvt,
&interm_types))
break;
goto unsupported;
int *multi_step_cvt,
vec<tree> *interm_types)
{
- vec_info *vinfo = stmt_info->vinfo;
loop_vec_info loop_info = STMT_VINFO_LOOP_VINFO (stmt_info);
class loop *vect_loop = NULL;
machine_mode vec_mode;
{
intermediate_mode = insn_data[icode1].operand[0].mode;
if (VECTOR_BOOLEAN_TYPE_P (prev_type))
- {
- intermediate_type = vect_halve_mask_nunits (vinfo, prev_type);
- if (intermediate_mode != TYPE_MODE (intermediate_type))
- return false;
- }
+ intermediate_type
+ = vect_halve_mask_nunits (prev_type, intermediate_mode);
else
intermediate_type
= lang_hooks.types.type_for_mode (intermediate_mode,
narrowing operation (short in the above example). */
bool
-supportable_narrowing_operation (vec_info *vinfo, enum tree_code code,
+supportable_narrowing_operation (enum tree_code code,
tree vectype_out, tree vectype_in,
enum tree_code *code1, int *multi_step_cvt,
vec<tree> *interm_types)
{
intermediate_mode = insn_data[icode1].operand[0].mode;
if (VECTOR_BOOLEAN_TYPE_P (prev_type))
- {
- intermediate_type = vect_double_mask_nunits (vinfo, prev_type);
- if (intermediate_mode != TYPE_MODE (intermediate_type))
- return false;
- }
+ intermediate_type
+ = vect_double_mask_nunits (prev_type, intermediate_mode);
else
intermediate_type
= lang_hooks.types.type_for_mode (intermediate_mode, uns);
tree, tree, enum tree_code *,
enum tree_code *, int *,
vec<tree> *);
-extern bool supportable_narrowing_operation (vec_info *, enum tree_code, tree,
- tree, enum tree_code *,
- int *, vec<tree> *);
+extern bool supportable_narrowing_operation (enum tree_code, tree, tree,
+ enum tree_code *, int *,
+ vec<tree> *);
extern unsigned record_stmt_cost (stmt_vector_for_cost *, int,
enum vect_cost_for_stmt, stmt_vec_info,
int, enum vect_cost_model_location);
extern tree vect_build_loop_niters (loop_vec_info, bool * = NULL);
extern void vect_gen_vector_loop_niters (loop_vec_info, tree, tree *,
tree *, bool);
-extern tree vect_halve_mask_nunits (vec_info *, tree);
-extern tree vect_double_mask_nunits (vec_info *, tree);
+extern tree vect_halve_mask_nunits (tree, machine_mode);
+extern tree vect_double_mask_nunits (tree, machine_mode);
extern void vect_record_loop_mask (loop_vec_info, vec_loop_masks *,
unsigned int, tree, tree);
extern tree vect_get_loop_mask (gimple_stmt_iterator *, vec_loop_masks *,
return make_vector_type (innertype, nunits, VOIDmode);
}
+/* Build a truth vector with NUNITS units, giving it mode MASK_MODE. */
+
+tree
+build_truth_vector_type_for_mode (poly_uint64 nunits, machine_mode mask_mode)
+{
+ gcc_assert (mask_mode != BLKmode);
+
+ poly_uint64 vsize = GET_MODE_BITSIZE (mask_mode);
+ unsigned HOST_WIDE_INT esize = vector_element_size (vsize, nunits);
+ tree bool_type = build_nonstandard_boolean_type (esize);
+
+ return make_vector_type (bool_type, nunits, mask_mode);
+}
+
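[For illustration only, mirroring the aarch64-sve-builtins.cc hunk earlier in the patch: a caller that already knows the mask mode can now build the truth type directly, without going through the target's size-based hook.]

  /* Illustration: build the SVE svbool_t mask type from its known mode.  */
  tree mask_vectype
    = build_truth_vector_type_for_mode (BYTES_PER_SVE_VECTOR, VNx16BImode);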
/* Build truth vector with specified length and number of units. */
tree
build_truth_vector_type (poly_uint64 nunits, poly_uint64 vector_size)
{
- machine_mode mask_mode
- = targetm.vectorize.get_mask_mode (nunits, vector_size).else_blk ();
-
- poly_uint64 vsize;
- if (mask_mode == BLKmode)
- vsize = vector_size * BITS_PER_UNIT;
- else
- vsize = GET_MODE_BITSIZE (mask_mode);
+ machine_mode mask_mode;
+ if (targetm.vectorize.get_mask_mode (nunits,
+ vector_size).exists (&mask_mode))
+ return build_truth_vector_type_for_mode (nunits, mask_mode);
+ poly_uint64 vsize = vector_size * BITS_PER_UNIT;
unsigned HOST_WIDE_INT esize = vector_element_size (vsize, nunits);
-
tree bool_type = build_nonstandard_boolean_type (esize);
- return make_vector_type (bool_type, nunits, mask_mode);
+ return make_vector_type (bool_type, nunits, BLKmode);
}
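[Sketch, not part of the patch: when the target does report a mask mode for the given NUNITS and VECTOR_SIZE, the two entry points should agree, since build_truth_vector_type now simply forwards to the new function in that case; only when no mask mode exists does it fall back to a BLKmode vector of boolean elements. NUNITS, VECTOR_SIZE and MASK_MODE below are assumed to describe the same target mask.]

  tree t1 = build_truth_vector_type (nunits, vector_size);
  tree t2 = build_truth_vector_type_for_mode (nunits, mask_mode);
  gcc_assert (TYPE_MODE (t1) == TYPE_MODE (t2));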
/* Returns a vector type corresponding to a comparison of VECTYPE. */
return build_truth_vector_type (TYPE_VECTOR_SUBPARTS (vectype), size);
}
-/* Similarly, but builds a variant type with TYPE_VECTOR_OPAQUE set. */
+/* Like build_vector_type, but builds a variant type with TYPE_VECTOR_OPAQUE
+ set. */
tree
build_opaque_vector_type (tree innertype, poly_int64 nunits)
extern tree build_reference_type (tree);
extern tree build_vector_type_for_mode (tree, machine_mode);
extern tree build_vector_type (tree, poly_int64);
+extern tree build_truth_vector_type_for_mode (poly_uint64, machine_mode);
extern tree build_truth_vector_type (poly_uint64, poly_uint64);
extern tree build_same_sized_truth_vector_type (tree vectype);
extern tree build_opaque_vector_type (tree, poly_int64);