+2017-08-21  Richard Sandiford  <richard.sandiford@linaro.org>
+
+ * tree.h (type_has_mode_precision_p): New function.
+ * convert.c (convert_to_integer_1): Use it.
+ * expr.c (expand_expr_real_2): Likewise.
+ (expand_expr_real_1): Likewise.
+ * fold-const.c (fold_single_bit_test_into_sign_test): Likewise.
+ * match.pd: Likewise.
+ * tree-ssa-forwprop.c (simplify_rotate): Likewise.
+ * tree-ssa-math-opts.c (convert_mult_to_fma): Likewise.
+ * tree-tailcall.c (process_assignment): Likewise.
+ * tree-vect-loop.c (vectorizable_reduction): Likewise.
+ * tree-vect-patterns.c (vect_recog_vector_vector_shift_pattern)
+ (vect_recog_mult_pattern, vect_recog_divmod_pattern): Likewise.
+ * tree-vect-stmts.c (vectorizable_conversion): Likewise.
+ (vectorizable_assignment): Likewise.
+ (vectorizable_shift): Likewise.
+ (vectorizable_operation): Likewise.
+ * tree-vrp.c (register_edge_assert_for_2): Likewise.
+
 2017-08-21  Wilco Dijkstra  <wdijkstr@arm.com>

 	* match.pd: Add pow (C, x) simplification.
the signed-to-unsigned case the high-order bits have to
be cleared. */
if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (TREE_TYPE (expr))
- && (TYPE_PRECISION (TREE_TYPE (expr))
- != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (expr)))))
+ && !type_has_mode_precision_p (TREE_TYPE (expr)))
code = CONVERT_EXPR;
else
code = NOP_EXPR;
result to be reduced to the precision of the bit-field type,
which is narrower than that of the type's mode. */
reduce_bit_field = (INTEGRAL_TYPE_P (type)
- && GET_MODE_PRECISION (mode) > TYPE_PRECISION (type));
+ && !type_has_mode_precision_p (type));
if (reduce_bit_field && modifier == EXPAND_STACK_PARM)
target = 0;
case LROTATE_EXPR:
case RROTATE_EXPR:
gcc_assert (VECTOR_MODE_P (TYPE_MODE (type))
- || (GET_MODE_PRECISION (TYPE_MODE (type))
- == TYPE_PRECISION (type)));
+ || type_has_mode_precision_p (type));
/* fall through */
case LSHIFT_EXPR:
which is narrower than that of the type's mode. */
reduce_bit_field = (!ignore
&& INTEGRAL_TYPE_P (type)
- && GET_MODE_PRECISION (mode) > TYPE_PRECISION (type));
+ && !type_has_mode_precision_p (type));
/* If we are going to ignore this result, we need only do something
if there is a side-effect somewhere in the expression. If there
if (arg00 != NULL_TREE
/* This is only a win if casting to a signed type is cheap,
i.e. when arg00's type is not a partial mode. */
- && TYPE_PRECISION (TREE_TYPE (arg00))
- == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (arg00))))
+ && type_has_mode_precision_p (TREE_TYPE (arg00)))
{
tree stype = signed_type_for (TREE_TYPE (arg00));
return fold_build2_loc (loc, code == EQ_EXPR ? GE_EXPR : LT_EXPR,
|| GET_MODE_CLASS (TYPE_MODE (type)) != MODE_INT
/* Or if the precision of TO is not the same as the precision
of its mode. */
- || TYPE_PRECISION (type) != GET_MODE_PRECISION (TYPE_MODE (type))))
+ || !type_has_mode_precision_p (type)))
(convert (bitop @0 (convert @1))))))
(for bitop (bit_and bit_ior)
if (shift == LSHIFT_EXPR)
zerobits = ((HOST_WIDE_INT_1U << shiftc) - 1);
else if (shift == RSHIFT_EXPR
- && (TYPE_PRECISION (shift_type)
- == GET_MODE_PRECISION (TYPE_MODE (shift_type))))
+ && type_has_mode_precision_p (shift_type))
{
prec = TYPE_PRECISION (TREE_TYPE (@3));
tree arg00 = @0;
&& TYPE_UNSIGNED (TREE_TYPE (@0)))
{
tree inner_type = TREE_TYPE (@0);
- if ((TYPE_PRECISION (inner_type)
- == GET_MODE_PRECISION (TYPE_MODE (inner_type)))
+ if (type_has_mode_precision_p (inner_type)
&& TYPE_PRECISION (inner_type) < prec)
{
prec = TYPE_PRECISION (inner_type);
(simplify
(cmp (bit_and (convert?@2 @0) integer_pow2p@1) integer_zerop)
(if (INTEGRAL_TYPE_P (TREE_TYPE (@0))
- && (TYPE_PRECISION (TREE_TYPE (@0))
- == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@0))))
+ && type_has_mode_precision_p (TREE_TYPE (@0))
&& element_precision (@2) >= element_precision (@0)
&& wi::only_sign_bit_p (@1, element_precision (@0)))
(with { tree stype = signed_type_for (TREE_TYPE (@0)); }
/* The precision of the type of each operand must match the
precision of the mode of each operand, similarly for the
result. */
- && (TYPE_PRECISION (TREE_TYPE (@0))
- == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@0))))
- && (TYPE_PRECISION (TREE_TYPE (@1))
- == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@1))))
- && TYPE_PRECISION (type) == GET_MODE_PRECISION (TYPE_MODE (type))
+ && type_has_mode_precision_p (TREE_TYPE (@0))
+ && type_has_mode_precision_p (TREE_TYPE (@1))
+ && type_has_mode_precision_p (type)
/* The inner conversion must be a widening conversion. */
&& TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
&& types_match (@0, type)
/* The precision of the type of each operand must match the
precision of the mode of each operand, similarly for the
result. */
- && (TYPE_PRECISION (TREE_TYPE (@0))
- == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@0))))
- && (TYPE_PRECISION (TREE_TYPE (@1))
- == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (@1))))
- && TYPE_PRECISION (type) == GET_MODE_PRECISION (TYPE_MODE (type))
+ && type_has_mode_precision_p (TREE_TYPE (@0))
+ && type_has_mode_precision_p (TREE_TYPE (@1))
+ && type_has_mode_precision_p (type)
/* The inner conversion must be a widening conversion. */
&& TYPE_PRECISION (TREE_TYPE (@2)) > TYPE_PRECISION (TREE_TYPE (@0))
&& types_match (@0, @1)
/* Only create rotates in complete modes. Other cases are not
expanded properly. */
if (!INTEGRAL_TYPE_P (rtype)
- || TYPE_PRECISION (rtype) != GET_MODE_PRECISION (TYPE_MODE (rtype)))
+ || !type_has_mode_precision_p (rtype))
return false;
for (i = 0; i < 2; i++)
&& INTEGRAL_TYPE_P (TREE_TYPE (cdef_arg1[i]))
&& TYPE_PRECISION (TREE_TYPE (cdef_arg1[i]))
> floor_log2 (TYPE_PRECISION (rtype))
- && TYPE_PRECISION (TREE_TYPE (cdef_arg1[i]))
- == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (cdef_arg1[i]))))
+ && type_has_mode_precision_p (TREE_TYPE (cdef_arg1[i])))
{
def_arg2_alt[i] = cdef_arg1[i];
defcodefor_name (def_arg2_alt[i], &cdef_code[i],
&& INTEGRAL_TYPE_P (TREE_TYPE (tem))
&& TYPE_PRECISION (TREE_TYPE (tem))
> floor_log2 (TYPE_PRECISION (rtype))
- && TYPE_PRECISION (TREE_TYPE (tem))
- == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (tem)))
+ && type_has_mode_precision_p (TREE_TYPE (tem))
&& (tem == def_arg2[1 - i]
|| tem == def_arg2_alt[1 - i]))
{
&& INTEGRAL_TYPE_P (TREE_TYPE (tem))
&& TYPE_PRECISION (TREE_TYPE (tem))
> floor_log2 (TYPE_PRECISION (rtype))
- && TYPE_PRECISION (TREE_TYPE (tem))
- == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (tem))))
+ && type_has_mode_precision_p (TREE_TYPE (tem)))
defcodefor_name (tem, &code, &tem, NULL);
if (code == NEGATE_EXPR)
&& INTEGRAL_TYPE_P (TREE_TYPE (tem))
&& TYPE_PRECISION (TREE_TYPE (tem))
> floor_log2 (TYPE_PRECISION (rtype))
- && TYPE_PRECISION (TREE_TYPE (tem))
- == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (tem)))
+ && type_has_mode_precision_p (TREE_TYPE (tem))
&& (tem == def_arg2[1 - i]
|| tem == def_arg2_alt[1 - i]))
{
/* We don't want to do bitfield reduction ops. */
if (INTEGRAL_TYPE_P (type)
- && (TYPE_PRECISION (type)
- != GET_MODE_PRECISION (TYPE_MODE (type))))
+ && !type_has_mode_precision_p (type))
return false;
/* If the target doesn't support it, don't generate it. We assume that
type is smaller than mode's precision,
reduce_to_bit_field_precision would generate additional code. */
if (INTEGRAL_TYPE_P (TREE_TYPE (dest))
- && (GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (dest)))
- > TYPE_PRECISION (TREE_TYPE (dest))))
+ && !type_has_mode_precision_p (TREE_TYPE (dest)))
return FAIL;
}
return false;
/* Do not try to vectorize bit-precision reductions. */
- if ((TYPE_PRECISION (scalar_type)
- != GET_MODE_PRECISION (TYPE_MODE (scalar_type))))
+ if (!type_has_mode_precision_p (scalar_type))
return false;
/* All uses but the last are expected to be defined in the loop.
if (TREE_CODE (oprnd0) != SSA_NAME
|| TREE_CODE (oprnd1) != SSA_NAME
|| TYPE_MODE (TREE_TYPE (oprnd0)) == TYPE_MODE (TREE_TYPE (oprnd1))
- || TYPE_PRECISION (TREE_TYPE (oprnd1))
- != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (oprnd1)))
+ || !type_has_mode_precision_p (TREE_TYPE (oprnd1))
|| TYPE_PRECISION (TREE_TYPE (lhs))
!= TYPE_PRECISION (TREE_TYPE (oprnd0)))
return NULL;
if (TREE_CODE (oprnd0) != SSA_NAME
|| TREE_CODE (oprnd1) != INTEGER_CST
|| !INTEGRAL_TYPE_P (itype)
- || TYPE_PRECISION (itype) != GET_MODE_PRECISION (TYPE_MODE (itype)))
+ || !type_has_mode_precision_p (itype))
return NULL;
vectype = get_vectype_for_scalar_type (itype);
if (TREE_CODE (oprnd0) != SSA_NAME
|| TREE_CODE (oprnd1) != INTEGER_CST
|| TREE_CODE (itype) != INTEGER_TYPE
- || TYPE_PRECISION (itype) != GET_MODE_PRECISION (TYPE_MODE (itype)))
+ || !type_has_mode_precision_p (itype))
return NULL;
vectype = get_vectype_for_scalar_type (itype);
if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
&& ((INTEGRAL_TYPE_P (lhs_type)
- && (TYPE_PRECISION (lhs_type)
- != GET_MODE_PRECISION (TYPE_MODE (lhs_type))))
+ && !type_has_mode_precision_p (lhs_type))
|| (INTEGRAL_TYPE_P (rhs_type)
- && (TYPE_PRECISION (rhs_type)
- != GET_MODE_PRECISION (TYPE_MODE (rhs_type))))))
+ && !type_has_mode_precision_p (rhs_type))))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
if ((CONVERT_EXPR_CODE_P (code)
|| code == VIEW_CONVERT_EXPR)
&& INTEGRAL_TYPE_P (TREE_TYPE (scalar_dest))
- && ((TYPE_PRECISION (TREE_TYPE (scalar_dest))
- != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
- || ((TYPE_PRECISION (TREE_TYPE (op))
- != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (op))))))
+ && (!type_has_mode_precision_p (TREE_TYPE (scalar_dest))
+ || !type_has_mode_precision_p (TREE_TYPE (op)))
/* But a conversion that does not change the bit-pattern is ok. */
&& !((TYPE_PRECISION (TREE_TYPE (scalar_dest))
> TYPE_PRECISION (TREE_TYPE (op)))
scalar_dest = gimple_assign_lhs (stmt);
vectype_out = STMT_VINFO_VECTYPE (stmt_info);
- if (TYPE_PRECISION (TREE_TYPE (scalar_dest))
- != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
+ if (!type_has_mode_precision_p (TREE_TYPE (scalar_dest)))
{
if (dump_enabled_p ())
dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
/* Most operations cannot handle bit-precision types without extra
truncations. */
if (!VECTOR_BOOLEAN_TYPE_P (vectype_out)
- && (TYPE_PRECISION (TREE_TYPE (scalar_dest))
- != GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (scalar_dest))))
+ && !type_has_mode_precision_p (TREE_TYPE (scalar_dest))
/* Exception are bitwise binary operations. */
&& code != BIT_IOR_EXPR
&& code != BIT_XOR_EXPR
&& tree_fits_uhwi_p (cst2)
&& INTEGRAL_TYPE_P (TREE_TYPE (name2))
&& IN_RANGE (tree_to_uhwi (cst2), 1, prec - 1)
- && prec == GET_MODE_PRECISION (TYPE_MODE (TREE_TYPE (val))))
+ && type_has_mode_precision_p (TREE_TYPE (val)))
{
mask = wi::mask (tree_to_uhwi (cst2), false, prec);
val2 = fold_binary (LSHIFT_EXPR, TREE_TYPE (val), val, cst2);
const char *str;
};
extern const builtin_structptr_type builtin_structptr_types[6];
+
+/* Return true if type T has the same precision as its underlying mode. */
+
+inline bool
+type_has_mode_precision_p (const_tree t)
+{
+ return TYPE_PRECISION (t) == GET_MODE_PRECISION (TYPE_MODE (t));
+}
+
#endif /* GCC_TREE_H */
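
For readers unfamiliar with the bit-field reduction issue this patch keeps
testing for, here is a small standalone C sketch (not part of the patch) of
the invariant the new helper checks. The scalar_int_type struct and its
fields are hypothetical stand-ins for GCC's tree/machine_mode accessors
(TYPE_PRECISION, GET_MODE_PRECISION, TYPE_MODE); only the comparison itself
mirrors type_has_mode_precision_p.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for a scalar integer type: the precision the
   language type declares, and the precision of the machine mode that
   carries it.  In GCC these come from TYPE_PRECISION (t) and
   GET_MODE_PRECISION (TYPE_MODE (t)).  */
struct scalar_int_type
{
  const char *name;
  unsigned int type_precision;	/* bits the language type holds */
  unsigned int mode_precision;	/* bits the backing mode holds */
};

/* Mirror of the new helper: true iff the mode has no spare bits,
   i.e. arithmetic done in the mode needs no truncation afterwards.  */
static bool
type_has_mode_precision_p (const struct scalar_int_type *t)
{
  return t->type_precision == t->mode_precision;
}

int
main (void)
{
  /* A plain 32-bit int fills all of SImode.  */
  struct scalar_int_type si = { "int", 32, 32 };
  /* A 3-bit bit-field is still carried in (say) QImode, so the mode
     has 5 spare high bits that passes must keep reduced; this is the
     case the patched sites reject or handle specially.  */
  struct scalar_int_type bf = { "3-bit bit-field", 3, 8 };

  printf ("%s: %d\n", si.name, type_has_mode_precision_p (&si)); /* 1 */
  printf ("%s: %d\n", bf.name, type_has_mode_precision_p (&bf)); /* 0 */
  return 0;
}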