/* Expand the basic unary and binary arithmetic operations, for GNU compiler.
- Copyright (C) 1987-2015 Free Software Foundation, Inc.
+ Copyright (C) 1987-2016 Free Software Foundation, Inc.
This file is part of GCC.
/* The mode of the result is different then the mode of the
arguments. */
tmp_mode = insn_data[(int) icode].operand[0].mode;
- if (GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
+ if (VECTOR_MODE_P (mode)
+ && GET_MODE_NUNITS (tmp_mode) != 2 * GET_MODE_NUNITS (mode))
{
delete_insns_since (last);
return NULL_RTX;
return 0;
}
+/* Try calculating popcount of a double-word quantity as two popcount's of
+ word-sized quantities and summing up the results. */
+static rtx
+expand_doubleword_popcount (machine_mode mode, rtx op0, rtx target)
+{
+ rtx t0, t1, t;
+ rtx_insn *seq;
+
+ /* Build the whole expansion in a pending sequence so it can be thrown
+ away as a unit if either word-sized popcount is unavailable. */
+ start_sequence ();
+
+ /* Popcount each word of OP0 separately; expand_unop_direct returns
+ NULL_RTX when the target has no direct word_mode popcount pattern. */
+ t0 = expand_unop_direct (word_mode, popcount_optab,
+ operand_subword_force (op0, 0, mode), NULL_RTX,
+ true);
+ t1 = expand_unop_direct (word_mode, popcount_optab,
+ operand_subword_force (op0, 1, mode), NULL_RTX,
+ true);
+ if (!t0 || !t1)
+ {
+ /* Discard the partially built sequence and let the caller fall
+ back to some other expansion strategy. */
+ end_sequence ();
+ return NULL_RTX;
+ }
+
+ /* If we were not given a target, use a word_mode register, not a
+ 'mode' register. The result will fit, and nobody is expecting
+ anything bigger (the return type of __builtin_popcount* is int). */
+ if (!target)
+ target = gen_reg_rtx (word_mode);
+
+ /* The double-word popcount is simply the sum of the two word counts. */
+ t = expand_binop (word_mode, add_optab, t0, t1, target, 0, OPTAB_DIRECT);
+
+ seq = get_insns ();
+ end_sequence ();
+
+ /* Note on the emitted sequence that it computes POPCOUNT of OP0,
+ so later RTL passes can recognize the combined value. */
+ add_equal_note (seq, t, POPCOUNT, op0, 0);
+ emit_insn (seq);
+ return t;
+}
+
+/* Try calculating
+ (parity:wide x)
+ as
+ (parity:narrow (low (x) ^ high (x)))
+ XOR-ing the two halves preserves overall parity, since parity is just
+ the XOR of all the bits. */
+static rtx
+expand_doubleword_parity (machine_mode mode, rtx op0, rtx target)
+{
+ /* Fold the two words of OP0 into one word with the same parity. */
+ rtx t = expand_binop (word_mode, xor_optab,
+ operand_subword_force (op0, 0, mode),
+ operand_subword_force (op0, 1, mode),
+ NULL_RTX, 0, OPTAB_DIRECT);
+ /* NOTE(review): the caller appears to guarantee a word_mode parity or
+ popcount pattern exists before calling this — confirm, since T is
+ passed on without a NULL check. */
+ return expand_unop (word_mode, parity_optab, t, target, true);
+}
+
/* Try calculating
(bswap:narrow x)
as
different mode or with a libcall. */
static rtx
expand_unop_direct (machine_mode mode, optab unoptab, rtx op0, rtx target,
- int unsignedp)
+ int unsignedp)
{
if (optab_handler (unoptab, mode) != CODE_FOR_nothing)
{
goto try_libcall;
}
+ if (unoptab == popcount_optab
+ && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
+ && optab_handler (unoptab, word_mode) != CODE_FOR_nothing
+ && optimize_insn_for_speed_p ())
+ {
+ temp = expand_doubleword_popcount (mode, op0, target);
+ if (temp)
+ return temp;
+ }
+
+ if (unoptab == parity_optab
+ && GET_MODE_SIZE (mode) == 2 * UNITS_PER_WORD
+ && (optab_handler (unoptab, word_mode) != CODE_FOR_nothing
+ || optab_handler (popcount_optab, word_mode) != CODE_FOR_nothing)
+ && optimize_insn_for_speed_p ())
+ {
+ temp = expand_doubleword_parity (mode, op0, target);
+ if (temp)
+ return temp;
+ }
+
/* Widening (or narrowing) bswap needs special treatment. */
if (unoptab == bswap_optab)
{
return NULL_RTX;
}
+
+/* Emit a conditional negate or bitwise complement using the
+ negcc or notcc optabs if available. Return NULL_RTX if such operations
+ are not available. Otherwise return the RTX holding the result.
+ TARGET is the desired destination of the result. COMP is the comparison
+ on which to negate. If COND is true move into TARGET the negation
+ or bitwise complement of OP1. Otherwise move OP2 into TARGET.
+ CODE is either NEG or NOT. MODE is the machine mode in which the
+ operation is performed. */
+
+rtx
+emit_conditional_neg_or_complement (rtx target, rtx_code code,
+ machine_mode mode, rtx cond, rtx op1,
+ rtx op2)
+{
+ /* Map the rtx code to the corresponding conditional optab; any other
+ code is a caller bug. */
+ optab op = unknown_optab;
+ if (code == NEG)
+ op = negcc_optab;
+ else if (code == NOT)
+ op = notcc_optab;
+ else
+ gcc_unreachable ();
+
+ insn_code icode = direct_optab_handler (op, mode);
+
+ /* The target provides no conditional neg/not pattern for MODE. */
+ if (icode == CODE_FOR_nothing)
+ return NULL_RTX;
+
+ if (!target)
+ target = gen_reg_rtx (mode);
+
+ /* Remember where we are so a failed expansion can be rolled back. */
+ rtx_insn *last = get_last_insn ();
+ struct expand_operand ops[4];
+
+ /* Operand order expected by the pattern: result, condition, then the
+ two data operands. The condition is passed through unmodified. */
+ create_output_operand (&ops[0], target, mode);
+ create_fixed_operand (&ops[1], cond);
+ create_input_operand (&ops[2], op1, mode);
+ create_input_operand (&ops[3], op2, mode);
+
+ if (maybe_expand_insn (icode, 4, ops))
+ {
+ /* The expander may have placed the result somewhere other than
+ TARGET; copy it over so we honor the requested destination. */
+ if (ops[0].value != target)
+ convert_move (target, ops[0].value, false);
+
+ return target;
+ }
+ /* Expansion failed; remove any insns it emitted before giving up. */
+ delete_insns_since (last);
+ return NULL_RTX;
+}
+
/* Emit a conditional addition instruction if the machine supports one for that
condition and machine mode.
}
}
+
+/* Promote integer arguments for a libcall if necessary.
+ emit_library_call_value cannot do the promotion because it does not
+ know if it should do a signed or unsigned promotion. This is because
+ there are no tree types defined for libcalls.
+ ARG is the argument rtx; UINTP is nonzero when the caller treats the
+ value as unsigned. Returns ARG itself, or ARG converted to the
+ promoted mode. */
+
+static rtx
+prepare_libcall_arg (rtx arg, int uintp)
+{
+ machine_mode mode = GET_MODE (arg);
+ machine_mode arg_mode;
+ /* Only scalar integers are subject to function-argument promotion. */
+ if (SCALAR_INT_MODE_P (mode))
+ {
+ /* If we need to promote the integer function argument we need to do
+ it here instead of inside emit_library_call_value because in
+ emit_library_call_value we don't know if we should do a signed or
+ unsigned promotion. */
+
+ /* Ask the target for the promoted mode; no tree type is available,
+ hence the NULL_TREE arguments. unsigned_p may be updated by the
+ hook but the conversion below deliberately follows UINTP, the
+ signedness the caller knows about. */
+ int unsigned_p = 0;
+ arg_mode = promote_function_mode (NULL_TREE, mode,
+ &unsigned_p, NULL_TREE, 0);
+ if (arg_mode != mode)
+ return convert_to_mode (arg_mode, arg, uintp);
+ }
+ return arg;
+}
+
/* Generate code to convert FROM or TO a fixed-point.
If UINTP is true, either TO or FROM is an unsigned integer.
If SATP is true, we need to saturate the result. */
libfunc = convert_optab_libfunc (tab, to_mode, from_mode);
gcc_assert (libfunc);
+ from = prepare_libcall_arg (from, uintp);
+ from_mode = GET_MODE (from);
+
start_sequence ();
value = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST, to_mode,
1, from, from_mode);
}
/* Return comparison rtx for COND. Use UNSIGNEDP to select signed or
- unsigned operators. Do not generate compare instruction. */
+ unsigned operators. OPNO holds an index of the first comparison
+ operand in insn with code ICODE. Do not generate compare instruction. */
static rtx
vector_compare_rtx (enum tree_code tcode, tree t_op0, tree t_op1,
- bool unsignedp, enum insn_code icode)
+ bool unsignedp, enum insn_code icode,
+ unsigned int opno)
{
struct expand_operand ops[2];
rtx rtx_op0, rtx_op1;
create_input_operand (&ops[0], rtx_op0, m0);
create_input_operand (&ops[1], rtx_op1, m1);
- if (!maybe_legitimize_operands (icode, 4, 2, ops))
+ if (!maybe_legitimize_operands (icode, opno, 2, ops))
gcc_unreachable ();
return gen_rtx_fmt_ee (rcode, VOIDmode, ops[0].value, ops[1].value);
}
return NULL_RTX;
first = INTVAL (CONST_VECTOR_ELT (sel, 0));
- if (first >= 2*nelt)
+ if (first >= nelt)
return NULL_RTX;
for (i = 1; i < nelt; i++)
{
int idx = INTVAL (CONST_VECTOR_ELT (sel, i));
- unsigned int expected = (i + first) & (2 * nelt - 1);
+ unsigned int expected = i + first;
/* Indices into the second vector are all equivalent. */
if (idx < 0 || (MIN (nelt, (unsigned) idx) != MIN (nelt, expected)))
return NULL_RTX;
else
{
create_input_operand (&ops[1], v0, tmode);
- /* See if this can be handled with a vec_shr. We only do this if the
- second vector is all zeroes. */
- enum insn_code shift_code = optab_handler (vec_shr_optab, GET_MODE (v0));
- if (v1 == CONST0_RTX (GET_MODE (v1)) && shift_code)
- if (rtx shift_amt = shift_amt_for_vec_perm_mask (sel))
- {
- create_convert_operand_from_type (&ops[2], shift_amt,
- sizetype_tab[(int) stk_sizetype]);
- if (maybe_expand_insn (shift_code, 3, ops))
- return ops[0].value;
- }
create_input_operand (&ops[2], v1, tmode);
}
gcc_assert (GET_MODE_CLASS (GET_MODE (sel)) == MODE_VECTOR_INT);
if (GET_CODE (sel) == CONST_VECTOR)
{
+ /* See if this can be handled with a vec_shr. We only do this if the
+ second vector is all zeroes. */
+ enum insn_code shift_code = optab_handler (vec_shr_optab, mode);
+ enum insn_code shift_code_qi = ((qimode != VOIDmode && qimode != mode)
+ ? optab_handler (vec_shr_optab, qimode)
+ : CODE_FOR_nothing);
+ rtx shift_amt = NULL_RTX;
+ if (v1 == CONST0_RTX (GET_MODE (v1))
+ && (shift_code != CODE_FOR_nothing
+ || shift_code_qi != CODE_FOR_nothing))
+ {
+ shift_amt = shift_amt_for_vec_perm_mask (sel);
+ if (shift_amt)
+ {
+ struct expand_operand ops[3];
+ if (shift_code != CODE_FOR_nothing)
+ {
+ create_output_operand (&ops[0], target, mode);
+ create_input_operand (&ops[1], v0, mode);
+ create_convert_operand_from_type (&ops[2], shift_amt,
+ sizetype);
+ if (maybe_expand_insn (shift_code, 3, ops))
+ return ops[0].value;
+ }
+ if (shift_code_qi != CODE_FOR_nothing)
+ {
+ tmp = gen_reg_rtx (qimode);
+ create_output_operand (&ops[0], tmp, qimode);
+ create_input_operand (&ops[1], gen_lowpart (qimode, v0),
+ qimode);
+ create_convert_operand_from_type (&ops[2], shift_amt,
+ sizetype);
+ if (maybe_expand_insn (shift_code_qi, 3, ops))
+ return gen_lowpart (mode, ops[0].value);
+ }
+ }
+ }
+
icode = direct_optab_handler (vec_perm_const_optab, mode);
if (icode != CODE_FOR_nothing)
{
return tmp;
}
<test></test>
+/* Generate insns for a VEC_COND_EXPR with mask, given its TYPE and its
+ three operands: OP0 is the boolean-vector mask, OP1/OP2 the selected
+ values. Returns the result rtx, or 0 when the target lacks a
+ vcond_mask pattern for this mode combination. */
+
+rtx
+expand_vec_cond_mask_expr (tree vec_cond_type, tree op0, tree op1, tree op2,
+ rtx target)
+{
+ struct expand_operand ops[4];
+ machine_mode mode = TYPE_MODE (vec_cond_type);
+ machine_mode mask_mode = TYPE_MODE (TREE_TYPE (op0));
+ enum insn_code icode = get_vcond_mask_icode (mode, mask_mode);
+ rtx mask, rtx_op1, rtx_op2;
+
+ if (icode == CODE_FOR_nothing)
+ return 0;
+
+ mask = expand_normal (op0);
+ rtx_op1 = expand_normal (op1);
+ rtx_op2 = expand_normal (op2);
+
+ /* Force the mask and the first value operand into registers;
+ presumably the vcond_mask patterns require register operands
+ here — confirm against the pattern definitions. */
+ mask = force_reg (mask_mode, mask);
+ rtx_op1 = force_reg (GET_MODE (rtx_op1), rtx_op1);
+
+ /* Pattern operand order: result, then-value, else-value, mask. */
+ create_output_operand (&ops[0], target, mode);
+ create_input_operand (&ops[1], rtx_op1, mode);
+ create_input_operand (&ops[2], rtx_op2, mode);
+ create_input_operand (&ops[3], mask, mask_mode);
+ expand_insn (icode, 4, ops);
+
+ return ops[0].value;
+}
+
/* Generate insns for a VEC_COND_EXPR, given its TYPE and its
three operands. */
}
else
{
- /* Fake op0 < 0. */
gcc_assert (VECTOR_BOOLEAN_TYPE_P (TREE_TYPE (op0)));
- op0a = op0;
- op0b = build_zero_cst (TREE_TYPE (op0));
- tcode = LT_EXPR;
+ if (get_vcond_mask_icode (mode, TYPE_MODE (TREE_TYPE (op0)))
+ != CODE_FOR_nothing)
+ return expand_vec_cond_mask_expr (vec_cond_type, op0, op1,
+ op2, target);
+ /* Fake op0 < 0. */
+ else
+ {
+ gcc_assert (GET_MODE_CLASS (TYPE_MODE (TREE_TYPE (op0)))
+ == MODE_VECTOR_INT);
+ op0a = op0;
+ op0b = build_zero_cst (TREE_TYPE (op0));
+ tcode = LT_EXPR;
+ }
}
cmp_op_mode = TYPE_MODE (TREE_TYPE (op0a));
unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
if (icode == CODE_FOR_nothing)
return 0;
- comparison = vector_compare_rtx (tcode, op0a, op0b, unsignedp, icode);
+ comparison = vector_compare_rtx (tcode, op0a, op0b, unsignedp, icode, 4);
rtx_op1 = expand_normal (op1);
rtx_op2 = expand_normal (op2);
return ops[0].value;
}
+/* Generate insns for a vector comparison into a mask. TYPE is the
+ boolean-vector result type, EXP the comparison tree (its code and two
+ operands are extracted below), TARGET the desired destination or
+ NULL. Returns the mask rtx, or 0 when the target has no suitable
+ vec_cmp pattern. */
+
+rtx
+expand_vec_cmp_expr (tree type, tree exp, rtx target)
+{
+ struct expand_operand ops[4];
+ enum insn_code icode;
+ rtx comparison;
+ machine_mode mask_mode = TYPE_MODE (type);
+ machine_mode vmode;
+ bool unsignedp;
+ tree op0a, op0b;
+ enum tree_code tcode;
+
+ op0a = TREE_OPERAND (exp, 0);
+ op0b = TREE_OPERAND (exp, 1);
+ tcode = TREE_CODE (exp);
+
+ /* Signedness and mode come from the compared operands, not from the
+ mask result type. */
+ unsignedp = TYPE_UNSIGNED (TREE_TYPE (op0a));
+ vmode = TYPE_MODE (TREE_TYPE (op0a));
+
+ icode = get_vec_cmp_icode (vmode, mask_mode, unsignedp);
+ if (icode == CODE_FOR_nothing)
+ return 0;
+
+ /* The comparison operands start at operand index 2 of the vec_cmp
+ pattern; vector_compare_rtx legitimizes them at that position. */
+ comparison = vector_compare_rtx (tcode, op0a, op0b, unsignedp, icode, 2);
+ /* Operand layout: result mask, the comparison rtx itself, and its two
+ sides again as fixed operands. */
+ create_output_operand (&ops[0], target, mask_mode);
+ create_fixed_operand (&ops[1], comparison);
+ create_fixed_operand (&ops[2], XEXP (comparison, 0));
+ create_fixed_operand (&ops[3], XEXP (comparison, 1));
+ expand_insn (icode, 4, ops);
+ return ops[0].value;
+}
+
/* Expand a highpart multiply. */
rtx