+2016-06-14 Kyrylo Tkachov <kyrylo.tkachov@arm.com>
+
+ * config/aarch64/aarch64.c (aarch64_mask_and_shift_for_ubfiz_p):
+ New function.
+ (aarch64_rtx_costs): Use it. Rewrite CONST_INT_P (op1) case to handle
+ mask+shift version.
+ * config/aarch64/aarch64-protos.h (aarch64_mask_and_shift_for_ubfiz_p):
+ New prototype.
+ * config/aarch64/aarch64.md (*andim_ashift<mode>_bfiz): Replace
+ matching condition with aarch64_mask_and_shift_for_ubfiz_p.
+
2016-06-14 Richard Biener <rguenther@suse.de>
PR tree-optimization/71522
bool aarch64_label_mentioned_p (rtx);
void aarch64_declare_function_name (FILE *, const char*, tree);
bool aarch64_legitimate_pic_operand_p (rtx);
+bool aarch64_mask_and_shift_for_ubfiz_p (machine_mode, rtx, rtx);
bool aarch64_modes_tieable_p (machine_mode mode1,
machine_mode mode2);
bool aarch64_zero_extend_const_eq (machine_mode, rtx, machine_mode, rtx);
return op;
}
+/* Return true if MASK and SHFT_AMNT, taken from an RTX of the form
+   (x << SHFT_AMNT) & MASK, can be combined into a single UBFIZ instruction
+   of mode MODE.  See the *andim_ashift<mode>_bfiz pattern.  */
+
+bool
+aarch64_mask_and_shift_for_ubfiz_p (machine_mode mode, rtx mask, rtx shft_amnt)
+{
+ return CONST_INT_P (mask) && CONST_INT_P (shft_amnt)
+ && INTVAL (shft_amnt) < GET_MODE_BITSIZE (mode)
+ && exact_log2 ((INTVAL (mask) >> INTVAL (shft_amnt)) + 1) >= 0
+ && (INTVAL (mask)
+     & (((unsigned HOST_WIDE_INT) 1 << INTVAL (shft_amnt)) - 1)) == 0;
+}
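+
+/* An illustrative example (values chosen for exposition, not taken from the
+   patch): a DImode MASK of 0x1f8 with a SHFT_AMNT of 3 passes these checks,
+   since (0x1f8 >> 3) + 1 == 64 is a power of two and the low 3 bits of
+   0x1f8 are clear; (x << 3) & 0x1f8 can therefore be emitted as a single
+   "ubfiz x0, x1, 3, 6" (zero x0, then insert the low 6 bits of x1 at bit
+   position 3).  */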
+
/* Calculate the cost of calculating X, storing it in *COST. Result
is true if the total cost of the operation has now been calculated. */
static bool
if (GET_MODE_CLASS (mode) == MODE_INT)
{
- /* We possibly get the immediate for free, this is not
- modelled. */
- if (CONST_INT_P (op1)
- && aarch64_bitmask_imm (INTVAL (op1), mode))
+ if (CONST_INT_P (op1))
{
- *cost += rtx_cost (op0, mode, (enum rtx_code) code, 0, speed);
+ /* We have a mask + shift version of a UBFIZ,
+    i.e. the *andim_ashift<mode>_bfiz pattern.  */
+ if (GET_CODE (op0) == ASHIFT
+ && aarch64_mask_and_shift_for_ubfiz_p (mode, op1,
+ XEXP (op0, 1)))
+ {
+ *cost += rtx_cost (XEXP (op0, 0), mode,
+ (enum rtx_code) code, 0, speed);
+ if (speed)
+ *cost += extra_cost->alu.bfx;
- if (speed)
- *cost += extra_cost->alu.logical;
+ return true;
+ }
+ else if (aarch64_bitmask_imm (INTVAL (op1), mode))
+ {
+ /* We possibly get the immediate for free, this is not
+ modelled. */
+ *cost += rtx_cost (op0, mode, (enum rtx_code) code, 0, speed);
+ if (speed)
+ *cost += extra_cost->alu.logical;
- return true;
+ return true;
+ }
}
else
{
(and:GPI (ashift:GPI (match_operand:GPI 1 "register_operand" "r")
(match_operand 2 "const_int_operand" "n"))
(match_operand 3 "const_int_operand" "n")))]
- "(INTVAL (operands[2]) < (<GPI:sizen>))
- && exact_log2 ((INTVAL (operands[3]) >> INTVAL (operands[2])) + 1) >= 0
- && (INTVAL (operands[3]) & ((1 << INTVAL (operands[2])) - 1)) == 0"
+ "aarch64_mask_and_shift_for_ubfiz_p (<MODE>mode, operands[3], operands[2])"
"ubfiz\\t%<w>0, %<w>1, %2, %P3"
[(set_attr "type" "bfm")]
)
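+
+;; An illustrative example (values chosen for exposition): the RTL
+;; (and:DI (ashift:DI (reg:DI 1) (const_int 8)) (const_int 65280))
+;; satisfies aarch64_mask_and_shift_for_ubfiz_p (65280 == 0xff00, whose
+;; low 8 bits are clear) and is output as "ubfiz x0, x1, 8, 8".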