machine_mode
aarch64_select_cc_mode (RTX_CODE code, rtx x, rtx y)
{
+ machine_mode mode_x = GET_MODE (x);
+ rtx_code code_x = GET_CODE (x);
+
/* All floating point compares return CCFP if it is an equality
comparison, and CCFPE otherwise. */
- if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
+ if (GET_MODE_CLASS (mode_x) == MODE_FLOAT)
{
switch (code)
{
using the TST instruction with the appropriate bitmask. */
if (y == const0_rtx && REG_P (x)
&& (code == EQ || code == NE)
- && (GET_MODE (x) == HImode || GET_MODE (x) == QImode))
+ && (mode_x == HImode || mode_x == QImode))
return CC_NZmode;
/* Similarly, comparisons of zero_extends from shorter modes can
be performed using an ANDS with an immediate mask. */
- if (y == const0_rtx && GET_CODE (x) == ZERO_EXTEND
- && (GET_MODE (x) == SImode || GET_MODE (x) == DImode)
+ if (y == const0_rtx && code_x == ZERO_EXTEND
+ && (mode_x == SImode || mode_x == DImode)
&& (GET_MODE (XEXP (x, 0)) == HImode || GET_MODE (XEXP (x, 0)) == QImode)
&& (code == EQ || code == NE))
return CC_NZmode;
- if ((GET_MODE (x) == SImode || GET_MODE (x) == DImode)
+ if ((mode_x == SImode || mode_x == DImode)
&& y == const0_rtx
&& (code == EQ || code == NE || code == LT || code == GE)
- && (GET_CODE (x) == PLUS || GET_CODE (x) == MINUS || GET_CODE (x) == AND
- || GET_CODE (x) == NEG
- || (GET_CODE (x) == ZERO_EXTRACT && CONST_INT_P (XEXP (x, 1))
+ && (code_x == PLUS || code_x == MINUS || code_x == AND
+ || code_x == NEG
+ || (code_x == ZERO_EXTRACT && CONST_INT_P (XEXP (x, 1))
&& CONST_INT_P (XEXP (x, 2)))))
return CC_NZmode;
/* A compare with a shifted operand. Because of canonicalization,
the comparison will have to be swapped when we emit the assembly
code. */
- if ((GET_MODE (x) == SImode || GET_MODE (x) == DImode)
+ if ((mode_x == SImode || mode_x == DImode)
&& (REG_P (y) || GET_CODE (y) == SUBREG || y == const0_rtx)
- && (GET_CODE (x) == ASHIFT || GET_CODE (x) == ASHIFTRT
- || GET_CODE (x) == LSHIFTRT
- || GET_CODE (x) == ZERO_EXTEND || GET_CODE (x) == SIGN_EXTEND))
+ && (code_x == ASHIFT || code_x == ASHIFTRT
+ || code_x == LSHIFTRT
+ || code_x == ZERO_EXTEND || code_x == SIGN_EXTEND))
return CC_SWPmode;
/* Similarly for a negated operand, but we can only do this for
equalities. */
- if ((GET_MODE (x) == SImode || GET_MODE (x) == DImode)
+ if ((mode_x == SImode || mode_x == DImode)
&& (REG_P (y) || GET_CODE (y) == SUBREG)
&& (code == EQ || code == NE)
- && GET_CODE (x) == NEG)
+ && code_x == NEG)
return CC_Zmode;
- /* A test for unsigned overflow. */
- if ((GET_MODE (x) == DImode || GET_MODE (x) == TImode)
- && code == NE
- && GET_CODE (x) == PLUS
- && GET_CODE (y) == ZERO_EXTEND)
+ /* A test for unsigned overflow from an addition. */
+ if ((mode_x == DImode || mode_x == TImode)
+ && (code == LTU || code == GEU)
+ && code_x == PLUS
+ && rtx_equal_p (XEXP (x, 0), y))
return CC_Cmode;
+ /* A test for unsigned overflow from an add with carry. */
+ if ((mode_x == DImode || mode_x == TImode)
+ && (code == LTU || code == GEU)
+ && code_x == PLUS
+ && CONST_SCALAR_INT_P (y)
+ && (rtx_mode_t (y, mode_x)
+ == (wi::shwi (1, mode_x)
+ << (GET_MODE_BITSIZE (mode_x).to_constant () / 2))))
+ return CC_ADCmode;
+
/* A test for signed overflow. */
- if ((GET_MODE (x) == DImode || GET_MODE (x) == TImode)
+ if ((mode_x == DImode || mode_x == TImode)
&& code == NE
- && GET_CODE (x) == PLUS
+ && code_x == PLUS
&& GET_CODE (y) == SIGN_EXTEND)
return CC_Vmode;
case E_CC_Cmode:
switch (comp_code)
{
- case NE: return AARCH64_CS;
- case EQ: return AARCH64_CC;
+ case LTU: return AARCH64_CS;
+ case GEU: return AARCH64_CC;
+ default: return -1;
+ }
+ break;
+
+ case E_CC_ADCmode:
+ switch (comp_code)
+ {
+ case GEU: return AARCH64_CS;
+ case LTU: return AARCH64_CC;
default: return -1;
}
break;
""
{
emit_insn (gen_add<mode>3_compareC (operands[0], operands[1], operands[2]));
- aarch64_gen_unlikely_cbranch (NE, CC_Cmode, operands[3]);
+ aarch64_gen_unlikely_cbranch (LTU, CC_Cmode, operands[3]);
DONE;
})
emit_move_insn (gen_lowpart (DImode, operands[0]), low_dest);
emit_move_insn (gen_highpart (DImode, operands[0]), high_dest);
- aarch64_gen_unlikely_cbranch (NE, CC_Cmode, operands[3]);
+ aarch64_gen_unlikely_cbranch (GEU, CC_ADCmode, operands[3]);
DONE;
})
[(set_attr "type" "alus_sreg,alus_imm,alus_imm")]
)
-(define_insn "*add<mode>3_compareC_cconly_imm"
- [(set (reg:CC_C CC_REGNUM)
- (ne:CC_C
- (plus:<DWI>
- (zero_extend:<DWI> (match_operand:GPI 0 "register_operand" "r,r"))
- (match_operand:<DWI> 2 "const_scalar_int_operand" ""))
- (zero_extend:<DWI>
- (plus:GPI
- (match_dup 0)
- (match_operand:GPI 1 "aarch64_plus_immediate" "I,J")))))]
- "aarch64_zero_extend_const_eq (<DWI>mode, operands[2],
- <MODE>mode, operands[1])"
- "@
- cmn\\t%<w>0, %1
- cmp\\t%<w>0, #%n1"
- [(set_attr "type" "alus_imm")]
-)
-
(define_insn "*add<mode>3_compareC_cconly"
[(set (reg:CC_C CC_REGNUM)
- (ne:CC_C
- (plus:<DWI>
- (zero_extend:<DWI> (match_operand:GPI 0 "register_operand" "r"))
- (zero_extend:<DWI> (match_operand:GPI 1 "register_operand" "r")))
- (zero_extend:<DWI> (plus:GPI (match_dup 0) (match_dup 1)))))]
+ (compare:CC_C
+ (plus:GPI
+ (match_operand:GPI 0 "register_operand" "r,r,r")
+ (match_operand:GPI 1 "aarch64_plus_operand" "r,I,J"))
+ (match_dup 0)))]
""
- "cmn\\t%<w>0, %<w>1"
- [(set_attr "type" "alus_sreg")]
-)
-
-(define_insn "*add<mode>3_compareC_imm"
- [(set (reg:CC_C CC_REGNUM)
- (ne:CC_C
- (plus:<DWI>
- (zero_extend:<DWI> (match_operand:GPI 1 "register_operand" "r,r"))
- (match_operand:<DWI> 3 "const_scalar_int_operand" ""))
- (zero_extend:<DWI>
- (plus:GPI
- (match_dup 1)
- (match_operand:GPI 2 "aarch64_plus_immediate" "I,J")))))
- (set (match_operand:GPI 0 "register_operand" "=r,r")
- (plus:GPI (match_dup 1) (match_dup 2)))]
- "aarch64_zero_extend_const_eq (<DWI>mode, operands[3],
- <MODE>mode, operands[2])"
"@
- adds\\t%<w>0, %<w>1, %2
- subs\\t%<w>0, %<w>1, #%n2"
- [(set_attr "type" "alus_imm")]
+ cmn\\t%<w>0, %<w>1
+ cmn\\t%<w>0, %1
+ cmp\\t%<w>0, #%n1"
+ [(set_attr "type" "alus_sreg,alus_imm,alus_imm")]
)
(define_insn "add<mode>3_compareC"
[(set (reg:CC_C CC_REGNUM)
(compare:CC_C
- (plus:<DWI>
- (zero_extend:<DWI> (match_operand:GPI 1 "register_operand" "r"))
- (zero_extend:<DWI> (match_operand:GPI 2 "register_operand" "r")))
- (zero_extend:<DWI>
- (plus:GPI (match_dup 1) (match_dup 2)))))
- (set (match_operand:GPI 0 "register_operand" "=r")
+ (plus:GPI
+ (match_operand:GPI 1 "register_operand" "r,r,r")
+ (match_operand:GPI 2 "aarch64_plus_operand" "r,I,J"))
+ (match_dup 1)))
+ (set (match_operand:GPI 0 "register_operand" "=r,r,r")
(plus:GPI (match_dup 1) (match_dup 2)))]
""
- "adds\\t%<w>0, %<w>1, %<w>2"
- [(set_attr "type" "alus_sreg")]
+ "@
+ adds\\t%<w>0, %<w>1, %<w>2
+ adds\\t%<w>0, %<w>1, %2
+ subs\\t%<w>0, %<w>1, #%n2"
+ [(set_attr "type" "alus_sreg,alus_imm,alus_imm")]
)
(define_insn "*add<mode>3_compareV_cconly_imm"
[(set (match_operand:GPI 0 "register_operand")
(plus:GPI
(plus:GPI
- (ne:GPI (reg:CC_C CC_REGNUM) (const_int 0))
+ (ltu:GPI (reg:CC_C CC_REGNUM) (const_int 0))
(match_operand:GPI 1 "aarch64_reg_or_zero"))
(match_operand:GPI 2 "aarch64_reg_or_zero")))]
""
(define_expand "add<mode>3_carryinC"
  [(parallel
     [(set (match_dup 3)
-	   (compare:CC_C
+	   (compare:CC_ADC
	     (plus:<DWI>
	       (plus:<DWI>
		 (match_dup 4)
	       (match_operand:GPI 1 "register_operand" "")))
	       (zero_extend:<DWI>
		 (match_operand:GPI 2 "register_operand" "")))
-	     (zero_extend:<DWI>
-	       (plus:GPI
-		 (plus:GPI (match_dup 5) (match_dup 1))
-		 (match_dup 2)))))
+	     (match_dup 6)))
      (set (match_operand:GPI 0 "register_operand")
	   (plus:GPI
	     (plus:GPI (match_dup 5) (match_dup 1))
	     (match_dup 2)))])]
   ""
{
-  operands[3] = gen_rtx_REG (CC_Cmode, CC_REGNUM);
-  operands[4] = gen_rtx_NE (<DWI>mode, operands[3], const0_rtx);
-  operands[5] = gen_rtx_NE (<MODE>mode, operands[3], const0_rtx);
+  operands[3] = gen_rtx_REG (CC_ADCmode, CC_REGNUM);
+  rtx ccin = gen_rtx_REG (CC_Cmode, CC_REGNUM);
+  operands[4] = gen_rtx_LTU (<DWI>mode, ccin, const0_rtx);
+  operands[5] = gen_rtx_LTU (<MODE>mode, ccin, const0_rtx);
+  /* The expected CC value after an add-with-carry that overflowed is
+     1 << bitsize of the narrow mode, represented in the double-wide mode.
+     Use <DWI>mode here, not a hard-coded TImode: for GPI=SI the double-wide
+     mode is DImode, and the wide-int value built by wi::shwi has <DWI>mode
+     precision, which immed_wide_int_const must be given to match.  This also
+     agrees with the rtx_mode_t (operands[4], <DWI>mode) check in the
+     matching *add<mode>3_carryinC insn conditions.  */
+  operands[6] = immed_wide_int_const (wi::shwi (1, <DWI>mode)
+				      << GET_MODE_BITSIZE (<MODE>mode),
+				      <DWI>mode);
})
(define_insn "*add<mode>3_carryinC_zero"
- [(set (reg:CC_C CC_REGNUM)
- (compare:CC_C
+ [(set (reg:CC_ADC CC_REGNUM)
+ (compare:CC_ADC
(plus:<DWI>
(match_operand:<DWI> 2 "aarch64_carry_operation" "")
(zero_extend:<DWI> (match_operand:GPI 1 "register_operand" "r")))
- (zero_extend:<DWI>
- (plus:GPI
- (match_operand:GPI 3 "aarch64_carry_operation" "")
- (match_dup 1)))))
+ (match_operand 4 "const_scalar_int_operand" "")))
(set (match_operand:GPI 0 "register_operand" "=r")
- (plus:GPI (match_dup 3) (match_dup 1)))]
- ""
+ (plus:GPI (match_operand:GPI 3 "aarch64_carry_operation" "")
+ (match_dup 1)))]
+ "rtx_mode_t (operands[4], <DWI>mode)
+ == (wi::shwi (1, <DWI>mode) << (unsigned) GET_MODE_BITSIZE (<MODE>mode))"
"adcs\\t%<w>0, %<w>1, <w>zr"
[(set_attr "type" "adc_reg")]
)
(define_insn "*add<mode>3_carryinC"
- [(set (reg:CC_C CC_REGNUM)
- (compare:CC_C
+ [(set (reg:CC_ADC CC_REGNUM)
+ (compare:CC_ADC
(plus:<DWI>
(plus:<DWI>
(match_operand:<DWI> 3 "aarch64_carry_operation" "")
(zero_extend:<DWI> (match_operand:GPI 1 "register_operand" "r")))
(zero_extend:<DWI> (match_operand:GPI 2 "register_operand" "r")))
- (zero_extend:<DWI>
- (plus:GPI
- (plus:GPI
- (match_operand:GPI 4 "aarch64_carry_operation" "")
- (match_dup 1))
- (match_dup 2)))))
+ (match_operand 5 "const_scalar_int_operand" "")))
(set (match_operand:GPI 0 "register_operand" "=r")
(plus:GPI
- (plus:GPI (match_dup 4) (match_dup 1))
+ (plus:GPI (match_operand:GPI 4 "aarch64_carry_operation" "")
+ (match_dup 1))
(match_dup 2)))]
- ""
+ "rtx_mode_t (operands[5], <DWI>mode)
+ == (wi::shwi (1, <DWI>mode) << (unsigned) GET_MODE_BITSIZE (<MODE>mode))"
"adcs\\t%<w>0, %<w>1, %<w>2"
[(set_attr "type" "adc_reg")]
)
""
{
rtx cc = gen_rtx_REG (CC_Cmode, CC_REGNUM);
- operands[3] = gen_rtx_NE (<DWI>mode, cc, const0_rtx);
- operands[4] = gen_rtx_NE (<MODE>mode, cc, const0_rtx);
+ operands[3] = gen_rtx_LTU (<DWI>mode, cc, const0_rtx);
+ operands[4] = gen_rtx_LTU (<MODE>mode, cc, const0_rtx);
})
(define_insn "*add<mode>3_carryinV_zero"