* compare-elim.c (conforming_compare): Accept UNSPECs.
(find_comparison_dom_walker::before_dom_children): Deal with
instructions both using and killing the flags register.
(equivalent_reg_at_start): New function extracted from...
(try_eliminate_compare): ...here. Use it and add support for
registers and UNSPECs as second operand of the compare.
* config/visium/visium-modes.def (CCV): New.
* config/visium/predicates.md (visium_v_comparison_operator): New.
(visium_branch_operator): Deal with CCV mode.
* config/visium/visium.c (visium_select_cc_mode): Likewise.
(output_cbranch): Likewise.
* config/visium/visium.md (UNSPEC_{ADD,SUB,NEG}V): New constants.
(uaddv<mode>4): New expander.
(addv<mode>4): Likewise.
(add<mode>3_insn_set_carry): New instruction.
(add<mode>3_insn_set_overflow): Likewise.
(addsi3_insn_set_overflow): Likewise.
(usubv<mode>4): New expander.
(subv<mode>4): Likewise.
(sub<mode>3_insn_set_carry): New instruction.
(sub<mode>3_insn_set_overflow): Likewise.
(subsi3_insn_set_overflow): Likewise.
(unegv<mode>3): New expander.
(negv<mode>3): Likewise.
(neg<mode>2_insn_set_overflow): New instruction.
(addv_tst<mode>): Likewise.
(subv_tst<mode>): Likewise.
(negv_tst<mode>): Likewise.
(cbranch<mode>4_addv_insn): New splitter and instruction.
(cbranch<mode>4_subv_insn): Likewise.
(cbranch<mode>4_negv_insn): Likewise.
From-SVN: r241379
+2016-10-20 Eric Botcazou <ebotcazou@adacore.com>
+
+ * compare-elim.c (conforming_compare): Accept UNSPECs.
+ (find_comparison_dom_walker::before_dom_children): Deal with
+ instructions both using and killing the flags register.
+ (equivalent_reg_at_start): New function extracted from...
+ (try_eliminate_compare): ...here. Use it and add support for
+ registers and UNSPECs as second operand of the compare.
+ * config/visium/visium-modes.def (CCV): New.
+ * config/visium/predicates.md (visium_v_comparison_operator): New.
+ (visium_branch_operator): Deal with CCV mode.
+ * config/visium/visium.c (visium_select_cc_mode): Likewise.
+ (output_cbranch): Likewise.
+ * config/visium/visium.md (UNSPEC_{ADD,SUB,NEG}V): New constants.
+ (uaddv<mode>4): New expander.
+ (addv<mode>4): Likewise.
+ (add<mode>3_insn_set_carry): New instruction.
+ (add<mode>3_insn_set_overflow): Likewise.
+ (addsi3_insn_set_overflow): Likewise.
+ (usubv<mode>4): New expander.
+ (subv<mode>4): Likewise.
+ (sub<mode>3_insn_set_carry): New instruction.
+ (sub<mode>3_insn_set_overflow): Likewise.
+ (subsi3_insn_set_overflow): Likewise.
+ (unegv<mode>3): New expander.
+ (negv<mode>3): Likewise.
+ (neg<mode>2_insn_set_overflow): New instruction.
+ (addv_tst<mode>): Likewise.
+ (subv_tst<mode>): Likewise.
+ (negv_tst<mode>): Likewise.
+ (cbranch<mode>4_addv_insn): New splitter and instruction.
+ (cbranch<mode>4_subv_insn): Likewise.
+ (cbranch<mode>4_negv_insn): Likewise.
+
2016-10-20 Richard Biener <rguenther@suse.de>
* tree-ssa-alias.c (ptrs_compare_unequal): Remove code duplication.
if (!REG_P (dest) || REGNO (dest) != targetm.flags_regnum)
return NULL;
- if (REG_P (XEXP (src, 0))
- && (REG_P (XEXP (src, 1)) || CONSTANT_P (XEXP (src, 1))))
+ if (!REG_P (XEXP (src, 0)))
+ return NULL;
+
+ if (CONSTANT_P (XEXP (src, 1)) || REG_P (XEXP (src, 1)))
return src;
+ if (GET_CODE (XEXP (src, 1)) == UNSPEC)
+ {
+ for (int i = 0; i < XVECLEN (XEXP (src, 1), 0); i++)
+ if (!REG_P (XVECEXP (XEXP (src, 1), 0, i)))
+ return NULL;
+ return src;
+ }
+
return NULL;
}
last_cmp_valid = true;
}
- /* Notice if this instruction kills the flags register. */
- else if (bitmap_bit_p (killed, targetm.flags_regnum))
+ else
{
- /* See if this insn could be the "clobber" that eliminates
- a future comparison. */
- last_clobber = (arithmetic_flags_clobber_p (insn) ? insn : NULL);
+ /* Notice if this instruction uses the flags register. */
+ if (last_cmp)
+ find_flags_uses_in_insn (last_cmp, insn);
- /* In either case, the previous compare is no longer valid. */
- last_cmp = NULL;
- last_cmp_valid = false;
- }
+ /* Notice if this instruction kills the flags register. */
+ if (bitmap_bit_p (killed, targetm.flags_regnum))
+ {
+ /* See if this insn could be the "clobber" that eliminates
+ a future comparison. */
+ last_clobber = (arithmetic_flags_clobber_p (insn) ? insn : NULL);
- /* Notice if this instruction uses the flags register. */
- else if (last_cmp)
- find_flags_uses_in_insn (last_cmp, insn);
+ /* In either case, the previous compare is no longer valid. */
+ last_cmp = NULL;
+ last_cmp_valid = false;
+ }
+ }
/* Notice if any of the inputs to the comparison have changed. */
if (last_cmp_valid
return flags;
}
-/* Attempt to replace a comparison with a prior arithmetic insn that can
- compute the same flags value as the comparison itself. Return true if
- successful, having made all rtl modifications necessary. */
+/* Return a register RTX holding the same value at START as REG at END, or
+ NULL_RTX if there is none. */
-static bool
-try_eliminate_compare (struct comparison *cmp)
+static rtx
+equivalent_reg_at_start (rtx reg, rtx_insn *end, rtx_insn *start)
{
- rtx_insn *insn, *bb_head;
- rtx x, flags, in_a, cmp_src;
-
- /* We must have found an interesting "clobber" preceding the compare. */
- if (cmp->prev_clobber == NULL)
- return false;
-
- /* ??? For the moment we don't handle comparisons for which IN_B
- is a register. We accepted these during initial comparison
- recognition in order to eliminate duplicate compares.
- An improvement here would be to handle x = a - b; if (a cmp b). */
- if (!CONSTANT_P (cmp->in_b))
- return false;
-
- /* Verify that IN_A is not clobbered in between CMP and PREV_CLOBBER.
- Given that this target requires this pass, we can assume that most
- insns do clobber the flags, and so the distance between the compare
- and the clobber is likely to be small. */
- /* ??? This is one point at which one could argue that DF_REF_CHAIN would
- be useful, but it is thought to be too heavy-weight a solution here. */
+ rtx_insn *bb_head = BB_HEAD (BLOCK_FOR_INSN (end));
- in_a = cmp->in_a;
- insn = cmp->insn;
- bb_head = BB_HEAD (BLOCK_FOR_INSN (insn));
- for (insn = PREV_INSN (insn);
- insn != cmp->prev_clobber;
+ for (rtx_insn *insn = PREV_INSN (end);
+ insn != start;
insn = PREV_INSN (insn))
{
const int abnormal_flags
/* Note that the BB_HEAD is always either a note or a label, but in
any case it means that IN_A is defined outside the block. */
if (insn == bb_head)
- return false;
+ return NULL_RTX;
if (NOTE_P (insn) || DEBUG_INSN_P (insn))
continue;
/* Find a possible def of IN_A in INSN. */
FOR_EACH_INSN_DEF (def, insn)
- if (DF_REF_REGNO (def) == REGNO (in_a))
+ if (DF_REF_REGNO (def) == REGNO (reg))
break;
/* No definitions of IN_A; continue searching. */
/* Bail if this is not a totally normal set of IN_A. */
if (DF_REF_IS_ARTIFICIAL (def))
- return false;
+ return NULL_RTX;
if (DF_REF_FLAGS (def) & abnormal_flags)
- return false;
+ return NULL_RTX;
/* We've found an insn between the compare and the clobber that sets
IN_A. Given that pass_cprop_hardreg has not yet run, we still find
situations in which we can usefully look through a copy insn. */
- x = single_set (insn);
- if (x == NULL)
- return false;
- in_a = SET_SRC (x);
- if (!REG_P (in_a))
+ rtx x = single_set (insn);
+ if (x == NULL_RTX)
+ return NULL_RTX;
+ reg = SET_SRC (x);
+ if (!REG_P (reg))
+ return NULL_RTX;
+ }
+
+ return reg;
+}
+
+/* Attempt to replace a comparison with a prior arithmetic insn that can
+ compute the same flags value as the comparison itself. Return true if
+ successful, having made all rtl modifications necessary. */
+
+static bool
+try_eliminate_compare (struct comparison *cmp)
+{
+ rtx x, flags, in_a, in_b, cmp_src;
+
+ /* We must have found an interesting "clobber" preceding the compare. */
+ if (cmp->prev_clobber == NULL)
+ return false;
+
+ /* Verify that IN_A is not clobbered in between CMP and PREV_CLOBBER.
+ Given that this target requires this pass, we can assume that most
+ insns do clobber the flags, and so the distance between the compare
+ and the clobber is likely to be small. */
+ /* ??? This is one point at which one could argue that DF_REF_CHAIN would
+ be useful, but it is thought to be too heavy-weight a solution here. */
+ in_a = equivalent_reg_at_start (cmp->in_a, cmp->insn, cmp->prev_clobber);
+ if (!in_a)
+ return false;
+
+ /* Likewise for IN_B if need be. */
+ if (CONSTANT_P (cmp->in_b))
+ in_b = cmp->in_b;
+ else if (REG_P (cmp->in_b))
+ {
+ in_b = equivalent_reg_at_start (cmp->in_b, cmp->insn, cmp->prev_clobber);
+ if (!in_b)
return false;
}
+ else if (GET_CODE (cmp->in_b) == UNSPEC)
+ {
+ const int len = XVECLEN (cmp->in_b, 0);
+ rtvec v = rtvec_alloc (len);
+ for (int i = 0; i < len; i++)
+ {
+ rtx r = equivalent_reg_at_start (XVECEXP (cmp->in_b, 0, i),
+ cmp->insn, cmp->prev_clobber);
+ if (!r)
+ return false;
+ RTVEC_ELT (v, i) = r;
+ }
+ in_b = gen_rtx_UNSPEC (GET_MODE (cmp->in_b), v, XINT (cmp->in_b, 1));
+ }
+ else
+ gcc_unreachable ();
/* We've reached PREV_CLOBBER without finding a modification of IN_A.
Validate that PREV_CLOBBER itself does in fact refer to IN_A. Do
recall that we've already validated the shape of PREV_CLOBBER. */
+ rtx insn = cmp->prev_clobber;
x = XVECEXP (PATTERN (insn), 0, 0);
if (rtx_equal_p (SET_DEST (x), in_a))
cmp_src = SET_SRC (x);
/* Also check operations with implicit extensions, e.g.:
[(set (reg:DI)
- (zero_extend:DI (plus:SI (reg:SI)(reg:SI))))
+ (zero_extend:DI (plus:SI (reg:SI) (reg:SI))))
(set (reg:CCZ flags)
- (compare:CCZ
- (plus:SI (reg:SI)(reg:SI))
- (const_int 0)))] */
+ (compare:CCZ (plus:SI (reg:SI) (reg:SI))
+ (const_int 0)))] */
else if (REG_P (SET_DEST (x))
&& REG_P (in_a)
&& REGNO (SET_DEST (x)) == REGNO (in_a)
|| GET_CODE (SET_SRC (x)) == SIGN_EXTEND)
&& GET_MODE (XEXP (SET_SRC (x), 0)) == GET_MODE (in_a))
cmp_src = XEXP (SET_SRC (x), 0);
+
+ /* Also check fully redundant comparisons, e.g.:
+ [(set (reg:SI)
+ (minus:SI (reg:SI) (reg:SI))))
+ (set (reg:CC flags)
+ (compare:CC (reg:SI) (reg:SI)))] */
+ else if (REG_P (in_b)
+ && GET_CODE (SET_SRC (x)) == MINUS
+ && rtx_equal_p (XEXP (SET_SRC (x), 0), in_a)
+ && rtx_equal_p (XEXP (SET_SRC (x), 1), in_b))
+ cmp_src = in_a;
+
else
return false;
/* Determine if we ought to use a different CC_MODE here. */
- flags = maybe_select_cc_mode (cmp, cmp_src, cmp->in_b);
+ flags = maybe_select_cc_mode (cmp, cmp_src, in_b);
if (flags == NULL)
flags = gen_rtx_REG (cmp->orig_mode, targetm.flags_regnum);
/* Generate a new comparison for installation in the setter. */
x = copy_rtx (cmp_src);
- x = gen_rtx_COMPARE (GET_MODE (flags), x, cmp->in_b);
+ x = gen_rtx_COMPARE (GET_MODE (flags), x, in_b);
x = gen_rtx_SET (flags, x);
/* Succeed if the new instruction is valid. Note that we may have started
(match_code "eq,ne"))
;; Return true if OP is a valid comparison operator for CCNZmode.
-(define_special_predicate "visium_nz_comparison_operator"
+(define_predicate "visium_nz_comparison_operator"
(match_code "eq,ne,lt,ge"))
;; Return true if OP is a valid comparison operator for CCCmode.
-(define_special_predicate "visium_c_comparison_operator"
+(define_predicate "visium_c_comparison_operator"
(match_code "eq,ne,ltu,geu"))
+;; Return true if OP is a valid comparison operator for CCVmode.
+(define_predicate "visium_v_comparison_operator"
+ (match_code "eq,ne"))
+
;; Return true if OP is a valid FP comparison operator.
(define_predicate "visium_fp_comparison_operator"
(match_code "eq,ne,ordered,unordered,unlt,unle,ungt,unge,lt,le,gt,ge"))
return visium_nz_comparison_operator (op, mode);
case CCCmode:
return visium_c_comparison_operator (op, mode);
+ case CCVmode:
+ return visium_v_comparison_operator (op, mode);
case CCFPmode:
case CCFPEmode:
return visium_fp_comparison_operator (op, mode);
instruction. Only the =,!= and unsigned <,>= operators can be used in
conjunction with it.
+ We also have a CCVmode which is used by the arithmetic instructions when
+ they explicitly set the V flag (signed overflow). Only the =,!= operators
+ can be used in conjunction with it.
+
We also have two modes to indicate that the condition code is set by the
floating-point unit. One for comparisons which generate an exception
if the result is unordered (CCFPEmode) and one for comparisons which never
CC_MODE (CCNZ);
CC_MODE (CCC);
+CC_MODE (CCV);
CC_MODE (CCFP);
CC_MODE (CCFPE);
&& rtx_equal_p (XEXP (op0, 0), op1))
return CCCmode;
+ /* This is for the {add,sub,neg}<mode>3_insn_set_overflow pattern. */
+ if ((code == EQ || code == NE)
+ && GET_CODE (op1) == UNSPEC
+ && (XINT (op1, 1) == UNSPEC_ADDV
+ || XINT (op1, 1) == UNSPEC_SUBV
+ || XINT (op1, 1) == UNSPEC_NEGV))
+ return CCVmode;
+
if (op1 != const0_rtx)
return CCmode;
case NE:
if (cc_mode == CCCmode)
cond = "cs";
+ else if (cc_mode == CCVmode)
+ cond = "os";
else
cond = "ne";
break;
case EQ:
if (cc_mode == CCCmode)
cond = "cc";
+ else if (cc_mode == CCVmode)
+ cond = "oc";
else
cond = "eq";
break;
UNSPEC_ITOF
UNSPEC_FTOI
UNSPEC_NOP
+ UNSPEC_ADDV
+ UNSPEC_SUBV
+ UNSPEC_NEGV
])
;; UNSPEC_VOLATILE usage.
(match_operand:QHI 2 "register_operand" "")))]
"")
+(define_expand "uaddv<mode>4"
+ [(set (match_operand:I 0 "register_operand" "")
+ (plus:I (match_operand:I 1 "register_operand" "")
+ (match_operand:I 2 "register_operand" "")))
+ (set (pc)
+ (if_then_else (ltu (match_dup 0) (match_dup 1))
+ (label_ref (match_operand 3 ""))
+ (pc)))]
+ "")
+
+(define_expand "addv<mode>4"
+ [(set (match_operand:I 0 "register_operand" "")
+ (plus:I (match_operand:I 1 "register_operand" "")
+ (match_operand:I 2 "register_operand" "")))
+ (set (pc)
+ (if_then_else (ne (match_dup 0)
+ (unspec:I [(match_dup 1) (match_dup 2)] UNSPEC_ADDV))
+ (label_ref (match_operand 3 ""))
+ (pc)))]
+ "")
+
(define_insn_and_split "*add<mode>3_insn"
[(set (match_operand:QHI 0 "register_operand" "=r")
(plus:QHI (match_operand:QHI 1 "register_operand" "%r")
"add<s> %0,%1,%2"
[(set_attr "type" "arith")])
+(define_insn "*add<mode>3_insn_set_carry"
+ [(set (match_operand:QHI 0 "register_operand" "=r")
+ (plus:QHI (match_operand:QHI 1 "register_operand" "%r")
+ (match_operand:QHI 2 "register_operand" "r")))
+ (set (reg:CCC R_FLAGS)
+ (compare:CCC (plus:QHI (match_dup 1) (match_dup 2))
+ (match_dup 1)))]
+ "reload_completed"
+ "add<s> %0,%1,%2"
+ [(set_attr "type" "arith")])
+
+(define_insn "*add<mode>3_insn_set_overflow"
+ [(set (match_operand:QHI 0 "register_operand" "=r")
+ (plus:QHI (match_operand:QHI 1 "register_operand" "%r")
+ (match_operand:QHI 2 "register_operand" "r")))
+ (set (reg:CCV R_FLAGS)
+ (compare:CCV (plus:QHI (match_dup 1) (match_dup 2))
+ (unspec:QHI [(match_dup 1) (match_dup 2)] UNSPEC_ADDV)))]
+ "reload_completed"
+ "add<s> %0,%1,%2"
+ [(set_attr "type" "arith")])
+
(define_expand "addsi3"
[(set (match_operand:SI 0 "register_operand" "")
(plus:SI (match_operand:SI 1 "register_operand" "")
addi %0,%2"
[(set_attr "type" "arith")])
+(define_insn "*addsi3_insn_set_overflow"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (plus:SI (match_operand:SI 1 "register_operand" "%r,0")
+ (match_operand:SI 2 "real_add_operand" " r,J")))
+ (set (reg:CCV R_FLAGS)
+ (compare:CCV (plus:SI (match_dup 1) (match_dup 2))
+ (unspec:SI [(match_dup 1) (match_dup 2)] UNSPEC_ADDV)))]
+ "reload_completed"
+ "@
+ add.l %0,%1,%2
+ addi %0,%2"
+ [(set_attr "type" "arith")])
+
(define_expand "adddi3"
[(set (match_operand:DI 0 "register_operand" "")
(plus:DI (match_operand:DI 1 "register_operand" "")
(match_operand:QHI 2 "register_operand" "")))]
"")
+(define_expand "usubv<mode>4"
+ [(set (match_operand:I 0 "register_operand" "")
+ (minus:I (match_operand:I 1 "reg_or_0_operand" "")
+ (match_operand:I 2 "register_operand" "")))
+ (set (pc)
+ (if_then_else (ltu (match_dup 1) (match_dup 2))
+ (label_ref (match_operand 3 ""))
+ (pc)))]
+ ""
+{
+ if (operands[1] == const0_rtx)
+ {
+ emit_insn (gen_unegv<mode>3 (operands[0], operands[2], operands[3]));
+ DONE;
+ }
+})
+
+(define_expand "subv<mode>4"
+ [(set (match_operand:I 0 "register_operand" "")
+ (minus:I (match_operand:I 1 "register_operand" "")
+ (match_operand:I 2 "register_operand" "")))
+ (set (pc)
+ (if_then_else (ne (match_dup 0)
+ (unspec:I [(match_dup 1) (match_dup 2)] UNSPEC_SUBV))
+ (label_ref (match_operand 3 ""))
+ (pc)))]
+ "")
+
(define_insn_and_split "*sub<mode>3_insn"
[(set (match_operand:QHI 0 "register_operand" "=r")
(minus:QHI (match_operand:QHI 1 "reg_or_0_operand" "rO")
"sub<s> %0,%r1,%2"
[(set_attr "type" "arith")])
+(define_insn "*sub<mode>3_insn_set_carry"
+ [(set (match_operand:QHI 0 "register_operand" "=r")
+ (minus:QHI (match_operand:QHI 1 "reg_or_0_operand" "rO")
+ (match_operand:QHI 2 "register_operand" "r")))
+ (set (reg:CC R_FLAGS)
+ (compare:CC (match_dup 1) (match_dup 2)))]
+ "reload_completed"
+ "sub<s> %0,%r1,%2"
+ [(set_attr "type" "arith")])
+
+(define_insn "*sub<mode>3_insn_set_overflow"
+ [(set (match_operand:QHI 0 "register_operand" "=r")
+ (minus:QHI (match_operand:QHI 1 "reg_or_0_operand" "rO")
+ (match_operand:QHI 2 "register_operand" "r")))
+ (set (reg:CCV R_FLAGS)
+ (compare:CCV (minus:QHI (match_dup 1) (match_dup 2))
+ (unspec:QHI [(match_dup 1) (match_dup 2)] UNSPEC_SUBV)))]
+ "reload_completed"
+ "sub<s> %0,%r1,%2"
+ [(set_attr "type" "arith")])
+
(define_expand "subsi3"
[(set (match_operand:SI 0 "register_operand" "")
(minus:SI (match_operand:SI 1 "reg_or_0_operand" "")
subi %0,%2"
[(set_attr "type" "arith")])
+(define_insn "*subsi3_insn_set_overflow"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (minus:SI (match_operand:SI 1 "register_operand" " r,0")
+ (match_operand:SI 2 "real_add_operand" " r,J")))
+ (set (reg:CCV R_FLAGS)
+ (compare:CCV (minus:SI (match_dup 1) (match_dup 2))
+ (unspec:SI [(match_dup 1) (match_dup 2)] UNSPEC_SUBV)))]
+ "reload_completed"
+ "@
+ sub.l %0,%1,%2
+ subi %0,%2"
+ [(set_attr "type" "arith")])
+
(define_expand "subdi3"
[(set (match_operand:DI 0 "register_operand" "")
(minus:DI (match_operand:DI 1 "register_operand" "")
(neg:I (match_operand:I 1 "register_operand" "")))]
"")
+(define_expand "unegv<mode>3"
+ [(set (match_operand:I 0 "register_operand" "")
+ (neg:I (match_operand:I 1 "register_operand" "")))
+ (set (pc)
+ (if_then_else (ne (match_dup 0) (const_int 0))
+ (label_ref (match_operand 2 ""))
+ (pc)))]
+ "")
+
+(define_expand "negv<mode>3"
+ [(set (match_operand:I 0 "register_operand" "")
+ (neg:I (match_operand:I 1 "register_operand" "")))
+ (set (pc)
+ (if_then_else (ne (match_dup 0)
+ (unspec:I [(match_dup 1)] UNSPEC_NEGV))
+ (label_ref (match_operand 2 ""))
+ (pc)))]
+ "")
+
(define_insn_and_split "*neg<mode>2_insn"
[(set (match_operand:I 0 "register_operand" "=r")
(neg:I (match_operand:I 1 "register_operand" "r")))]
"sub.l %0,r0,%1"
[(set_attr "type" "arith")])
+(define_insn "*neg<mode>2_insn_set_overflow"
+ [(set (match_operand:I 0 "register_operand" "=r")
+ (neg:I (match_operand:I 1 "register_operand" "r")))
+ (set (reg:CCV R_FLAGS)
+ (compare:CCV (neg:I (match_dup 1))
+ (unspec:I [(match_dup 1)] UNSPEC_NEGV)))]
+ "reload_completed"
+ "sub<s> %0,r0,%1"
+ [(set_attr "type" "arith")])
+
(define_expand "negdi2"
[(set (match_operand:DI 0 "register_operand" "")
(neg:DI (match_operand:DI 1 "register_operand" "")))]
"lsr.l r0,%0,32-%1"
[(set_attr "type" "logic")])
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+;; Integer overflow tests
+;;
+;; Modes QI, HI and SI are supported directly.
+;;
+;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+;;
+
+(define_insn "*addv_tst<mode>"
+ [(set (reg:CCV R_FLAGS)
+ (compare:CCV (match_operand:I 0 "register_operand" "r")
+ (unspec:I [(match_operand:I 1 "register_operand" "%r")
+ (match_operand:I 2 "register_operand" "r")]
+ UNSPEC_ADDV)))]
+ "reload_completed"
+ "add<s> r0,%1,%2"
+ [(set_attr "type" "arith")])
+
+(define_insn "*subv_tst<mode>"
+ [(set (reg:CCV R_FLAGS)
+ (compare:CCV (match_operand:I 0 "register_operand" "r")
+ (unspec:I [(match_operand:I 1 "reg_or_0_operand" "rO")
+ (match_operand:I 2 "register_operand" "r")]
+ UNSPEC_SUBV)))]
+ "reload_completed"
+ "sub<s> r0,%r1,%2"
+ [(set_attr "type" "arith")])
+
+(define_insn "*negv_tst<mode>"
+ [(set (reg:CCV R_FLAGS)
+ (compare:CCV (match_operand:I 0 "register_operand" "r")
+ (unspec:I [(match_operand:I 1 "register_operand" "r")]
+ UNSPEC_NEGV)))]
+ "reload_completed"
+ "sub<s> r0,r0,%1"
+ [(set_attr "type" "arith")])
+
;;
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
}
[(set_attr "type" "cmp")])
+(define_insn_and_split "*cbranch<mode>4_addv_insn"
+ [(set (pc)
+ (if_then_else (match_operator 0 "visium_equality_comparison_operator"
+ [(match_operand:I 1 "register_operand" "r")
+ (unspec:I [(match_operand:I 2 "register_operand" "%r")
+ (match_operand:I 3 "register_operand" "r")]
+ UNSPEC_ADDV)])
+ (label_ref (match_operand 4 ""))
+ (pc)))]
+ ""
+ "#"
+ "reload_completed"
+ [(const_int 0)]
+{
+ visium_split_cbranch (GET_CODE (operands[0]), XEXP (operands[0], 0),
+ XEXP (operands[0], 1), operands[4]);
+ DONE;
+}
+ [(set_attr "type" "cmp")])
+
+(define_insn_and_split "*cbranch<mode>4_subv_insn"
+ [(set (pc)
+ (if_then_else (match_operator 0 "visium_equality_comparison_operator"
+ [(match_operand:I 1 "register_operand" "r")
+ (unspec:I [(match_operand:I 2 "reg_or_0_operand" "rO")
+ (match_operand:I 3 "register_operand" "r")]
+ UNSPEC_SUBV)])
+ (label_ref (match_operand 4 ""))
+ (pc)))]
+ ""
+ "#"
+ "reload_completed"
+ [(const_int 0)]
+{
+ visium_split_cbranch (GET_CODE (operands[0]), XEXP (operands[0], 0),
+ XEXP (operands[0], 1), operands[4]);
+ DONE;
+}
+ [(set_attr "type" "cmp")])
+
+(define_insn_and_split "*cbranch<mode>4_negv_insn"
+ [(set (pc)
+ (if_then_else (match_operator 0 "visium_equality_comparison_operator"
+ [(match_operand:I 1 "register_operand" "r")
+ (unspec:I [(match_operand:I 2 "register_operand" "r")]
+ UNSPEC_NEGV)])
+ (label_ref (match_operand 3 ""))
+ (pc)))]
+ ""
+ "#"
+ "reload_completed"
+ [(const_int 0)]
+{
+ visium_split_cbranch (GET_CODE (operands[0]), XEXP (operands[0], 0),
+ XEXP (operands[0], 1), operands[3]);
+ DONE;
+}
+ [(set_attr "type" "cmp")])
+
(define_insn_and_split "*cbranchsi4_btst_insn"
[(set (pc)
(if_then_else (match_operator 0 "visium_equality_comparison_operator"
+2016-10-20 Eric Botcazou <ebotcazou@adacore.com>
+
+ * gcc.target/visium/overflow8.c: New.
+ * gcc.target/visium/overflow16.c: Likewise.
+	* gcc.target/visium/overflow32.c: Likewise.
+
2016-10-20 Michael Matz <matz@suse.de>
* gcc.dg/loop-split.c: New test.
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+#include <stdbool.h>
+
+bool my_uadd_overflow (unsigned short a, unsigned short b, unsigned short *res)
+{
+ return __builtin_add_overflow (a, b, res);
+}
+
+bool my_usub_overflow (unsigned short a, unsigned short b, unsigned short *res)
+{
+ return __builtin_sub_overflow (a, b, res);
+}
+
+bool my_uneg_overflow (unsigned short a, unsigned short *res)
+{
+ return __builtin_sub_overflow (0, a, res);
+}
+
+bool my_add_overflow (short a, short b, short *res)
+{
+ return __builtin_add_overflow (a, b, res);
+}
+
+bool my_sub_overflow (short a, short b, short *res)
+{
+ return __builtin_sub_overflow (a, b, res);
+}
+
+bool my_neg_overflow (short a, short *res)
+{
+ return __builtin_sub_overflow (0, a, res);
+}
+
+/* { dg-final { scan-assembler-times "add.w" 2 } } */
+/* { dg-final { scan-assembler-times "sub.w" 4 } } */
+/* { dg-final { scan-assembler-not "cmp.w" } } */
+/* { dg-final { scan-assembler-not "mov.w" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+#include <stdbool.h>
+
+bool my_uadd_overflow (unsigned int a, unsigned int b, unsigned int *res)
+{
+ return __builtin_add_overflow (a, b, res);
+}
+
+bool my_usub_overflow (unsigned int a, unsigned int b, unsigned int *res)
+{
+ return __builtin_sub_overflow (a, b, res);
+}
+
+bool my_uneg_overflow (unsigned int a, unsigned int *res)
+{
+ return __builtin_sub_overflow (0, a, res);
+}
+
+bool my_add_overflow (int a, int b, int *res)
+{
+ return __builtin_add_overflow (a, b, res);
+}
+
+bool my_sub_overflow (int a, int b, int *res)
+{
+ return __builtin_sub_overflow (a, b, res);
+}
+
+bool my_neg_overflow (int a, int *res)
+{
+ return __builtin_sub_overflow (0, a, res);
+}
+
+/* { dg-final { scan-assembler-times "add.l" 2 } } */
+/* { dg-final { scan-assembler-times "sub.l" 4 } } */
+/* { dg-final { scan-assembler-not "cmp.l" } } */
+/* { dg-final { scan-assembler-not "mov.l" } } */
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+#include <stdbool.h>
+
+bool my_uadd_overflow (unsigned char a, unsigned char b, unsigned char *res)
+{
+ return __builtin_add_overflow (a, b, res);
+}
+
+bool my_usub_overflow (unsigned char a, unsigned char b, unsigned char *res)
+{
+ return __builtin_sub_overflow (a, b, res);
+}
+
+bool my_uneg_overflow (unsigned char a, unsigned char *res)
+{
+ return __builtin_sub_overflow (0, a, res);
+}
+
+bool my_add_overflow (signed char a, signed char b, signed char *res)
+{
+ return __builtin_add_overflow (a, b, res);
+}
+
+bool my_sub_overflow (signed char a, signed char b, signed char *res)
+{
+ return __builtin_sub_overflow (a, b, res);
+}
+
+bool my_neg_overflow (signed char a, signed char *res)
+{
+ return __builtin_sub_overflow (0, a, res);
+}
+
+/* { dg-final { scan-assembler-times "add.b" 2 } } */
+/* { dg-final { scan-assembler-times "sub.b" 4 } } */
+/* { dg-final { scan-assembler-not "cmp.b" } } */
+/* { dg-final { scan-assembler-not "mov.b" } } */