+2020-01-09 Richard Sandiford <richard.sandiford@arm.com>
+
+ * config/aarch64/iterators.md (addsub): New code attribute.
+ * config/aarch64/aarch64-simd.md (aarch64_<su_optab><optab><mode>):
+ Re-express as...
+ (aarch64_<su_optab>q<addsub><mode>): ...this, making the same change
+ in the asm string and attributes. Fix indentation.
+ * config/aarch64/aarch64-sve.md (@aarch64_<su_optab><optab><mode>):
+ Re-express as...
+ (@aarch64_sve_<optab><mode>): ...this.
+ * config/aarch64/aarch64-sve-builtins.h
+ (function_expander::expand_signed_unpred_op): Delete.
+ * config/aarch64/aarch64-sve-builtins.cc
+ (function_expander::expand_signed_unpred_op): Likewise.
+ (function_expander::map_to_rtx_codes): If the optab isn't defined,
+ try using code_for_aarch64_sve instead.
+ * config/aarch64/aarch64-sve-builtins-base.cc (svqadd_impl): Delete.
+ (svqsub_impl): Likewise.
+ (svqadd, svqsub): Use rtx_code_function instead.
+
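(For orientation: the user-facing functions behind the svqadd and svqsub
entries are the ACLE intrinsics of the same names.  A minimal illustration,
assuming a GCC with SVE ACLE support and -march=armv8.2-a+sve; after this
patch they expand through rtx_code_function with the rtx codes shown.)

#include <arm_sve.h>

svint32_t
qadd_s32 (svint32_t a, svint32_t b)
{
  return svqadd_s32 (a, b);	/* maps to SS_PLUS  */
}

svuint32_t
qsub_u32 (svuint32_t a, svuint32_t b)
{
  return svqsub_u32 (a, b);	/* maps to US_MINUS  */
}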
2020-01-09 Richard Sandiford <richard.sandiford@arm.com>
* config/aarch64/iterators.md (SRHSUB, URHSUB): Delete.
)
;; <su>q<addsub>
-(define_insn "aarch64_<su_optab><optab><mode>"
+(define_insn "aarch64_<su_optab>q<addsub><mode>"
[(set (match_operand:VSDQ_I 0 "register_operand" "=w")
(BINQOPS:VSDQ_I (match_operand:VSDQ_I 1 "register_operand" "w")
- (match_operand:VSDQ_I 2 "register_operand" "w")))]
+ (match_operand:VSDQ_I 2 "register_operand" "w")))]
"TARGET_SIMD"
- "<su_optab><optab>\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
- [(set_attr "type" "neon_<optab><q>")]
+ "<su_optab>q<addsub>\\t%<v>0<Vmtype>, %<v>1<Vmtype>, %<v>2<Vmtype>"
+ [(set_attr "type" "neon_q<addsub><q>")]
)
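(The renamed pattern emits the same mnemonics as before: <su_optab>
contributes the "s"/"u" prefix and <addsub> the "add"/"sub" suffix.  A
minimal Advanced SIMD example; the asm comments show typical output, and
exact register allocation may differ.)

#include <arm_neon.h>

int32x4_t
qadd (int32x4_t a, int32x4_t b)
{
  return vqaddq_s32 (a, b);	/* sqadd	v0.4s, v0.4s, v1.4s  */
}

uint16x8_t
qsub (uint16x8_t a, uint16x8_t b)
{
  return vqsubq_u16 (a, b);	/* uqsub	v0.8h, v0.8h, v1.8h  */
}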
;; suqadd and usqadd
}
};
-class svqadd_impl : public function_base
-{
-public:
- rtx
- expand (function_expander &e) const OVERRIDE
- {
- return e.expand_signed_unpred_op (SS_PLUS, US_PLUS);
- }
-};
-
/* Implements svqdec[bhwd]{,_pat} and svqinc[bhwd]{,_pat}. */
class svqdec_svqinc_bhwd_impl : public function_base
{
rtx_code m_code_for_uint;
};
-class svqsub_impl : public function_base
-{
-public:
- rtx
- expand (function_expander &e) const OVERRIDE
- {
- return e.expand_signed_unpred_op (SS_MINUS, US_MINUS);
- }
-};
-
class svrdffr_impl : public function_base
{
public:
FUNCTION (svptest_last, svptest_impl, (LTU))
FUNCTION (svptrue, svptrue_impl,)
FUNCTION (svptrue_pat, svptrue_pat_impl,)
-FUNCTION (svqadd, svqadd_impl,)
+FUNCTION (svqadd, rtx_code_function, (SS_PLUS, US_PLUS, -1))
FUNCTION (svqdecb, svqdec_bhwd_impl, (QImode))
FUNCTION (svqdecb_pat, svqdec_bhwd_impl, (QImode))
FUNCTION (svqdecd, svqdec_bhwd_impl, (DImode))
FUNCTION (svqincp, svqdecp_svqincp_impl, (SS_PLUS, US_PLUS))
FUNCTION (svqincw, svqinc_bhwd_impl, (SImode))
FUNCTION (svqincw_pat, svqinc_bhwd_impl, (SImode))
-FUNCTION (svqsub, svqsub_impl,)
+FUNCTION (svqsub, rtx_code_function, (SS_MINUS, US_MINUS, -1))
FUNCTION (svrbit, unspec_based_function, (UNSPEC_RBIT, UNSPEC_RBIT, -1))
FUNCTION (svrdffr, svrdffr_impl,)
FUNCTION (svrecpe, unspec_based_function, (-1, -1, UNSPEC_FRECPE))
(3) a normal unpredicated optab for PRED_none and PRED_x functions,
dropping the predicate in the latter case
- (4) "cond_<optab><mode>" otherwise
+ (4) an unpredicated "aarch64_sve_<code_optab><mode>" for PRED_none and
+ PRED_x functions, again dropping the predicate for PRED_x
+
+ (5) "cond_<optab><mode>" otherwise
where <optab> corresponds to:
- CODE_FOR_SINT for signed integers
- CODE_FOR_UINT for unsigned integers
- UNSPEC_FOR_FP for floating-point values
+ and where <code_optab> is like <optab>, but uses CODE_FOR_SINT instead
+ of UNSPEC_FOR_FP for floating-point values.
+
MERGE_ARGNO is the argument that provides the values of inactive lanes for
_m functions, or DEFAULT_MERGE_ARGNO if we should apply the usual rules. */
rtx
/* Otherwise expand PRED_none and PRED_x operations without a predicate.
Floating-point operations conventionally use the signed rtx code. */
if (pred == PRED_none || pred == PRED_x)
- return use_unpred_insn (direct_optab_handler (code_to_optab (code), 0));
+ {
+ icode = direct_optab_handler (code_to_optab (code), 0);
+ if (icode == CODE_FOR_nothing)
+ icode = code_for_aarch64_sve (code, mode);
+ return use_unpred_insn (icode);
+ }
/* Don't use cond_*_optabs here, since not all codes have one yet. */
if (type_suffix (0).integer_p)
return use_cond_insn (icode, merge_argno);
}
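(To make the new fallback concrete, here is a hand-walked sketch for an
unpredicated svqadd_s32 call, assuming CODE is SS_PLUS and the vector mode
is VNx4SI.  At the time of this patch, aarch64-sve.md defines no standard
ssadd<mode>3 pattern, so the optab lookup fails and the renamed
@aarch64_sve_* insn is used instead.)

  insn_code icode = direct_optab_handler (code_to_optab (SS_PLUS),
					  VNx4SImode);
  if (icode == CODE_FOR_nothing)
    /* Falls back to the renamed "@aarch64_sve_ssadd<mode>" insn.  */
    icode = code_for_aarch64_sve (SS_PLUS, VNx4SImode);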
-/* Implement the call using an @aarch64 instruction and the
- instructions are parameterized by an rtx_code. CODE_FOR_SINT
- is the rtx_code for signed integer operations, CODE_FOR_UINT
- is the rtx_code for unsigned integer operations. */
-rtx
-function_expander::expand_signed_unpred_op (rtx_code code_for_sint,
- rtx_code code_for_uint)
-{
- insn_code icode;
- if (type_suffix (0).unsigned_p)
- icode = code_for_aarch64 (code_for_uint, code_for_uint, vector_mode (0));
- else
- icode = code_for_aarch64 (code_for_sint, code_for_sint, vector_mode (0));
- return use_unpred_insn (icode);
-}
-
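(With expand_signed_unpred_op gone, svqadd and svqsub reuse the generic
rtx_code_function wrapper from aarch64-sve-builtins-functions.h.  A sketch
of that class from memory, not verbatim; the member names are assumptions.)

class rtx_code_function : public function_base
{
public:
  CONSTEXPR rtx_code_function (rtx_code code_for_sint, rtx_code code_for_uint,
			       int unspec_for_fp)
    : m_code_for_sint (code_for_sint), m_code_for_uint (code_for_uint),
      m_unspec_for_fp (unspec_for_fp) {}

  rtx
  expand (function_expander &e) const OVERRIDE
  {
    /* map_to_rtx_codes does the signed/unsigned selection and the
       unpredicated fallback that the deleted classes did by hand.  */
    return e.map_to_rtx_codes (m_code_for_sint, m_code_for_uint,
			       m_unspec_for_fp);
  }

  rtx_code m_code_for_sint;
  rtx_code m_code_for_uint;
  int m_unspec_for_fp;
};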
/* Expand the call and return its lhs. */
rtx
function_expander::expand ()
rtx map_to_rtx_codes (rtx_code, rtx_code, int,
unsigned int = DEFAULT_MERGE_ARGNO);
rtx map_to_unspecs (int, int, int, unsigned int = DEFAULT_MERGE_ARGNO);
- rtx expand_signed_unpred_op (rtx_code, rtx_code);
/* The function call expression. */
tree call_expr;
;; -------------------------------------------------------------------------
;; Unpredicated saturating signed addition and subtraction.
-(define_insn "@aarch64_<su_optab><optab><mode>"
+(define_insn "@aarch64_sve_<optab><mode>"
[(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, w, ?&w, ?&w, w")
(SBINQOPS:SVE_FULL_I
(match_operand:SVE_FULL_I 1 "register_operand" "0, 0, w, w, w")
)
;; Unpredicated saturating unsigned addition and subtraction.
-(define_insn "@aarch64_<su_optab><optab><mode>"
+(define_insn "@aarch64_sve_<optab><mode>"
[(set (match_operand:SVE_FULL_I 0 "register_operand" "=w, ?&w, w")
(UBINQOPS:SVE_FULL_I
(match_operand:SVE_FULL_I 1 "register_operand" "0, w, w")
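(The extra constraint alternatives cover the SVE immediate and movprfx
forms.  An indicative example, assuming this patch is applied and
-march=armv8.2-a+sve; the asm comments show typical output.)

#include <arm_sve.h>

svint32_t
qadd_imm (svint32_t a)
{
  return svqadd_n_s32 (a, 1);	/* sqadd	z0.s, z0.s, #1  */
}

svuint8_t
qsub_vec (svuint8_t a, svuint8_t b)
{
  return svqsub_u8 (a, b);	/* uqsub	z0.b, z0.b, z1.b  */
}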
(mult "mul")
(div "div")
(udiv "udiv")
- (ss_plus "qadd")
- (us_plus "qadd")
- (ss_minus "qsub")
- (us_minus "qsub")
+ (ss_plus "ssadd")
+ (us_plus "usadd")
+ (ss_minus "sssub")
+ (us_minus "ussub")
(ss_neg "qneg")
(ss_abs "qabs")
(smin "smin")
(gtu "gtu")
(abs "abs")])
+(define_code_attr addsub [(ss_plus "add")
+ (us_plus "add")
+ (ss_minus "sub")
+ (us_minus "sub")])
+
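(The rename is name-preserving: "s"/"u" from <su_optab>, then "q", then
<addsub> spells the same "sqadd"/"uqadd"/"sqsub"/"uqsub" that
<su_optab><optab> produced before, so the generated gen_* functions keep
their names.  A hypothetical caller follows; the helper name is invented
for illustration.)

/* Emit a saturating signed V4SI addition via the generated function
   ("aarch64_" + "s" + "q" + "add" + "v4si"), whose name is unchanged
   by this patch.  */
static void
emit_sqadd_v4si (rtx dst, rtx a, rtx b)
{
  emit_insn (gen_aarch64_sqaddv4si (dst, a, b));
}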
;; For comparison operators we use the FCM* and CM* instructions.
;; As there are no CMLE or CMLT instructions which act on 3 vector
;; operands, we must use CMGE or CMGT and swap the order of the