+2019-11-18 Richard Sandiford <richard.sandiford@arm.com>
+
+ * cse.c (cse_insn): Delete no-op register moves too.
+ * simplify-rtx.c (comparison_to_mask): Handle unsigned comparisons.
+ Take a second comparison to control the value for NE.
+ (mask_to_comparison): Handle unsigned comparisons.
+ (simplify_logical_relational_operation): Likewise. Update call
+ to comparison_to_mask. Handle AND if !HONOR_NANS.
+ (simplify_binary_operation_1): Call the above for AND too.
+
2019-11-18 Richard Sandiford <richard.sandiford@arm.com>
* fold-const.c (native_encode_vector): Turn into a wrapper function,
for (i = 0; i < n_sets; i++)
{
bool repeat = false;
- bool mem_noop_insn = false;
+ bool noop_insn = false;
rtx src, dest;
rtx src_folded;
struct table_elt *elt = 0, *p;
}
/* Similarly, lots of targets don't allow no-op
- (set (mem x) (mem x)) moves. */
+ (set (mem x) (mem x)) moves. Even (set (reg x) (reg x))
+ might be impossible for certain registers (like CC registers). */
else if (n_sets == 1
- && MEM_P (trial)
- && MEM_P (dest)
+ && (MEM_P (trial) || REG_P (trial))
&& rtx_equal_p (trial, dest)
&& !side_effects_p (dest)
&& (cfun->can_delete_dead_exceptions
|| insn_nothrow_p (insn)))
{
SET_SRC (sets[i].rtl) = trial;
- mem_noop_insn = true;
+ noop_insn = true;
break;
}
sets[i].rtl = 0;
}
- /* Similarly for no-op MEM moves. */
- else if (mem_noop_insn)
+ /* Similarly for no-op moves. */
+ else if (noop_insn)
{
if (cfun->can_throw_non_call_exceptions && can_throw_internal (insn))
cse_cfg_altered = true;
return 0;
}
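
For illustration, a standalone toy model of the extended test (not GCC code: rtx_toy, noop_move_p and the field names are invented for this sketch; the real checks are rtx_equal_p, side_effects_p and insn_nothrow_p on RTL):

#include <assert.h>
#include <stdbool.h>

/* Toy stand-ins for RTL objects: a move is a deletable no-op when
   source and destination are the same MEM or the same REG, deletion
   has no observable side effects, and the insn cannot throw.  */
enum rtx_kind { RTX_REG, RTX_MEM };
struct rtx_toy { enum rtx_kind kind; int id; bool side_effects; };

static bool
noop_move_p (struct rtx_toy src, struct rtx_toy dest, bool nothrow)
{
  /* Mirrors (MEM_P (trial) || REG_P (trial)) && rtx_equal_p (trial, dest)
     plus the !side_effects_p and nothrow conditions above.  */
  return src.kind == dest.kind
	 && src.id == dest.id
	 && !dest.side_effects
	 && nothrow;
}

int
main (void)
{
  struct rtx_toy cc_reg = { RTX_REG, 66, false };  /* a CC-style register */
  struct rtx_toy mem0 = { RTX_MEM, 0, false };

  assert (noop_move_p (cc_reg, cc_reg, true)); /* (set (reg x) (reg x)) */
  assert (noop_move_p (mem0, mem0, true));     /* (set (mem x) (mem x)) */
  assert (!noop_move_p (cc_reg, mem0, true));  /* not a self-copy */
  return 0;
}
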
-/* Return a mask describing the COMPARISON. */
+/* Return a mask describing the COMPARISON. Treat NE as unsigned
+ if OTHER_COMPARISON is. */
static int
-comparison_to_mask (enum rtx_code comparison)
+comparison_to_mask (rtx_code comparison, rtx_code other_comparison)
{
switch (comparison)
{
+ case LTU:
+ return 32;
+ case GTU:
+ return 16;
case LT:
return 8;
case GT:
case UNORDERED:
return 1;
+ case LEU:
+ return 34;
+ case GEU:
+ return 18;
case LTGT:
return 12;
case LE:
case ORDERED:
return 14;
case NE:
- return 13;
+ return (other_comparison == LTU
+ || other_comparison == LEU
+ || other_comparison == GTU
+ || other_comparison == GEU ? 48 : 13);
case UNLE:
return 11;
case UNGE:
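
To make the encoding concrete: from the values above, each mask bit records one possible outcome of the comparison, and this patch gives the two unsigned outcomes their own bits next to the existing signed/FP ones. A standalone sketch (not GCC source; the MASK_* names are invented here):

#include <assert.h>

enum
{
  MASK_UNORDERED = 1,  /* operands unordered (e.g. a NaN)  */
  MASK_EQ        = 2,  /* equal                            */
  MASK_GT        = 4,  /* signed/FP greater than           */
  MASK_LT        = 8,  /* signed/FP less than              */
  MASK_GTU       = 16, /* unsigned greater than (new)      */
  MASK_LTU       = 32  /* unsigned less than (new)         */
};

int
main (void)
{
  /* The composite return values above are unions of single outcomes.  */
  assert ((MASK_LTU | MASK_EQ) == 34);                  /* LEU  */
  assert ((MASK_GTU | MASK_EQ) == 18);                  /* GEU  */
  assert ((MASK_LT | MASK_GT) == 12);                   /* LTGT */
  assert ((MASK_LT | MASK_GT | MASK_EQ) == 14);         /* ORDERED */
  assert ((MASK_LT | MASK_GT | MASK_UNORDERED) == 13);  /* signed NE */
  assert ((MASK_LTU | MASK_GTU) == 48);      /* NE beside an unsigned op */
  return 0;
}

This is also why NE needs OTHER_COMPARISON: "not equal" is LT|GT|UNORDERED (13) against a signed or FP companion but LTU|GTU (48) against an unsigned one, and only the companion comparison says which reading applies.
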
{
switch (mask)
{
+ case 32:
+ return LTU;
+ case 16:
+ return GTU;
case 8:
return LT;
case 4:
case 1:
return UNORDERED;
+ case 34:
+ return LEU;
+ case 18:
+ return GEU;
case 12:
return LTGT;
case 10:
case 14:
return ORDERED;
+ case 48:
case 13:
return NE;
case 11:
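
mask_to_comparison is the inverse table, so the new unsigned entries must round-trip; a quick standalone check (toy_* names invented for this sketch):

#include <assert.h>
#include <stdlib.h>

enum toy_cmp { TOY_LTU, TOY_GTU, TOY_LEU, TOY_GEU };
static const int toy_mask[] = { 32, 16, 34, 18 };

/* Inverse lookup, mirroring the new cases in mask_to_comparison.  */
static enum toy_cmp
toy_from_mask (int mask)
{
  for (int i = 0; i < 4; i++)
    if (toy_mask[i] == mask)
      return (enum toy_cmp) i;
  abort ();
}

int
main (void)
{
  for (int c = TOY_LTU; c <= TOY_GEU; c++)
    assert (toy_from_mask (toy_mask[c]) == (enum toy_cmp) c);
  return 0;
}

(48 deliberately has no entry of its own in the sketch: in the patch it maps back to NE, sharing that case with 13.)
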
simplify_logical_relational_operation (enum rtx_code code, machine_mode mode,
rtx op0, rtx op1)
{
- /* We only handle IOR of two relational operations. */
- if (code != IOR)
+ /* We only handle AND if we can ignore unordered cases. */
+ bool honor_nans_p = HONOR_NANS (GET_MODE (op0));
+ if (code != IOR && (code != AND || honor_nans_p))
return 0;
if (!(COMPARISON_P (op0) && COMPARISON_P (op1)))
enum rtx_code code0 = GET_CODE (op0);
enum rtx_code code1 = GET_CODE (op1);
- /* We don't handle unsigned comparisons currently. */
- if (code0 == LTU || code0 == GTU || code0 == LEU || code0 == GEU)
- return 0;
- if (code1 == LTU || code1 == GTU || code1 == LEU || code1 == GEU)
- return 0;
+ int mask0 = comparison_to_mask (code0, code1);
+ int mask1 = comparison_to_mask (code1, code0);
- int mask0 = comparison_to_mask (code0);
- int mask1 = comparison_to_mask (code1);
+ /* Reject combinations of signed and unsigned comparisons,
+ with ORDERED being signed. */
+ if (((mask0 & 13) && (mask1 & 48)) || ((mask1 & 13) && (mask0 & 48)))
+ return NULL_RTX;
- int mask = mask0 | mask1;
+ int mask = (code == IOR ? mask0 | mask1 : mask0 & mask1);
- if (mask == 15)
+ if (mask == 0)
+ return const0_rtx;
+
+ if (mask == 50 || mask == 15)
return const_true_rtx;
code = mask_to_comparison (mask);
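
Worked through with the constants above (standalone sketch, not GCC code): IOR unions the outcome sets, AND intersects them, and the special masks fall out as the contradiction (0) and the unsigned (50) and signed/FP (15) tautologies.

#include <assert.h>

int
main (void)
{
  /* IOR unions outcomes: (ltu) | (eq) = 32 | 2 = 34, i.e. LEU.  */
  assert ((32 | 2) == 34);

  /* AND intersects: (leu) & (ne) = 34 & 48 = 32, i.e. LTU, using the
     unsigned value of NE (48) because its companion LEU is unsigned.  */
  assert ((34 & 48) == 32);

  /* (leu) | (geu) covers every unsigned outcome: 34 | 18 = 50,
     which the code above folds to const_true_rtx.  */
  assert ((34 | 18) == 50);

  /* (ltu) & (gtu) shares no outcome: 32 & 16 = 0, folded to const0_rtx.  */
  assert ((32 & 16) == 0);
  return 0;
}
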
return tem;
tem = simplify_associative_operation (code, mode, op0, op1);
+ if (tem)
+ return tem;
+
+ tem = simplify_logical_relational_operation (code, mode, op0, op1);
if (tem)
return tem;
break;
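
At the source level this is the kind of AND combination that can now fold (whether it actually fires depends on target and flags; the check below only verifies the underlying identity):

#include <assert.h>

/* For unsigned operands, (a <= b) & (a != b) has the same truth
   value as a < b, i.e. mask 34 & 48 == 32 from the sketch above.  */
static int
combined (unsigned a, unsigned b)
{
  return (a <= b) & (a != b);
}

int
main (void)
{
  for (unsigned a = 0; a < 4; a++)
    for (unsigned b = 0; b < 4; b++)
      assert (combined (a, b) == (a < b));
  return 0;
}
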
+2019-11-18 Richard Sandiford <richard.sandiford@arm.com>
+
+ * gcc.target/aarch64/sve/acle/asm/ptest_pmore.c: New test.
+
2019-11-18 Richard Sandiford <richard.sandiford@arm.com>
* gcc.target/aarch64/sve/acle/general/temporaries_1.c: New test.
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/asm/ptest_pmore.c
+/* { dg-additional-options "-msve-vector-bits=scalable" } */
+/* { dg-final { check-function-bodies "**" "" "-DCHECK_ASM" } } */
+
+#include "test_sve_acle.h"
+#include <stdbool.h>
+
+/*
+** test_bool_pmore:
+** ptest p0, p1\.b
+** cset [wx]0, pmore
+** ret
+*/
+TEST_PTEST (test_bool_pmore, bool,
+ x0 = svptest_any (p0, p1) & !svptest_last (p0, p1));
+
+/*
+** test_bool_plast:
+** ptest p0, p1\.b
+** cset [wx]0, plast
+** ret
+*/
+TEST_PTEST (test_bool_plast, bool,
+ x0 = !svptest_any (p0, p1) | svptest_last (p0, p1));
+
+/*
+** test_int_pmore:
+** ptest p0, p1\.b
+** cset [wx]0, pmore
+** ret
+*/
+TEST_PTEST (test_int_pmore, int,
+ x0 = svptest_any (p0, p1) & !svptest_last (p0, p1));
+
+/*
+** test_int_plast:
+** ptest p0, p1\.b
+** cset [wx]0, plast
+** ret
+*/
+TEST_PTEST (test_int_plast, int,
+ x0 = !svptest_any (p0, p1) | svptest_last (p0, p1));
+
+/*
+** test_int64_t_pmore:
+** ptest p0, p1\.b
+** cset [wx]0, pmore
+** ret
+*/
+TEST_PTEST (test_int64_t_pmore, int64_t,
+ x0 = svptest_any (p0, p1) & !svptest_last (p0, p1));
+
+/*
+** test_int64_t_plast:
+** ptest p0, p1\.b
+** cset [wx]0, plast
+** ret
+*/
+TEST_PTEST (test_int64_t_plast, int64_t,
+ x0 = !svptest_any (p0, p1) | svptest_last (p0, p1));
+
+/*
+** sel_pmore:
+** ptest p0, p1\.b
+** csel x0, (x0, x1, pmore|x1, x0, plast)
+** ret
+*/
+TEST_PTEST (sel_pmore, int64_t,
+ x0 = svptest_any (p0, p1) & !svptest_last (p0, p1) ? x0 : x1);
+
+/*
+** sel_plast:
+** ptest p0, p1\.b
+** csel x0, (x0, x1, plast|x1, x0, pmore)
+** ret
+*/
+TEST_PTEST (sel_plast, int64_t,
+ x0 = !svptest_any (p0, p1) | svptest_last (p0, p1) ? x0 : x1);
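
The tests pair svptest_any (p0, p1) & !svptest_last (p0, p1) with the pmore condition and its negation !any | last with plast, i.e. the two conditions are exact complements. A scalar model of that pairing (sketch only; on the real target the flags come from PTEST):

#include <assert.h>
#include <stdbool.h>

/* pmore: some element active but not the last;
   plast: last element active, or no element active.  */
static bool pmore (bool any, bool last) { return any && !last; }
static bool plast (bool any, bool last) { return !any || last; }

int
main (void)
{
  for (int any = 0; any < 2; any++)
    for (int last = 0; last < 2; last++)
      assert (pmore (any, last) == !plast (any, last));
  return 0;
}
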