+2018-08-16  Vlad Lazar  <vlad.lazar@arm.com>
+
+ * expmed.h (canonicalize_comparison): New declaration.
+ * expmed.c (canonicalize_comparison, equivalent_cmp_code): New functions.
+ (emit_store_flag_1): Add call to canonicalize_comparison.
+ * optabs.c (prepare_cmp_insn): Likewise.
+ * rtl.h (unsigned_condition_p): New function which checks if a
+ comparison operator is unsigned.
+
2018-08-16  Nathan Sidwell  <nathan@acm.org>

 * config/rs6000/rs6000-c.c (rs6000_macro_to_expand): Use cpp_macro_p.
  if (mode == VOIDmode)
    mode = GET_MODE (op0);

+  if (CONST_SCALAR_INT_P (op1))
+    canonicalize_comparison (mode, &code, &op1);
+
  /* For some comparisons with 1 and -1, we can convert this to
     comparisons with zero.  This will often produce more opportunities for
     store-flag insns.  */
  return target;
}
+
+/* Helper function for canonicalize_comparison.  Swap between inclusive
+   and exclusive ranges in order to create an equivalent comparison.  See
+   canonicalize_comparison for the possible cases.  */
+
+static enum rtx_code
+equivalent_cmp_code (enum rtx_code code)
+{
+  switch (code)
+    {
+    case GT:
+      return GE;
+    case GE:
+      return GT;
+    case LT:
+      return LE;
+    case LE:
+      return LT;
+    case GTU:
+      return GEU;
+    case GEU:
+      return GTU;
+    case LTU:
+      return LEU;
+    case LEU:
+      return LTU;
+
+    default:
+      return code;
+    }
+}
+
+/* Choose the more appropriate immediate in scalar integer comparisons.  The
+   purpose of this is to end up with an immediate which can be loaded into a
+   register in fewer moves, if possible.
+
+   For each integer comparison there exists an equivalent choice:
+     i)   a >  b or a >= b + 1
+     ii)  a <= b or a <  b + 1
+     iii) a >= b or a >  b - 1
+     iv)  a <  b or a <= b - 1
+
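+   For instance, on a target where the immediate 0xfeffffff can be
+   materialized in fewer instructions than 0xfefffffe (as is typically the
+   case on AArch64, where the former fits a single MOVN), case i) allows
+   the unsigned comparison a > 0xfefffffe to become a >= 0xfeffffff.
+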
+   MODE is the mode of the first operand.
+   CODE points to the comparison code.
+   IMM points to the rtx containing the immediate.  *IMM must satisfy
+   CONST_SCALAR_INT_P on entry and continues to satisfy CONST_SCALAR_INT_P
+   on exit.  */
+
+void
+canonicalize_comparison (machine_mode mode, enum rtx_code *code, rtx *imm)
+{
+  if (!SCALAR_INT_MODE_P (mode))
+    return;
+
+  int to_add = 0;
+  enum signop sgn = unsigned_condition_p (*code) ? UNSIGNED : SIGNED;
+
+  /* Extract the immediate value from the rtx.  */
+  wide_int imm_val = rtx_mode_t (*imm, mode);
+
+  if (*code == GT || *code == GTU || *code == LE || *code == LEU)
+    to_add = 1;
+  else if (*code == GE || *code == GEU || *code == LT || *code == LTU)
+    to_add = -1;
+  else
+    return;
+
+  /* Check for overflow/underflow in the case of signed values and
+     wrapping around in the case of unsigned values.  If any occur
+     cancel the optimization.  */
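+  /* For example, a GTU comparison against the all-ones value of MODE has
+     no equivalent with a larger immediate, so it is left unchanged.  */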
+  wi::overflow_type overflow = wi::OVF_NONE;
+  wide_int imm_modif = wi::add (imm_val, to_add, sgn, &overflow);
+  if (overflow)
+    return;
+
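+  /* Estimate how expensive each immediate is to load by costing a move of
+     it into a scratch (virtual) register, and keep the cheaper choice.  */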
+  rtx reg = gen_rtx_REG (mode, LAST_VIRTUAL_REGISTER + 1);
+  rtx new_imm = immed_wide_int_const (imm_modif, mode);
+
+  rtx_insn *old_rtx = gen_move_insn (reg, *imm);
+  rtx_insn *new_rtx = gen_move_insn (reg, new_imm);
+
+  /* Update the immediate and the code.  */
+  if (insn_cost (old_rtx, true) > insn_cost (new_rtx, true))
+    {
+      *code = equivalent_cmp_code (*code);
+      *imm = new_imm;
+    }
+}
+
\f
/* Perform possibly multi-word comparison and conditional jump to LABEL
   if ARG1 OP ARG2 true where ARG1 and ARG2 are of mode MODE.  This is
  gcc_assert (methods == OPTAB_DIRECT || methods == OPTAB_WIDEN
              || methods == OPTAB_LIB_WIDEN);

+  if (CONST_SCALAR_INT_P (y))
+    canonicalize_comparison (mode, &comparison, &y);
+
  /* If we are optimizing, force expensive constants into a register.  */
  if (CONSTANT_P (x) && optimize
      && (rtx_cost (x, mode, COMPARE, 0, optimize_insn_for_speed_p ())