}
)
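+;; Named (rather than "*condjump") so that the *compare_condjump<mode>
+;; splitter below can emit it directly through gen_condjump.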
-(define_insn "*condjump"
+(define_insn "condjump"
[(set (pc) (if_then_else (match_operator 0 "aarch64_comparison_operator"
[(match_operand 1 "cc_register" "") (const_int 0)])
(label_ref (match_operand 2 "" ""))
(pc)))]
)
+;; For a 24-bit immediate CST we can optimize an equality comparison and
+;; branch sequence from:
+;; mov x0, #imm1
+;; movk x0, #imm2, lsl 16 /* x0 contains CST. */
+;; cmp x1, x0
+;; b<ne,eq> .Label
+;; into the shorter:
+;; sub x0, x1, #(CST & 0xfff000)
+;; subs x0, x0, #(CST & 0x000fff)
+;; b<ne,eq> .Label
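+;; For example, with the hypothetical constant CST = 0x123456 this becomes:
+;; sub x0, x1, #0x123000
+;; subs x0, x0, #0x456
+;; b<ne,eq> .Label
+;; where x0 is zero (and the Z flag set) if and only if x1 == CST.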
+(define_insn_and_split "*compare_condjump<mode>"
+ [(set (pc) (if_then_else (EQL
+ (match_operand:GPI 0 "register_operand" "r")
+ (match_operand:GPI 1 "aarch64_imm24" "n"))
+ (label_ref:P (match_operand 2 "" ""))
+ (pc)))]
+ "!aarch64_move_imm (INTVAL (operands[1]), <MODE>mode)
+ && !aarch64_plus_operand (operands[1], <MODE>mode)
+ && !reload_completed"
+ "#"
+ "&& true"
+ [(const_int 0)]
+ {
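+ /* Split CST into its high 12 bits (a valid 12-bit immediate shifted
+    left by 12) and its low 12 bits; each part then fits a single
+    subtraction instruction.  */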
+ HOST_WIDE_INT lo_imm = UINTVAL (operands[1]) & 0xfff;
+ HOST_WIDE_INT hi_imm = UINTVAL (operands[1]) & 0xfff000;
+ rtx tmp = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_add<mode>3 (tmp, operands[0], GEN_INT (-hi_imm)));
+ emit_insn (gen_add<mode>3_compare0 (tmp, tmp, GEN_INT (-lo_imm)));
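+ /* The subs above set the NZ flags; rebuild the EQ/NE test against
+    zero and branch on it through the now-named condjump pattern.  */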
+ rtx cc_reg = gen_rtx_REG (CC_NZmode, CC_REGNUM);
+ rtx cmp_rtx = gen_rtx_fmt_ee (<EQL:CMP>, <MODE>mode, cc_reg, const0_rtx);
+ emit_jump_insn (gen_condjump (cmp_rtx, cc_reg, operands[2]));
+ DONE;
+ }
+)
+
(define_expand "casesi"
[(match_operand:SI 0 "register_operand" "") ; Index
(match_operand:SI 1 "const_int_operand" "") ; Lower bound
"
)
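+;; Named (rather than "*cstore<mode>_insn") so that the splitter for
+;; *compare_cstore<mode>_insn below can emit it directly through
+;; gen_aarch64_cstore<mode>.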
-(define_insn "*cstore<mode>_insn"
+(define_insn "aarch64_cstore<mode>"
[(set (match_operand:ALLI 0 "register_operand" "=r")
(match_operator:ALLI 1 "aarch64_comparison_operator"
[(match_operand 2 "cc_register" "") (const_int 0)]))]
[(set_attr "type" "csel")]
)
+;; For a 24-bit immediate CST we can optimize an equality comparison and
+;; cset sequence from:
+;; mov x0, #imm1
+;; movk x0, #imm2, lsl 16 /* x0 contains CST. */
+;; cmp x1, x0
+;; cset x2, <ne,eq>
+;; into the shorter:
+;; sub x0, x1, #(CST & 0xfff000)
+;; subs x0, x0, #(CST & 0x000fff)
+;; cset x2, <ne,eq>
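+;; For example, with the hypothetical constant CST = 0x123456 this becomes:
+;; sub x0, x1, #0x123000
+;; subs x0, x0, #0x456
+;; cset x2, <ne,eq>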
+(define_insn_and_split "*compare_cstore<mode>_insn"
+ [(set (match_operand:GPI 0 "register_operand" "=r")
+ (EQL:GPI (match_operand:GPI 1 "register_operand" "r")
+ (match_operand:GPI 2 "aarch64_imm24" "n")))]
+ "!aarch64_move_imm (INTVAL (operands[2]), <MODE>mode)
+ && !aarch64_plus_operand (operands[2], <MODE>mode)
+ && !reload_completed"
+ "#"
+ "&& true"
+ [(const_int 0)]
+ {
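+ /* As in *compare_condjump<mode>: split CST into its high and low
+    12-bit parts, each of which fits a single subtraction.  */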
+ HOST_WIDE_INT lo_imm = UINTVAL (operands[2]) & 0xfff;
+ HOST_WIDE_INT hi_imm = UINTVAL (operands[2]) & 0xfff000;
+ rtx tmp = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_add<mode>3 (tmp, operands[1], GEN_INT (-hi_imm)));
+ emit_insn (gen_add<mode>3_compare0 (tmp, tmp, GEN_INT (-lo_imm)));
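+ /* Materialize the EQ/NE result of the flags into operand 0 through
+    the now-named aarch64_cstore<mode> pattern.  */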
+ rtx cc_reg = gen_rtx_REG (CC_NZmode, CC_REGNUM);
+ rtx cmp_rtx = gen_rtx_fmt_ee (<EQL:CMP>, <MODE>mode, cc_reg, const0_rtx);
+ emit_insn (gen_aarch64_cstore<mode> (operands[0], cmp_rtx, cc_reg));
+ DONE;
+ }
+ [(set_attr "type" "csel")]
+)
+
;; zero_extend version of the above
(define_insn "*cstoresi_insn_uxtw"
[(set (match_operand:DI 0 "register_operand" "=r")