return get_attr_type (insn);
}
-/* Return true if insn sets the condition codes. */
-
-bool
-arc_sets_cc_p (rtx_insn *insn)
-{
- if (NONJUMP_INSN_P (insn))
- if (rtx_sequence *seq = dyn_cast <rtx_sequence *> (PATTERN (insn)))
- insn = seq->insn (seq->len () - 1);
- return arc_attr_type (insn) == TYPE_COMPARE;
-}
-
-/* Return true if INSN is an instruction with a delay slot we may want
- to fill. */
-
-bool
-arc_need_delay (rtx_insn *insn)
-{
- rtx_insn *next;
-
- if (!flag_delayed_branch)
- return false;
- /* The return at the end of a function needs a delay slot. */
- if (NONJUMP_INSN_P (insn) && GET_CODE (PATTERN (insn)) == USE
- && (!(next = next_active_insn (insn))
- || ((!NONJUMP_INSN_P (next) || GET_CODE (PATTERN (next)) != SEQUENCE)
- && arc_attr_type (next) == TYPE_RETURN))
- && (!TARGET_PAD_RETURN
- || (prev_active_insn (insn)
- && prev_active_insn (prev_active_insn (insn))
- && prev_active_insn (prev_active_insn (prev_active_insn (insn))))))
- return true;
- if (NONJUMP_INSN_P (insn)
- ? (GET_CODE (PATTERN (insn)) == USE
- || GET_CODE (PATTERN (insn)) == CLOBBER
- || GET_CODE (PATTERN (insn)) == SEQUENCE)
- : JUMP_P (insn)
- ? (GET_CODE (PATTERN (insn)) == ADDR_VEC
- || GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC)
- : !CALL_P (insn))
- return false;
- return num_delay_slots (insn) != 0;
-}
-
-/* Return true if the scheduling pass(es) has/have already run,
- i.e. where possible, we should try to mitigate high latencies
- by different instruction selection. */
-
-bool
-arc_scheduling_not_expected (void)
-{
- return cfun->machine->arc_reorg_started;
-}
-
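Context for the three deletions above: arc_sets_cc_p, arc_need_delay and
arc_scheduling_not_expected served the post-sched2 split conditions of the
old "adc" and "sbc" define_insn_and_split patterns. Those split conditions
are removed below when the patterns become plain define_insns, which is
what lets these helpers go.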
/* Code has a minimum p2 alignment of 1, which we must restore after
an ADDR_DIFF_VEC. */
(set_attr "type" "compare")
(set_attr "length" "4,4,8")])
-; w/c/c comes first (rather than w/0/C_0) to prevent the middle-end
-; needlessly prioritizing the matching constraint.
-; Rcw/0/C_0 comes before w/c/L so that the lower latency conditional
-; execution is used where possible.
-(define_insn_and_split "adc"
- [(set (match_operand:SI 0 "dest_reg_operand" "=w,Rcw,w,Rcw,w")
- (plus:SI (plus:SI (ltu:SI (reg:CC_C CC_REG) (const_int 0))
- (match_operand:SI 1 "nonmemory_operand"
- "%c,0,c,0,cCal"))
- (match_operand:SI 2 "nonmemory_operand" "c,C_0,L,I,cCal")))]
+(define_insn "adc"
+ [(set (match_operand:SI 0 "register_operand" "=r, r,r,r, r,r")
+ (plus:SI
+ (plus:SI
+ (ltu:SI (reg:CC_C CC_REG) (const_int 0))
+ (match_operand:SI 1 "nonmemory_operand" "%r, 0,r,0,Cal,r"))
+ (match_operand:SI 2 "nonmemory_operand" "r,C_0,L,I, r,Cal")))]
"register_operand (operands[1], SImode)
|| register_operand (operands[2], SImode)"
"@
- adc %0,%1,%2
- add.cs %0,%1,1
- adc %0,%1,%2
- adc %0,%1,%2
- adc %0,%1,%2"
- ; if we have a bad schedule after sched2, split.
- "reload_completed
- && !optimize_size && (!TARGET_ARC600_FAMILY)
- && arc_scheduling_not_expected ()
- && arc_sets_cc_p (prev_nonnote_insn (insn))
- /* If next comes a return or other insn that needs a delay slot,
- expect the adc to get into the delay slot. */
- && next_nonnote_insn (insn)
- && !arc_need_delay (next_nonnote_insn (insn))
- /* Restore operands before emitting. */
- && (extract_insn_cached (insn), 1)"
- [(set (match_dup 0) (match_dup 3))
- (cond_exec
- (ltu (reg:CC_C CC_REG) (const_int 0))
- (set (match_dup 0) (plus:SI (match_dup 0) (const_int 1))))]
- "operands[3] = simplify_gen_binary (PLUS, SImode, operands[1], operands[2]);"
+ adc\\t%0,%1,%2
+ add.cs\\t%0,%1,1
+ adc\\t%0,%1,%2
+ adc\\t%0,%1,%2
+ adc\\t%0,%1,%2
+ adc\\t%0,%1,%2"
[(set_attr "cond" "use")
(set_attr "type" "cc_arith")
- (set_attr "length" "4,4,4,4,8")])
+ (set_attr "length" "4,4,4,4,8,8")])
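As an aside, here is a minimal illustration of what typically reaches this
pattern (the function below is an assumption for illustration, not part of
the patch): on 32-bit ARC a DImode addition expands into a flag-setting add
of the low words followed by an add-with-carry of the high words, and the
latter is the RTL that "adc" above matches.

    /* Illustration only (assumed adddi3-style expansion, not from the
       patch): the high-word half of a 64-bit add is an adc.  */
    unsigned long long
    add64 (unsigned long long a, unsigned long long b)
    {
      return a + b;   /* roughly: add.f lo,lo1,lo2 ; adc hi,hi1,hi2 */
    }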
; combiner-splitter cmp / scc -> cmp / adc
(define_split
DONE;
}
emit_insn (gen_sub_f (l0, l1, l2));
- emit_insn (gen_sbc (h0, h1, h2, gen_rtx_REG (CCmode, CC_REG)));
+ emit_insn (gen_sbc (h0, h1, h2));
DONE;
")
(set_attr "type" "cc_arith")
(set_attr "length" "4")])
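The shorter gen_sbc call above follows from the pattern change below: the
old "sbc" matched the condition-code register as explicit operand 3 via
cc_use_register, while the new one hard-codes (reg:CC_C CC_REG). genemit
derives each gen_* constructor's signature from the pattern's
match_operands, so dropping the operand shrinks the generated declaration
roughly as follows (a sketch of the generated insn-emit interface, shown
as an assumption):

    /* Old pattern: the CC register travels as a fourth operand.  */
    extern rtx gen_sbc (rtx, rtx, rtx, rtx);
    /* New pattern: the CC use is implicit, so the operand is gone.  */
    extern rtx gen_sbc (rtx, rtx, rtx);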
-; w/c/c comes first (rather than Rcw/0/C_0) to prevent the middle-end
-; needlessly prioritizing the matching constraint.
-; Rcw/0/C_0 comes before w/c/L so that the lower latency conditional execution
-; is used where possible.
-(define_insn_and_split "sbc"
- [(set (match_operand:SI 0 "dest_reg_operand" "=w,Rcw,w,Rcw,w")
- (minus:SI (minus:SI (match_operand:SI 1 "nonmemory_operand"
- "c,0,c,0,cCal")
- (ltu:SI (match_operand:CC_C 3 "cc_use_register")
- (const_int 0)))
- (match_operand:SI 2 "nonmemory_operand" "c,C_0,L,I,cCal")))]
+(define_insn "sbc"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r,r")
+ (minus:SI
+ (minus:SI
+ (match_operand:SI 1 "nonmemory_operand" "r, 0,r,0, r,Cal")
+ (ltu:SI (reg:CC_C CC_REG) (const_int 0)))
+ (match_operand:SI 2 "nonmemory_operand" "r,C_0,L,I,Cal,r")))]
"register_operand (operands[1], SImode)
|| register_operand (operands[2], SImode)"
"@
- sbc %0,%1,%2
- sub.cs %0,%1,1
- sbc %0,%1,%2
- sbc %0,%1,%2
- sbc %0,%1,%2"
- ; if we have a bad schedule after sched2, split.
- "reload_completed
- && !optimize_size && (!TARGET_ARC600_FAMILY)
- && arc_scheduling_not_expected ()
- && arc_sets_cc_p (prev_nonnote_insn (insn))
- /* If next comes a return or other insn that needs a delay slot,
- expect the adc to get into the delay slot. */
- && next_nonnote_insn (insn)
- && !arc_need_delay (next_nonnote_insn (insn))
- /* Restore operands before emitting. */
- && (extract_insn_cached (insn), 1)"
- [(set (match_dup 0) (match_dup 4))
- (cond_exec
- (ltu (reg:CC_C CC_REG) (const_int 0))
- (set (match_dup 0) (plus:SI (match_dup 0) (const_int -1))))]
- "operands[4] = simplify_gen_binary (MINUS, SImode, operands[1], operands[2]);"
+ sbc\\t%0,%1,%2
+ sub.cs\\t%0,%1,1
+ sbc\\t%0,%1,%2
+ sbc\\t%0,%1,%2
+ sbc\\t%0,%1,%2
+ sbc\\t%0,%1,%2"
[(set_attr "cond" "use")
(set_attr "type" "cc_arith")
- (set_attr "length" "4,4,4,4,8")])
+ (set_attr "length" "4,4,4,4,8,8")])
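And the subtract-side counterpart of the earlier illustration (again an
assumption, mirroring the gen_sub_f/gen_sbc sequence emitted in the
expander above):

    /* Illustration only: the high-word half of a 64-bit subtract is
       an sbc, consuming the borrow set by the low-word sub.f.  */
    unsigned long long
    sub64 (unsigned long long a, unsigned long long b)
    {
      return a - b;   /* roughly: sub.f lo,lo1,lo2 ; sbc hi,hi1,hi2 */
    }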
(define_insn "sub_f"
[(set (reg:CC CC_REG)