-/* Output routines for GCC for ARM/RISCiX.
+/* Output routines for GCC for ARM.
Copyright (C) 1991, 93, 94, 95, 96, 97, 98, 1999 Free Software Foundation, Inc.
Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
and Martin Simmons (@harleqn.co.uk).
- More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk)
+ More major hacks by Richard Earnshaw (rearnsha@arm.com).
This file is part of GNU CC.
/* The maximum number of insns skipped which will be conditionalised if
possible. */
-#define MAX_INSNS_SKIPPED 5
+static int max_insns_skipped = 5;
/* Some function declarations. */
extern FILE *asm_out_file;
| FL_ARCH4)},
{"arm810", PROCESSOR_ARM8, (FL_FAST_MULT | FL_MODE32 | FL_MODE26
| FL_ARCH4)},
+ /* The next two are the same, but the arm9 only exists in the Thumb variant.  */
+ {"arm9", PROCESSOR_ARM9, (FL_FAST_MULT | FL_MODE32 | FL_ARCH4
+ | FL_THUMB)},
+ {"arm9tdmi", PROCESSOR_ARM9, (FL_FAST_MULT | FL_MODE32 | FL_ARCH4
+ | FL_THUMB)},
{"strongarm", PROCESSOR_STARM, (FL_FAST_MULT | FL_MODE32 | FL_MODE26
| FL_ARCH4)},
{"strongarm110", PROCESSOR_STARM, (FL_FAST_MULT | FL_MODE32 | FL_MODE26
target_flags &= ~ARM_FLAG_THUMB;
}
- if (TARGET_FPE && arm_fpu != FP_HARD)
- arm_fpu = FP_SOFT2;
+ if (TARGET_FPE && arm_fpu == FP_HARD)
+ arm_fpu = FP_SOFT3;
+
+ /* If optimizing for space, don't synthesize constants */
+ if (optimize_size)
+ arm_constant_limit = 1;
+
+ /* Override a few things based on the tuning parameters.  */
+ switch (arm_cpu)
+ {
+ case PROCESSOR_ARM2:
+ case PROCESSOR_ARM3:
+ /* For arm2/3 there is no need to do any scheduling if there is
+ only a floating point emulator, or we are doing software
+ floating-point. */
+ if (TARGET_SOFT_FLOAT || arm_fpu != FP_HARD)
+ flag_schedule_insns = flag_schedule_insns_after_reload = 0;
+ break;
+
+ case PROCESSOR_ARM6:
+ case PROCESSOR_ARM7:
+ break;
+
+ case PROCESSOR_ARM8:
+ case PROCESSOR_ARM9:
+ /* For these processors, it never costs more than 2 cycles to load a
+ constant, and the load scheduler may well reduce that to 1. */
+ arm_constant_limit = 1;
+ break;
+
+ case PROCESSOR_STARM:
+ /* Same as above */
+ arm_constant_limit = 1;
+ /* StrongARM has early execution of branches, so a sequence that is
+ worth skipping is shorter.  */
+ max_insns_skipped = 3;
+ break;
+
+ default:
+ fatal ("Unknown cpu type selected");
+ break;
+ }
- /* For arm2/3 there is no need to do any scheduling if there is only
- a floating point emulator, or we are doing software floating-point. */
- if ((TARGET_SOFT_FLOAT || arm_fpu != FP_HARD) && arm_cpu == PROCESSOR_ARM2)
- flag_schedule_insns = flag_schedule_insns_after_reload = 0;
+ /* If optimizing for size, bump the number of instructions that we
+ are prepared to conditionally execute (even on a StrongARM). */
+ if (optimize_size)
+ max_insns_skipped = 6;
arm_prog_mode = TARGET_APCS_32 ? PROG_MODE_PROG32 : PROG_MODE_PROG26;
}
}
\f
-
/* Return 1 if it is possible to return using a single instruction */
int
-use_return_insn ()
+use_return_insn (iscond)
+ int iscond;
{
int regno;
return 0;
/* Can't be done if interworking with Thumb, and any registers have been
- stacked */
- if (TARGET_THUMB_INTERWORK)
+ stacked. Similarly, on StrongARM, conditional returns are expensive
+ if they aren't taken and registers have been stacked. */
+ if (iscond && arm_cpu == PROCESSOR_STARM && frame_pointer_needed)
+ return 0;
+ else if ((iscond && arm_cpu == PROCESSOR_STARM)
+ || TARGET_THUMB_INTERWORK)
for (regno = 0; regno < 16; regno++)
if (regs_ever_live[regno] && ! call_used_regs[regno])
return 0;
{
rtx i_pat, d_pat;
+ /* XXX This is not strictly true for the FPA. */
+ if (REG_NOTE_KIND(link) == REG_DEP_ANTI
+ || REG_NOTE_KIND(link) == REG_DEP_OUTPUT)
+ return 0;
+
if ((i_pat = single_set (insn)) != NULL
&& GET_CODE (SET_SRC (i_pat)) == MEM
&& (d_pat = single_set (dep)) != NULL
if (unsorted_offsets[order[nops - 1]] == -4)
return 4; /* ldmdb */
+ /* For ARM8,9 & StrongARM, 2 ldr instructions are faster than an ldm if
+ the offset isn't small enough */
+ if (nops == 2
+ && (arm_cpu == PROCESSOR_ARM8 || arm_cpu == PROCESSOR_ARM9
+ || arm_cpu == PROCESSOR_STARM))
+ return 0;
+
/* Can't do it without setting up the offset, only do this if it takes
no more than one insn. */
return (const_ok_for_arm (unsorted_offsets[order[0]])
int volatile_func = (optimize > 0
&& TREE_THIS_VOLATILE (current_function_decl));
- if (use_return_insn() && return_used_this_function)
+ if (use_return_insn (FALSE) && return_used_this_function)
{
if ((frame_size + current_function_outgoing_args_size) != 0
&& !(frame_pointer_needed || TARGET_APCS))
insns are okay, and the label or unconditional branch to the same
label is not too far away, succeed. */
for (insns_skipped = 0;
- !fail && !succeed && insns_skipped++ < MAX_INSNS_SKIPPED;)
+ !fail && !succeed && insns_skipped++ < max_insns_skipped;)
{
rtx scanbody;
this_insn = next_nonnote_insn (this_insn);
if (this_insn && this_insn == label
- && insns_skipped < MAX_INSNS_SKIPPED)
+ && insns_skipped < max_insns_skipped)
{
if (jump_clobbers)
{
else if (GET_CODE (SET_SRC (scanbody)) == IF_THEN_ELSE)
fail = TRUE;
}
+ /* Fail if a conditional return is undesirable (eg on a
+ StrongARM), but still allow this if optimizing for size. */
+ else if (GET_CODE (scanbody) == RETURN
+ && ! use_return_insn (TRUE)
+ && ! optimize_size)
+ fail = TRUE;
else if (GET_CODE (scanbody) == RETURN
&& seeking_return)
{
-;;- Machine description for Advanced RISC Machines' ARM for GNU compiler
+;;- Machine description for ARM for GNU compiler
;; Copyright (C) 1991, 93-98, 1999 Free Software Foundation, Inc.
;; Contributed by Pieter `Tiggr' Schoenmakers (rcpieter@win.tue.nl)
;; and Martin Simmons (@harleqn.co.uk).
-;; More major hacks by Richard Earnshaw (rwe11@cl.cam.ac.uk)
+;; More major hacks by Richard Earnshaw (rearnsha@arm.com).
;; This file is part of GNU CC.
; by the -mapcs-{32,26} flag, and possibly the -mcpu=... option.
(define_attr "prog_mode" "prog26,prog32" (const (symbol_ref "arm_prog_mode")))
-; CPU attribute is used to determine whether condition codes are clobbered
-; by a call insn: on the arm6 they are if in 32-bit addressing mode; on the
-; arm2 and arm3 the condition codes are restored by the return.
-
-(define_attr "cpu" "arm2,arm3,arm6,arm7,arm8,st_arm"
+; CPU attribute is used to determine the best instruction mix for performance
+; on the named processor.
+(define_attr "cpu" "arm2,arm3,arm6,arm7,arm8,arm9,st_arm"
(const (symbol_ref "arm_cpu_attr")))
; Floating Point Unit. If we only have floating point emulation, then there
; Load scheduling, set from the cpu characteristic
(define_attr "ldsched" "no,yes"
- (if_then_else (eq_attr "cpu" "arm8,st_arm")
+ (if_then_else (eq_attr "cpu" "arm8,arm9,st_arm")
(const_string "yes")
(const_string "no")))
(const_string "clob") (const_string "nocond"))
(const_string "nocond")))
+; Only model the write buffer for ARM6 and ARM7. Earlier processors don't
+; have one. Later ones, such as StrongARM, have write-back caches, so don't
+; suffer blockages enough to warrant modelling this (and it can adversely
+; affect the schedule).
+(define_attr "model_wbuf" "no,yes"
+ (if_then_else (eq_attr "cpu" "arm6,arm7")
+ (const_string "yes")
+ (const_string "no")))
+
(define_attr "write_conflict" "no,yes"
(if_then_else (eq_attr "type"
"block,float_em,f_load,f_store,f_mem_r,r_mem_f,call,load")
(define_function_unit "fpa_mem" 1 0 (and (eq_attr "fpu" "fpa")
(eq_attr "type" "f_load")) 3 1)
-(define_function_unit "write_buf" 1 2 (eq_attr "type" "store1") 5 3)
-(define_function_unit "write_buf" 1 2 (eq_attr "type" "store2") 7 4)
-(define_function_unit "write_buf" 1 2 (eq_attr "type" "store3") 9 5)
-(define_function_unit "write_buf" 1 2 (eq_attr "type" "store4") 11 6)
-(define_function_unit "write_buf" 1 2 (eq_attr "type" "r_mem_f") 5 3)
-
-;; The write_blockage unit models (partially), the fact that writes will stall
+;;--------------------------------------------------------------------
+;; Write buffer
+;;--------------------------------------------------------------------
+;; Strictly we should model a 4-deep write buffer for ARM7xx based chips
+(define_function_unit "write_buf" 1 2
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store1,r_mem_f")) 5 3)
+(define_function_unit "write_buf" 1 2
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store2")) 7 4)
+(define_function_unit "write_buf" 1 2
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store3")) 9 5)
+(define_function_unit "write_buf" 1 2
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store4")) 11 6)
+
+;;--------------------------------------------------------------------
+;; Write blockage unit
+;;--------------------------------------------------------------------
+;; The write_blockage unit models (partially), the fact that reads will stall
;; until the write buffer empties.
-
-(define_function_unit "write_blockage" 1 0 (eq_attr "type" "store1") 5 5
- [(eq_attr "write_conflict" "yes")])
-(define_function_unit "write_blockage" 1 0 (eq_attr "type" "store2") 7 7
+;; The f_mem_r and r_mem_f could also block, but they are to the stack,
+;; so we don't model them here
+(define_function_unit "write_blockage" 1 0 (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store1")) 5 5
[(eq_attr "write_conflict" "yes")])
-(define_function_unit "write_blockage" 1 0 (eq_attr "type" "store3") 9 9
+(define_function_unit "write_blockage" 1 0 (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store2")) 7 7
[(eq_attr "write_conflict" "yes")])
-(define_function_unit "write_blockage" 1 0 (eq_attr "type" "store4") 11 11
+(define_function_unit "write_blockage" 1 0 (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "type" "store3")) 9 9
[(eq_attr "write_conflict" "yes")])
-(define_function_unit "write_blockage" 1 0 (eq_attr "type" "r_mem_f") 5 5
+(define_function_unit "write_blockage" 1 0
+ (and (eq_attr "model_wbuf" "yes") (eq_attr "type" "store4")) 11 11
[(eq_attr "write_conflict" "yes")])
-(define_function_unit "write_blockage" 1 0
- (eq_attr "write_conflict" "yes") 1 1)
-
+(define_function_unit "write_blockage" 1 0
+ (and (eq_attr "model_wbuf" "yes")
+ (eq_attr "write_conflict" "yes")) 1 1)
+
+;;--------------------------------------------------------------------
+;; Core unit
+;;--------------------------------------------------------------------
+;; Everything must spend at least one cycle in the core unit
+(define_function_unit "core" 1 0
+ (and (eq_attr "ldsched" "yes") (eq_attr "type" "store1")) 1 1)
+(define_function_unit "core" 1 0
+ (and (eq_attr "ldsched" "yes") (eq_attr "type" "load")) 2 1)
-(define_function_unit "core" 1 1 (eq_attr "core_cycles" "single") 1 1)
+(define_function_unit "core" 1 0
+ (and (eq_attr "ldsched" "!yes") (eq_attr "type" "load,store1")) 2 2)
-(define_function_unit "core" 1 1
- (and (eq_attr "ldsched" "yes") (eq_attr "type" "load")) 1 1)
+(define_function_unit "core" 1 0
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "f_load")) 3 3)
-(define_function_unit "core" 1 1
- (and (eq_attr "ldsched" "!yes") (eq_attr "type" "load")) 2 2)
+(define_function_unit "core" 1 0
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "f_store")) 4 4)
-(define_function_unit "core" 1 1 (eq_attr "type" "mult") 16 16)
+(define_function_unit "core" 1 0
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "r_mem_f")) 6 6)
-(define_function_unit "core" 1 1
- (and (eq_attr "ldsched" "yes") (eq_attr "type" "store1")) 1 1)
+(define_function_unit "core" 1 0
+ (and (eq_attr "fpu" "fpa") (eq_attr "type" "f_mem_r")) 7 7)
-(define_function_unit "core" 1 1
- (and (eq_attr "ldsched" "!yes") (eq_attr "type" "store1")) 2 2)
+(define_function_unit "core" 1 0
+ (and (eq_attr "cpu" "!arm8,st_arm") (eq_attr "type" "mult")) 16 16)
-(define_function_unit "core" 1 1 (eq_attr "type" "store2") 3 3)
+(define_function_unit "core" 1 0
+ (and (eq_attr "cpu" "arm8") (eq_attr "type" "mult")) 4 4)
-(define_function_unit "core" 1 1 (eq_attr "type" "store3") 4 4)
+(define_function_unit "core" 1 0
+ (and (eq_attr "cpu" "st_arm") (eq_attr "type" "mult")) 3 2)
-(define_function_unit "core" 1 1 (eq_attr "type" "store4") 5 5)
+(define_function_unit "core" 1 0 (eq_attr "type" "store2") 3 3)
-(define_function_unit "core" 1 1
- (and (eq_attr "core_cycles" "multi")
- (eq_attr "type" "!mult,load,store2,store3,store4")) 32 32)
-
-(define_function_unit "loader" 1 0
- (and (eq_attr "ldsched" "yes") (eq_attr "type" "load")) 2 1)
+(define_function_unit "core" 1 0 (eq_attr "type" "store3") 4 4)
+(define_function_unit "core" 1 0 (eq_attr "type" "store4") 5 5)
\f
;; Note: For DImode insns, there is normally no reason why operands should
;; not be in the same register, what we don't want is for something being
[(set_attr "type" "*,*,load,store1")])
(define_insn "*movhi_insn_littleend"
- [(set (match_operand:HI 0 "general_operand" "=r,r,r")
+ [(set (match_operand:HI 0 "s_register_operand" "=r,r,r")
(match_operand:HI 1 "general_operand" "rI,K,m"))]
"! arm_arch4
&& ! BYTES_BIG_ENDIAN
;; Often the return insn will be the same as loading from memory, so set attr
(define_insn "return"
[(return)]
- "USE_RETURN_INSN"
+ "USE_RETURN_INSN(FALSE)"
"*
{
extern int arm_ccfsm_state;
[(match_operand 1 "cc_register" "") (const_int 0)])
(return)
(pc)))]
- "USE_RETURN_INSN"
+ "USE_RETURN_INSN(TRUE)"
"*
{
extern int arm_ccfsm_state;
[(match_operand 1 "cc_register" "") (const_int 0)])
(pc)
(return)))]
- "USE_RETURN_INSN"
+ "USE_RETURN_INSN(TRUE)"
"*
{
extern int arm_ccfsm_state;
(plus:SI
(match_operand:SI 2 "s_register_operand" "r,r")
(match_operand:SI 3 "arm_add_operand" "rIL,rIL"))
- (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm")))
+ (match_operand:SI 1 "arm_rhs_operand" "0,?rI")))
(clobber (reg:CC 24))]
""
"#"
(set_attr "length" "8,12")])
(define_insn "*if_plus_move"
- [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r,r,r")
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r")
(if_then_else:SI
(match_operator 4 "comparison_operator"
[(match_operand 5 "cc_register" "") (const_int 0)])
(plus:SI
- (match_operand:SI 2 "s_register_operand" "r,r,r,r,r,r")
- (match_operand:SI 3 "arm_add_operand" "rI,L,rI,L,rI,L"))
- (match_operand:SI 1 "arm_rhsm_operand" "0,0,?rI,?rI,m,m")))]
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,L,rI,L"))
+ (match_operand:SI 1 "arm_rhs_operand" "0,0,?rI,?rI")))]
""
"@
add%d4\\t%0, %2, %3
sub%d4\\t%0, %2, #%n3
add%d4\\t%0, %2, %3\;mov%D4\\t%0, %1
- sub%d4\\t%0, %2, #%n3\;mov%D4\\t%0, %1
- add%d4\\t%0, %2, %3\;ldr%D4\\t%0, %1
- sub%d4\\t%0, %2, #%n3\;ldr%D4\\t%0, %1"
+ sub%d4\\t%0, %2, #%n3\;mov%D4\\t%0, %1"
[(set_attr "conds" "use")
- (set_attr "length" "4,4,8,8,8,8")
- (set_attr "type" "*,*,*,*,load,load")])
+ (set_attr "length" "4,4,8,8")
+ (set_attr "type" "*,*,*,*")])
(define_insn "*ifcompare_move_plus"
[(set (match_operand:SI 0 "s_register_operand" "=r,r")
(if_then_else:SI (match_operator 6 "comparison_operator"
[(match_operand:SI 4 "s_register_operand" "r,r")
(match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
- (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm")
+ (match_operand:SI 1 "arm_rhs_operand" "0,?rI")
(plus:SI
(match_operand:SI 2 "s_register_operand" "r,r")
(match_operand:SI 3 "arm_add_operand" "rIL,rIL"))))
(set_attr "length" "8,12")])
(define_insn "*if_move_plus"
- [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r,r,r")
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r")
(if_then_else:SI
(match_operator 4 "comparison_operator"
[(match_operand 5 "cc_register" "") (const_int 0)])
- (match_operand:SI 1 "arm_rhsm_operand" "0,0,?rI,?rI,m,m")
+ (match_operand:SI 1 "arm_rhs_operand" "0,0,?rI,?rI")
(plus:SI
- (match_operand:SI 2 "s_register_operand" "r,r,r,r,r,r")
- (match_operand:SI 3 "arm_add_operand" "rI,L,rI,L,rI,L"))))]
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 3 "arm_add_operand" "rI,L,rI,L"))))]
""
"@
add%D4\\t%0, %2, %3
sub%D4\\t%0, %2, #%n3
add%D4\\t%0, %2, %3\;mov%d4\\t%0, %1
- sub%D4\\t%0, %2, #%n3\;mov%d4\\t%0, %1
- add%D4\\t%0, %2, %3\;ldr%d4\\t%0, %1
- sub%D4\\t%0, %2, #%n3\;ldr%d4\\t%0, %1"
+ sub%D4\\t%0, %2, #%n3\;mov%d4\\t%0, %1"
[(set_attr "conds" "use")
- (set_attr "length" "4,4,8,8,8,8")
- (set_attr "type" "*,*,*,*,load,load")])
+ (set_attr "length" "4,4,8,8")
+ (set_attr "type" "*,*,*,*")])
(define_insn "*ifcompare_arith_arith"
[(set (match_operand:SI 0 "s_register_operand" "=r")
(match_operator:SI 7 "shiftable_operator"
[(match_operand:SI 4 "s_register_operand" "r,r")
(match_operand:SI 5 "arm_rhs_operand" "rI,rI")])
- (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm")))
+ (match_operand:SI 1 "arm_rhs_operand" "0,?rI")))
(clobber (reg:CC 24))]
""
"*
output_asm_insn (\"cmp\\t%2, %3\", operands);
output_asm_insn (\"%I7%d6\\t%0, %4, %5\", operands);
if (which_alternative != 0)
- {
- if (GET_CODE (operands[1]) == MEM)
- return \"ldr%D6\\t%0, %1\";
- else
- return \"mov%D6\\t%0, %1\";
- }
+ return \"mov%D6\\t%0, %1\";
return \"\";
"
[(set_attr "conds" "clob")
(set_attr "length" "8,12")])
(define_insn "*if_arith_move"
- [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
(if_then_else:SI (match_operator 4 "comparison_operator"
[(match_operand 6 "cc_register" "") (const_int 0)])
(match_operator:SI 5 "shiftable_operator"
- [(match_operand:SI 2 "s_register_operand" "r,r,r")
- (match_operand:SI 3 "arm_rhs_operand" "rI,rI,rI")])
- (match_operand:SI 1 "arm_rhsm_operand" "0,?rI,m")))]
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])
+ (match_operand:SI 1 "arm_rhs_operand" "0,?rI")))]
""
"@
%I5%d4\\t%0, %2, %3
- %I5%d4\\t%0, %2, %3\;mov%D4\\t%0, %1
- %I5%d4\\t%0, %2, %3\;ldr%D4\\t%0, %1"
+ %I5%d4\\t%0, %2, %3\;mov%D4\\t%0, %1"
[(set_attr "conds" "use")
- (set_attr "length" "4,8,8")
- (set_attr "type" "*,*,load")])
+ (set_attr "length" "4,8")
+ (set_attr "type" "*,*")])
(define_insn "*ifcompare_move_arith"
[(set (match_operand:SI 0 "s_register_operand" "=r,r")
(if_then_else:SI (match_operator 6 "comparison_operator"
[(match_operand:SI 4 "s_register_operand" "r,r")
(match_operand:SI 5 "arm_add_operand" "rIL,rIL")])
- (match_operand:SI 1 "arm_rhsm_operand" "0,?rIm")
+ (match_operand:SI 1 "arm_rhs_operand" "0,?rI")
(match_operator:SI 7 "shiftable_operator"
[(match_operand:SI 2 "s_register_operand" "r,r")
(match_operand:SI 3 "arm_rhs_operand" "rI,rI")])))
output_asm_insn (\"cmp\\t%4, %5\", operands);
if (which_alternative != 0)
- {
- if (GET_CODE (operands[1]) == MEM)
- output_asm_insn (\"ldr%d6\\t%0, %1\", operands);
- else
- output_asm_insn (\"mov%d6\\t%0, %1\", operands);
- }
+ output_asm_insn (\"mov%d6\\t%0, %1\", operands);
return \"%I7%D6\\t%0, %2, %3\";
"
[(set_attr "conds" "clob")
(set_attr "length" "8,12")])
(define_insn "*if_move_arith"
- [(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
+ [(set (match_operand:SI 0 "s_register_operand" "=r,r")
(if_then_else:SI
(match_operator 4 "comparison_operator"
[(match_operand 6 "cc_register" "") (const_int 0)])
- (match_operand:SI 1 "arm_rhsm_operand" "0,?rI,m")
+ (match_operand:SI 1 "arm_rhs_operand" "0,?rI")
(match_operator:SI 5 "shiftable_operator"
- [(match_operand:SI 2 "s_register_operand" "r,r,r")
- (match_operand:SI 3 "arm_rhs_operand" "rI,rI,rI")])))]
+ [(match_operand:SI 2 "s_register_operand" "r,r")
+ (match_operand:SI 3 "arm_rhs_operand" "rI,rI")])))]
""
"@
%I5%D4\\t%0, %2, %3
- %I5%D4\\t%0, %2, %3\;mov%d4\\t%0, %1
- %I5%D4\\t%0, %2, %3\;ldr%d4\\t%0, %1"
+ %I5%D4\\t%0, %2, %3\;mov%d4\\t%0, %1"
[(set_attr "conds" "use")
- (set_attr "length" "4,8,8")
- (set_attr "type" "*,*,load")])
+ (set_attr "length" "4,8")
+ (set_attr "type" "*,*")])
(define_insn "*ifcompare_move_not"
[(set (match_operand:SI 0 "s_register_operand" "=r,r")
(match_operand:SI 1 "general_operand" "g"))
(clobber (reg:SI 14))])
(return)]
- "(GET_CODE (operands[0]) == SYMBOL_REF && USE_RETURN_INSN
+ "(GET_CODE (operands[0]) == SYMBOL_REF && USE_RETURN_INSN(FALSE)
&& !get_frame_size () && !current_function_calls_alloca
&& !frame_pointer_needed && !current_function_args_size)"
"*
(match_operand:SI 2 "general_operand" "g")))
(clobber (reg:SI 14))])
(return)]
- "(GET_CODE (operands[1]) == SYMBOL_REF && USE_RETURN_INSN
+ "(GET_CODE (operands[1]) == SYMBOL_REF && USE_RETURN_INSN(FALSE)
&& !get_frame_size () && !current_function_calls_alloca
&& !frame_pointer_needed && !current_function_args_size)"
"*
(clobber (reg:SI 14))])
(use (match_dup 0))
(return)]
- "(GET_CODE (operands[1]) == SYMBOL_REF && USE_RETURN_INSN
+ "(GET_CODE (operands[1]) == SYMBOL_REF && USE_RETURN_INSN(FALSE)
&& !get_frame_size () && !current_function_calls_alloca
&& !frame_pointer_needed && !current_function_args_size)"
"*