while (value)
{
- value &= ~(value & - value);
- ++ count;
+ value &= ~(value & -value);
+ ++count;
}
return count;
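
The loop above is a population count: `value & -value' isolates the lowest set bit, and AND-ing with its complement clears that bit, so the loop iterates once per set bit. A minimal standalone sketch of the same idiom (not part of the patch, illustration only):

    #include <stdint.h>

    /* Count the set bits in VALUE by clearing the lowest one on each
       iteration, the same idiom as the hunk above.  */
    static unsigned int
    popcount32 (uint32_t value)
    {
      unsigned int count = 0;

      while (value)
        {
          value &= ~(value & -value);	/* Clear the lowest set bit.  */
          ++count;
        }
      return count;
    }
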
{
const struct processors * sel;
- for (sel = ptr->processors; sel->name != NULL; sel ++)
+ for (sel = ptr->processors; sel->name != NULL; sel++)
if (streq (ptr->string, sel->name))
{
if (i == 2)
struct cpu_default * def;
/* Find the default. */
- for (def = cpu_defaults; def->name; def ++)
+ for (def = cpu_defaults; def->name; def++)
if (def->cpu == TARGET_CPU_DEFAULT)
break;
abort ();
/* Find the default CPU's flags. */
- for (sel = all_cores; sel->name != NULL; sel ++)
+ for (sel = all_cores; sel->name != NULL; sel++)
if (streq (def->name, sel->name))
break;
interworking. Therefore we force FL_MODE26 to be removed
from insn_flags here (if it was set), so that the search
below will always be able to find a compatible processor. */
- insn_flags &= ~ FL_MODE26;
+ insn_flags &= ~FL_MODE26;
}
- else if (! TARGET_APCS_32)
+ else if (!TARGET_APCS_32)
sought |= FL_MODE26;
if (sought != 0 && ((sought & insn_flags) != sought))
/* Try to locate a CPU type that supports all of the abilities
of the default CPU, plus the extra abilities requested by
the user. */
- for (sel = all_cores; sel->name != NULL; sel ++)
+ for (sel = all_cores; sel->name != NULL; sel++)
if ((sel->flags & sought) == (sought | insn_flags))
break;
options. Instead if we cannot find a cpu that has both the
characteristics of the default cpu and the given command line
options we scan the array again looking for a best match. */
- for (sel = all_cores; sel->name != NULL; sel ++)
+ for (sel = all_cores; sel->name != NULL; sel++)
if ((sel->flags & sought) == sought)
{
unsigned int count;
"-mapcs-32 -mcpu=arm2" then we loose here. */
if ((TARGET_DEFAULT & ARM_FLAG_APCS_32) == 0)
warning ("target CPU does not support APCS-32" );
- target_flags &= ~ ARM_FLAG_APCS_32;
+ target_flags &= ~ARM_FLAG_APCS_32;
}
- else if (! TARGET_APCS_32 && !(insn_flags & FL_MODE26))
+ else if (!TARGET_APCS_32 && !(insn_flags & FL_MODE26))
{
warning ("target CPU does not support APCS-26" );
target_flags |= ARM_FLAG_APCS_32;
/* If interworking is enabled then APCS-32 must be selected as well. */
if (TARGET_INTERWORK)
{
- if (! TARGET_APCS_32)
+ if (!TARGET_APCS_32)
warning ("interworking forces APCS-32 to be used" );
target_flags |= ARM_FLAG_APCS_32;
}
- if (TARGET_APCS_STACK && ! TARGET_APCS_FRAME)
+ if (TARGET_APCS_STACK && !TARGET_APCS_FRAME)
{
warning ("-mapcs-stack-check incompatible with -mno-apcs-frame");
target_flags |= ARM_FLAG_APCS_FRAME;
are turned off and debugging is turned on. */
if (TARGET_ARM
&& write_symbols != NO_DEBUG
- && ! TARGET_APCS_FRAME
+ && !TARGET_APCS_FRAME
&& (TARGET_DEFAULT & ARM_FLAG_APCS_FRAME))
warning ("-g with -mno-apcs-frame may not give sensible debugging");
/* If stack checking is disabled, we can use r10 as the PIC register,
which keeps r9 available. */
- if (flag_pic && ! TARGET_APCS_STACK)
+ if (flag_pic && !TARGET_APCS_STACK)
arm_pic_register = 10;
if (TARGET_APCS_FLOAT)
{
int pic_register;
- if (! flag_pic)
+ if (!flag_pic)
warning ("-mpic-register= is useless without -fpic");
pic_register = decode_reg_name (arm_pic_register_string);
int regno;
/* Never use a return instruction before reload has run. */
- if (! reload_completed
+ if (!reload_completed
/* Or if the function is variadic. */
|| current_function_pretend_args_size
|| current_function_anonymous_args
|| cfun->machine->eh_epilogue_sp_ofs != NULL
/* Or if there is no frame pointer and there is a stack adjustment. */
|| ((get_frame_size () + current_function_outgoing_args_size != 0)
- && ! frame_pointer_needed))
+ && !frame_pointer_needed))
return 0;
/* Can't be done if interworking with Thumb, and any registers have been
|| TARGET_INTERWORK)
{
for (regno = 0; regno <= LAST_ARM_REGNUM; regno++)
- if (regs_ever_live[regno] && ! call_used_regs[regno])
+ if (regs_ever_live[regno] && !call_used_regs[regno])
return 0;
if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
requires an insn. */
if (TARGET_HARD_FLOAT)
for (regno = FIRST_ARM_FP_REGNUM; regno <= LAST_ARM_FP_REGNUM; regno++)
- if (regs_ever_live[regno] && ! call_used_regs[regno])
+ if (regs_ever_live[regno] && !call_used_regs[regno])
return 0;
/* If a function is naked, don't use the "return" insn. */
const_ok_for_arm (i)
HOST_WIDE_INT i;
{
- unsigned HOST_WIDE_INT mask = ~ HOST_UINT (0xFF);
+ unsigned HOST_WIDE_INT mask = ~HOST_UINT (0xFF);
/* For machines with >32 bit HOST_WIDE_INT, the bits above bit 31 must
be all zero, or all one. */
- if ((i & ~ HOST_UINT (0xffffffff)) != 0
- && ((i & ~ HOST_UINT (0xffffffff))
- != ((~ HOST_UINT (0))
- & ~ HOST_UINT (0xffffffff))))
+ if ((i & ~HOST_UINT (0xffffffff)) != 0
+ && ((i & ~HOST_UINT (0xffffffff))
+ != ((~HOST_UINT (0))
+ & ~HOST_UINT (0xffffffff))))
return FALSE;
/* Fast return for 0 and powers of 2 */
mask =
(mask << 2) | ((mask & HOST_UINT (0xffffffff))
>> (32 - 2)) | ~(HOST_UINT (0xffffffff));
- } while (mask != ~ HOST_UINT (0xFF));
+ } while (mask != ~HOST_UINT (0xFF));
return FALSE;
}
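
The do/while above rotates an inverted 8-bit mask through every even position: an ARM data-processing immediate is an 8-bit value rotated right by an even amount, so the constant is acceptable as soon as it lies entirely inside one such window. A standalone sketch of the same test, with a hypothetical helper name (illustration only, not the patch's code):

    #include <stdint.h>

    /* Return nonzero if I is encodable as an ARM data-processing
       immediate, i.e. an 8-bit value rotated right by an even amount.  */
    static int
    fits_arm_immediate (uint32_t i)
    {
      unsigned int rot;

      for (rot = 0; rot < 32; rot += 2)
        {
          /* Rotate I left by ROT; this undoes a right-rotation of the
             8-bit field (the & 31 keeps both shift counts defined).  */
          uint32_t rotated = (i << rot) | (i >> ((32 - rot) & 31));

          if ((rotated & ~(uint32_t) 0xFF) == 0)
            return 1;
        }
      return 0;
    }
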
Ref: gcc -O1 -mcpu=strongarm gcc.c-torture/compile/980506-2.c
*/
- if (! after_arm_reorg
+ if (!after_arm_reorg
&& (arm_gen_constant (code, mode, val, target, source, 1, 0)
> arm_constant_limit + (code != SET)))
{
{
if ((((temp2 | (temp2 << i))
& HOST_UINT (0xffffffff)) == remainder)
- && ! const_ok_for_arm (temp2))
+ && !const_ok_for_arm (temp2))
{
rtx new_src = (subtargets
? (generate ? gen_reg_rtx (mode) : NULL_RTX)
for (i = 17; i < 24; i++)
{
if (((temp1 | (temp1 >> i)) == remainder)
- && ! const_ok_for_arm (temp1))
+ && !const_ok_for_arm (temp1))
{
rtx new_src = (subtargets
? (generate ? gen_reg_rtx (mode) : NULL_RTX)
then this can be done in two instructions instead of 3-4. */
if (subtargets
/* TARGET can't be NULL if SUBTARGETS is 0 */
- || (reload_completed && ! reg_mentioned_p (target, source)))
+ || (reload_completed && !reg_mentioned_p (target, source)))
{
- if (const_ok_for_arm (ARM_SIGN_EXTEND (~ val)))
+ if (const_ok_for_arm (ARM_SIGN_EXTEND (~val)))
{
if (generate)
{
return 2;
}
- if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~ val)))
+ if (const_ok_for_arm (temp1 = ARM_SIGN_EXTEND (~val)))
{
if (generate)
{
{
int consecutive_zeros = 0;
- if (! (remainder & (3 << i)))
+ if (!(remainder & (3 << i)))
{
- while ((i < 32) && ! (remainder & (3 << i)))
+ while ((i < 32) && !(remainder & (3 << i)))
{
consecutive_zeros += 2;
i += 2;
case GT:
case LE:
- if (i != (((HOST_UINT (1)) << (HOST_BITS_PER_WIDE_INT - 1))
- - 1)
- && (const_ok_for_arm (i+1) || const_ok_for_arm (- (i+1))))
+ if (i != (((HOST_UINT (1)) << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
+ && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
{
- *op1 = GEN_INT (i+1);
+ *op1 = GEN_INT (i + 1);
return code == GT ? GE : LT;
}
break;
case GE:
case LT:
if (i != ((HOST_UINT (1)) << (HOST_BITS_PER_WIDE_INT - 1))
- && (const_ok_for_arm (i-1) || const_ok_for_arm (- (i-1))))
+ && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
{
- *op1 = GEN_INT (i-1);
+ *op1 = GEN_INT (i - 1);
return code == GE ? GT : LE;
}
break;
case GTU:
case LEU:
- if (i != ~ (HOST_UINT (0))
- && (const_ok_for_arm (i+1) || const_ok_for_arm (- (i+1))))
+ if (i != ~(HOST_UINT (0))
+ && (const_ok_for_arm (i + 1) || const_ok_for_arm (-(i + 1))))
{
*op1 = GEN_INT (i + 1);
return code == GTU ? GEU : LTU;
case GEU:
case LTU:
if (i != 0
- && (const_ok_for_arm (i - 1) || const_ok_for_arm (- (i - 1))))
+ && (const_ok_for_arm (i - 1) || const_ok_for_arm (-(i - 1))))
{
*op1 = GEN_INT (i - 1);
return code == GEU ? GTU : LEU;
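
All four case arms rely on the same identity: when the original constant is not encodable, `x > C' is rewritten as `x >= C+1' and `x < C' as `x <= C-1' (likewise for the unsigned GTU/LEU and GEU/LTU pairs), and the `i != ...' guards exclude exactly the values where C+1 or C-1 would wrap. A minimal sketch of the signed case (illustration only):

    #include <assert.h>
    #include <limits.h>

    /* (x > c) == (x >= c + 1) whenever c + 1 does not overflow; the
       guard mirrors the i != ... tests in the switch above.  */
    static int
    gt_via_ge (int x, int c)
    {
      assert (c < INT_MAX);
      return x >= c + 1;
    }
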
arm_return_in_memory (type)
tree type;
{
- if (! AGGREGATE_TYPE_P (type))
+ if (!AGGREGATE_TYPE_P (type))
/* All simple types are returned in registers. */
return 0;
if (TREE_CODE (field) != FIELD_DECL)
continue;
- if (! DECL_BIT_FIELD_TYPE (field))
+ if (!DECL_BIT_FIELD_TYPE (field))
return 1;
}
/* Compute operand 2 of the call insn. */
return GEN_INT (pcum->call_cookie);
- if (! named || pcum->nregs >= NUM_ARG_REGS)
+ if (!named || pcum->nregs >= NUM_ARG_REGS)
return NULL_RTX;
return gen_rtx_REG (mode, pcum->nregs);
- unit. if it s a weak defintion however, then this may not be the real
- defintion of the function, and so we have to say no. */
+ unit. If it is a weak definition, however, then this may not be the real
+ definition of the function, and so we have to say no. */
if (sym_ref == XEXP (DECL_RTL (current_function_decl), 0)
- && ! DECL_WEAK (current_function_decl))
+ && !DECL_WEAK (current_function_decl))
return 1;
/* We cannot make the determination - default to returning 0. */
int call_cookie;
int call_symbol;
{
- if (! call_symbol)
+ if (!call_symbol)
{
if (GET_CODE (sym_ref) != MEM)
return 0;
/* Cannot tail-call to long calls, since these are out of range of
a branch instruction. However, if not compiling PIC, we know
we can reach the symbol if it is in this compilation unit. */
- if (call_type == CALL_LONG && (flag_pic || ! TREE_ASM_WRITTEN (decl)))
+ if (call_type == CALL_LONG && (flag_pic || !TREE_ASM_WRITTEN (decl)))
return 0;
/* If we are interworking and the function is not declared static
then we can't tail-call it unless we know that it exists in this
compilation unit (since it might be a Thumb routine). */
- if (TARGET_INTERWORK && TREE_PUBLIC (decl) && ! TREE_ASM_WRITTEN (decl))
+ if (TARGET_INTERWORK && TREE_PUBLIC (decl) && !TREE_ASM_WRITTEN (decl))
return 0;
/* Everything else is ok. */
test the index for the appropriate mode. */
GO_IF_LEGITIMATE_INDEX (mode, 0, offset, win);
- if (! no_new_pseudos)
+ if (!no_new_pseudos)
offset = force_reg (Pmode, offset);
else
abort ();
if (current_function_uses_pic_offset_table == 0 || TARGET_SINGLE_PIC_BASE)
return;
- if (! flag_pic)
+ if (!flag_pic)
abort ();
start_sequence ();
while (i)
{
i >>= 2;
- cycles ++;
+ cycles++;
}
return COSTS_N_INSNS (2) + cycles;
}
if (const_ok_for_arm (INTVAL (x)))
return outer == SET ? 2 : -1;
else if (outer == AND
- && const_ok_for_arm (~ INTVAL (x)))
+ && const_ok_for_arm (~INTVAL (x)))
return -1;
else if ((outer == COMPARE
|| outer == PLUS || outer == MINUS)
- && const_ok_for_arm (- INTVAL (x)))
+ && const_ok_for_arm (-INTVAL (x)))
return -1;
else
return 5;
{
int regno = true_regnum (op);
- return (! CONSTANT_P (op)
+ return (!CONSTANT_P (op)
&& (regno == -1
|| (GET_CODE (op) == REG
&& REGNO (op) >= FIRST_PSEUDO_REGISTER)));
enum machine_mode mode ATTRIBUTE_UNUSED;
{
#if 0
- if ((mode == QImode && ! memory_operand (op, mode)) || GET_CODE (op) != MEM)
+ if ((mode == QImode && !memory_operand (op, mode)) || GET_CODE (op) != MEM)
return 0;
#endif
if (GET_CODE (op) != MEM)
/* A sum of anything more complex than reg + reg or reg + const is bad. */
if ((GET_CODE (op) == PLUS || GET_CODE (op) == MINUS)
- && (! s_register_operand (XEXP (op, 0), VOIDmode)
- || (! s_register_operand (XEXP (op, 1), VOIDmode)
+ && (!s_register_operand (XEXP (op, 0), VOIDmode)
+ || (!s_register_operand (XEXP (op, 1), VOIDmode)
&& GET_CODE (XEXP (op, 1)) != CONST_INT)))
return 1;
|| GET_CODE (SET_SRC (elt)) != MEM
|| GET_MODE (SET_SRC (elt)) != SImode
|| GET_CODE (XEXP (SET_SRC (elt), 0)) != PLUS
- || ! rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
+ || !rtx_equal_p (XEXP (XEXP (SET_SRC (elt), 0), 0), src_addr)
|| GET_CODE (XEXP (XEXP (SET_SRC (elt), 0), 1)) != CONST_INT
|| INTVAL (XEXP (XEXP (SET_SRC (elt), 0), 1)) != (i - base) * 4)
return 0;
|| GET_CODE (SET_DEST (elt)) != MEM
|| GET_MODE (SET_DEST (elt)) != SImode
|| GET_CODE (XEXP (SET_DEST (elt), 0)) != PLUS
- || ! rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
+ || !rtx_equal_p (XEXP (XEXP (SET_DEST (elt), 0), 0), dest_addr)
|| GET_CODE (XEXP (XEXP (SET_DEST (elt), 0), 1)) != CONST_INT
|| INTVAL (XEXP (XEXP (SET_DEST (elt), 0), 1)) != (i - base) * 4)
return 0;
/* If the comparisons are not equal, and one doesn't dominate the other,
then we can't do this. */
if (cond1 != cond2
- && ! comparison_dominates_p (cond1, cond2)
- && (swapped = 1, ! comparison_dominates_p (cond2, cond1)))
+ && !comparison_dominates_p (cond1, cond2)
+ && (swapped = 1, !comparison_dominates_p (cond2, cond1)))
return CCmode;
if (swapped)
switch (cond1)
{
case EQ:
- if (cond2 == EQ || ! cond_or)
+ if (cond2 == EQ || !cond_or)
return CC_DEQmode;
switch (cond2)
break;
case LT:
- if (cond2 == LT || ! cond_or)
+ if (cond2 == LT || !cond_or)
return CC_DLTmode;
if (cond2 == LE)
return CC_DLEmode;
break;
case GT:
- if (cond2 == GT || ! cond_or)
+ if (cond2 == GT || !cond_or)
return CC_DGTmode;
if (cond2 == GE)
return CC_DGEmode;
break;
case LTU:
- if (cond2 == LTU || ! cond_or)
+ if (cond2 == LTU || !cond_or)
return CC_DLTUmode;
if (cond2 == LEU)
return CC_DLEUmode;
break;
case GTU:
- if (cond2 == GTU || ! cond_or)
+ if (cond2 == GTU || !cond_or)
return CC_DGTUmode;
if (cond2 == GEU)
return CC_DGEUmode;
gen_rtx_MEM (QImode,
plus_constant (base,
offset + 1))));
- if (! BYTES_BIG_ENDIAN)
+ if (!BYTES_BIG_ENDIAN)
emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_SUBREG (SImode, operands[0], 0),
gen_rtx_IOR (SImode,
gen_rtx_ASHIFT
{
/* Updating base_plus might destroy outval, see if we can
swap the scratch and base_plus. */
- if (! reg_overlap_mentioned_p (scratch, outval))
+ if (!reg_overlap_mentioned_p (scratch, outval))
{
rtx tmp = scratch;
scratch = base_plus;
hi = ((((offset - lo) & HOST_INT (0xffffffff))
^ HOST_INT (0x80000000))
- - HOST_INT (0x80000000));
+ - HOST_INT (0x80000000));
if (hi + lo != offset)
abort ();
{
/* Updating base_plus might destroy outval, see if we
can swap the scratch and base_plus. */
- if (! reg_overlap_mentioned_p (scratch, outval))
+ if (!reg_overlap_mentioned_p (scratch, outval))
{
rtx tmp = scratch;
scratch = base_plus;
extract_insn (insn);
- if (! constrain_operands (1))
+ if (!constrain_operands (1))
fatal_insn_not_found (insn);
/* Fill in recog_op_alt with information about the constraints of this
{
int i;
- if (! fpa_consts_inited)
+ if (!fpa_consts_inited)
init_fpa_table ();
for (i = 0; i < 8; i++)
case '\"':
case '\\':
putc ('\\', stream);
- len_so_far ++;
+ len_so_far++;
/* drop through. */
default:
if (c >= ' ' && c <= '~')
{
putc (c, stream);
- len_so_far ++;
+ len_so_far++;
}
else
{
return "";
}
- if (current_function_calls_alloca && ! really_return)
+ if (current_function_calls_alloca && !really_return)
abort ();
for (reg = 0; reg <= 10; reg++)
- if (regs_ever_live[reg] && ! call_used_regs[reg])
+ if (regs_ever_live[reg] && !call_used_regs[reg])
live_regs++;
- if (! TARGET_APCS_FRAME
- && ! frame_pointer_needed
+ if (!TARGET_APCS_FRAME
+ && !frame_pointer_needed
&& regs_ever_live[HARD_FRAME_POINTER_REGNUM]
- && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
+ && !call_used_regs[HARD_FRAME_POINTER_REGNUM])
live_regs++;
- if (flag_pic && ! TARGET_SINGLE_PIC_BASE
+ if (flag_pic && !TARGET_SINGLE_PIC_BASE
&& regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
live_regs++;
load a single register. On other architectures, the cost is the same. */
if (live_regs == 1
&& regs_ever_live[LR_REGNUM]
- && ! really_return)
+ && !really_return)
output_asm_insn (reverse ? "ldr%?%D0\t%|lr, [%|sp], #4"
: "ldr%?%d0\t%|lr, [%|sp], #4", &operand);
else if (live_regs == 1
: "ldr%?%d0\t%|pc, [%|sp], #4", &operand);
else if (live_regs)
{
- if (! regs_ever_live[LR_REGNUM])
+ if (!regs_ever_live[LR_REGNUM])
live_regs++;
if (frame_pointer_needed)
for (reg = 0; reg <= 10; reg++)
if (regs_ever_live[reg]
- && (! call_used_regs[reg]
- || (flag_pic && ! TARGET_SINGLE_PIC_BASE
+ && (!call_used_regs[reg]
+ || (flag_pic && !TARGET_SINGLE_PIC_BASE
&& reg == PIC_OFFSET_TABLE_REGNUM)))
{
strcat (instr, "%|");
strcat (instr, reg_names[13]);
strcat (instr, ", ");
strcat (instr, "%|");
- strcat (instr, TARGET_INTERWORK || (! really_return)
+ strcat (instr, TARGET_INTERWORK || (!really_return)
- ? reg_names[LR_REGNUM] : reg_names[PC_REGNUM] );
+ ? reg_names[LR_REGNUM] : reg_names[PC_REGNUM]);
}
else
{
- if (! TARGET_APCS_FRAME
+ if (!TARGET_APCS_FRAME
&& regs_ever_live[HARD_FRAME_POINTER_REGNUM]
- && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
+ && !call_used_regs[HARD_FRAME_POINTER_REGNUM])
{
strcat (instr, "%|");
strcat (instr, reg_names[HARD_FRAME_POINTER_REGNUM]);
strcat (instr, "\t%|");
strcat (instr, frame_pointer_needed ? "lr" : "ip");
- output_asm_insn (instr, & operand);
+ output_asm_insn (instr, &operand);
}
}
else if (really_return)
sprintf (instr, "mov%%?%%%s0%s\t%%|pc, %%|lr",
reverse ? "D" : "d", TARGET_APCS_32 ? "" : "s");
- output_asm_insn (instr, & operand);
+ output_asm_insn (instr, &operand);
}
return "";
store_arg_regs = 1;
for (reg = 0; reg <= 10; reg++)
- if (regs_ever_live[reg] && ! call_used_regs[reg])
+ if (regs_ever_live[reg] && !call_used_regs[reg])
live_regs_mask |= (1 << reg);
- if (! TARGET_APCS_FRAME
- && ! frame_pointer_needed
+ if (!TARGET_APCS_FRAME
+ && !frame_pointer_needed
&& regs_ever_live[HARD_FRAME_POINTER_REGNUM]
- && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
+ && !call_used_regs[HARD_FRAME_POINTER_REGNUM])
live_regs_mask |= (1 << HARD_FRAME_POINTER_REGNUM);
- if (flag_pic && ! TARGET_SINGLE_PIC_BASE
+ if (flag_pic && !TARGET_SINGLE_PIC_BASE
&& regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
live_regs_mask |= (1 << PIC_OFFSET_TABLE_REGNUM);
/* If we are throwing an exception, then we really must be doing a return,
so we can't tail-call. */
- if (eh_ofs && ! really_return)
+ if (eh_ofs && !really_return)
- abort();
+ abort ();
/* A volatile function should never return. Call abort. */
}
for (reg = 0; reg <= 10; reg++)
- if (regs_ever_live[reg] && ! call_used_regs[reg])
+ if (regs_ever_live[reg] && !call_used_regs[reg])
{
live_regs_mask |= (1 << reg);
floats_offset += 4;
}
/* Handle the frame pointer as a special case. */
- if (! TARGET_APCS_FRAME
- && ! frame_pointer_needed
+ if (!TARGET_APCS_FRAME
+ && !frame_pointer_needed
&& regs_ever_live[HARD_FRAME_POINTER_REGNUM]
- && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
+ && !call_used_regs[HARD_FRAME_POINTER_REGNUM])
{
live_regs_mask |= (1 << HARD_FRAME_POINTER_REGNUM);
floats_offset += 4;
/* If we aren't loading the PIC register, don't stack it even though it may
be live. */
- if (flag_pic && ! TARGET_SINGLE_PIC_BASE
+ if (flag_pic && !TARGET_SINGLE_PIC_BASE
&& regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
{
live_regs_mask |= (1 << PIC_OFFSET_TABLE_REGNUM);
if (arm_fpu_arch == FP_SOFT2)
{
for (reg = LAST_ARM_FP_REGNUM; reg >= FIRST_ARM_FP_REGNUM; reg--)
- if (regs_ever_live[reg] && ! call_used_regs[reg])
+ if (regs_ever_live[reg] && !call_used_regs[reg])
{
floats_offset += 12;
asm_fprintf (f, "\tldfe\t%r, [%r, #-%d]\n",
for (reg = LAST_ARM_FP_REGNUM; reg >= FIRST_ARM_FP_REGNUM; reg--)
{
- if (regs_ever_live[reg] && ! call_used_regs[reg])
+ if (regs_ever_live[reg] && !call_used_regs[reg])
{
floats_offset += 12;
if (really_return)
asm_fprintf (f, "\tbx\t%r\n", return_regnum);
}
- else if (eh_ofs || ! really_return)
+ else if (eh_ofs || !really_return)
{
live_regs_mask |= 0x6800;
print_multi_reg (f, "ldmea\t%r", FP_REGNUM, live_regs_mask, FALSE);
if (arm_fpu_arch == FP_SOFT2)
{
for (reg = FIRST_ARM_FP_REGNUM; reg <= LAST_ARM_FP_REGNUM; reg++)
- if (regs_ever_live[reg] && ! call_used_regs[reg])
+ if (regs_ever_live[reg] && !call_used_regs[reg])
asm_fprintf (f, "\tldfe\t%r, [%r], #12\n",
reg, SP_REGNUM);
}
for (reg = FIRST_ARM_FP_REGNUM; reg <= LAST_ARM_FP_REGNUM; reg++)
{
- if (regs_ever_live[reg] && ! call_used_regs[reg])
+ if (regs_ever_live[reg] && !call_used_regs[reg])
{
if (reg - start_reg == 3)
{
/* Jump to the target; even in 26-bit mode. */
asm_fprintf (f, "\tmov\t%r, %r\n", PC_REGNUM, return_regnum);
}
- else if (TARGET_APCS_32 && live_regs_mask == 0 && ! really_return)
+ else if (TARGET_APCS_32 && live_regs_mask == 0 && !really_return)
asm_fprintf (f, "\tldr\t%r, [%r], #4\n", LR_REGNUM, SP_REGNUM);
else if (TARGET_APCS_32 && live_regs_mask == 0 && really_return)
asm_fprintf (f, "\tldr\t%r, [%r], #4\n", PC_REGNUM, SP_REGNUM);
- else if (! really_return)
+ else if (!really_return)
print_multi_reg (f, "ldmfd\t%r!", SP_REGNUM,
live_regs_mask | (1 << LR_REGNUM), FALSE);
else
if (use_return_insn (FALSE)
&& return_used_this_function
&& (frame_size + current_function_outgoing_args_size) != 0
- && ! frame_pointer_needed)
+ && !frame_pointer_needed)
abort ();
/* Reset the ARM-specific per-function variables. */
for (i = 0; i <= LAST_ARM_REGNUM; i++)
if (mask & (1 << i))
- num_regs ++;
+ num_regs++;
if (num_regs == 0 || num_regs > 16)
abort ();
if (current_function_anonymous_args && current_function_pretend_args_size)
store_arg_regs = 1;
- if (! volatile_func)
+ if (!volatile_func)
{
for (reg = 0; reg <= 10; reg++)
- if (regs_ever_live[reg] && ! call_used_regs[reg])
+ if (regs_ever_live[reg] && !call_used_regs[reg])
live_regs_mask |= 1 << reg;
- if (! TARGET_APCS_FRAME
- && ! frame_pointer_needed
+ if (!TARGET_APCS_FRAME
+ && !frame_pointer_needed
&& regs_ever_live[HARD_FRAME_POINTER_REGNUM]
- && ! call_used_regs[HARD_FRAME_POINTER_REGNUM])
+ && !call_used_regs[HARD_FRAME_POINTER_REGNUM])
live_regs_mask |= 1 << HARD_FRAME_POINTER_REGNUM;
if (flag_pic && regs_ever_live[PIC_OFFSET_TABLE_REGNUM])
/* For now the integer regs are still pushed in output_arm_epilogue (). */
- if (! volatile_func)
+ if (!volatile_func)
{
if (arm_fpu_arch == FP_SOFT2)
{
- for (reg = LAST_ARM_FP_REGNUM; reg >= FIRST_ARM_FP_REGNUM; reg --)
+ for (reg = LAST_ARM_FP_REGNUM; reg >= FIRST_ARM_FP_REGNUM; reg--)
- if (regs_ever_live[reg] && ! call_used_regs[reg])
+ if (regs_ever_live[reg] && !call_used_regs[reg])
{
insn = gen_rtx_PRE_DEC (XFmode, stack_pointer_rtx);
insn = gen_rtx_MEM (XFmode, insn);
- for (reg = LAST_ARM_FP_REGNUM; reg >= FIRST_ARM_FP_REGNUM; reg --)
+ for (reg = LAST_ARM_FP_REGNUM; reg >= FIRST_ARM_FP_REGNUM; reg--)
{
- if (regs_ever_live[reg] && ! call_used_regs[reg])
+ if (regs_ever_live[reg] && !call_used_regs[reg])
{
if (start_reg - reg == 3)
{
if (GET_CODE (x) == CONST_INT)
{
HOST_WIDE_INT val;
- val = ARM_SIGN_EXTEND (~ INTVAL (x));
+ val = ARM_SIGN_EXTEND (~INTVAL (x));
fprintf (stream, HOST_WIDE_INT_PRINT_DEC, val);
}
else
case 'S':
{
HOST_WIDE_INT val;
- const char * shift = shift_op (x, & val);
+ const char * shift = shift_op (x, &val);
if (shift)
{
- fprintf (stream, ", %s ", shift_op (x, & val));
+ fprintf (stream, ", %s ", shift_op (x, &val));
if (val == -1)
arm_print_operand (stream, XEXP (x, 1), 0);
else
return;
case 'd':
- if (! x)
+ if (!x)
return;
if (TARGET_ARM)
return;
case 'D':
- if (! x)
+ if (!x)
return;
if (TARGET_ARM)
/* Fail if a conditional return is undesirable (eg on a
StrongARM), but still allow this if optimizing for size. */
else if (GET_CODE (scanbody) == RETURN
- && ! use_return_insn (TRUE)
- && ! optimize_size)
+ && !use_return_insn (TRUE)
+ && !optimize_size)
fail = TRUE;
else if (GET_CODE (scanbody) == RETURN
&& seeking_return)
/* Instructions using or affecting the condition codes make it
fail. */
scanbody = PATTERN (this_insn);
- if (! (GET_CODE (scanbody) == SET
- || GET_CODE (scanbody) == PARALLEL)
+ if (!(GET_CODE (scanbody) == SET
+ || GET_CODE (scanbody) == PARALLEL)
|| get_attr_conds (this_insn) != CONDS_NOCOND)
fail = TRUE;
break;
/* If we are using the stack pointer to point at the
argument, then an offset of 0 is correct. */
- if ((TARGET_THUMB || ! frame_pointer_needed)
+ if ((TARGET_THUMB || !frame_pointer_needed)
&& REGNO (addr) == SP_REGNUM)
return 0;
{
tree sym;
- if (! TREE_USED (block))
+ if (!TREE_USED (block))
continue;
for (sym = BLOCK_VARS (block); sym; sym = TREE_CHAIN (sym))
|| DECL_IGNORED_P (sym)
|| TREE_CODE (sym) != VAR_DECL
|| DECL_EXTERNAL (sym)
- || ! rtx_equal_p (DECL_RTL (sym), orig)
+ || !rtx_equal_p (DECL_RTL (sym), orig)
)
continue;
for (bit = 0;
(mask & (1 << bit)) == 0;
- ++ bit)
+ ++bit)
continue;
return bit;
abort ();
regs_to_pop |= 1 << LR_REGNUM;
- ++ pops_needed;
+ ++pops_needed;
}
if (TARGET_BACKTRACE)
/* Otherwise if we are not supporting interworking and we have not created
a backtrace structure and the function was not entered in ARM mode then
just pop the return address straight into the PC. */
- else if ( ! TARGET_INTERWORK
- && ! TARGET_BACKTRACE
- && ! is_called_in_ARM_mode (current_function_decl))
+ else if (!TARGET_INTERWORK
+ && !TARGET_BACKTRACE
+ && !is_called_in_ARM_mode (current_function_decl))
{
if (eh_ofs)
{
/* If we have any popping registers left over, remove them. */
if (available > 0)
- regs_available_for_popping &= ~ available;
+ regs_available_for_popping &= ~available;
/* Otherwise if we need another popping register we can use
the fourth argument register. */
/* The fourth argument register is available. */
regs_available_for_popping |= 1 << LAST_ARG_REGNUM;
- -- pops_needed;
+ --pops_needed;
}
}
if (reg_containing_return_addr == -1)
{
/* The return address was popped into the lowest numbered register. */
- regs_to_pop &= ~ (1 << LR_REGNUM);
+ regs_to_pop &= ~(1 << LR_REGNUM);
reg_containing_return_addr =
number_of_first_bit_set (regs_available_for_popping);
- /* Remove this register for the mask of available registers, so that
- the return address will not be corrupted by futher pops. */
+ /* Remove this register from the mask of available registers, so that
+ the return address will not be corrupted by further pops. */
- regs_available_for_popping &= ~ (1 << reg_containing_return_addr);
+ regs_available_for_popping &= ~(1 << reg_containing_return_addr);
}
/* If we popped other registers then handle them here. */
ARM_HARD_FRAME_POINTER_REGNUM, frame_pointer);
/* (Temporarily) remove it from the mask of popped registers. */
- regs_available_for_popping &= ~ (1 << frame_pointer);
- regs_to_pop &= ~ (1 << ARM_HARD_FRAME_POINTER_REGNUM);
+ regs_available_for_popping &= ~(1 << frame_pointer);
+ regs_to_pop &= ~(1 << ARM_HARD_FRAME_POINTER_REGNUM);
if (regs_available_for_popping)
{
asm_fprintf (f, "\tmov\t%r, %r\n", move_to, popped_into);
- regs_to_pop &= ~ (1 << move_to);
+ regs_to_pop &= ~(1 << move_to);
- -- pops_needed;
+ --pops_needed;
}
/* If we still have not popped everything then we must have only
int regno;
int lo_mask = mask & 0xFF;
- if (lo_mask == 0 && ! push && (mask & (1 << 15)))
+ if (lo_mask == 0 && !push && (mask & (1 << 15)))
{
- /* Special case. Do not generate a POP PC statement here, do it in
- thumb_exit() */
+ /* Special case. Do not generate a POP PC statement here; do it in
+ thumb_exit(). */
fprintf (f, "\t%s\t{", push ? "push" : "pop");
/* Look at the low registers first. */
- for (regno = 0; regno <= LAST_LO_REGNUM; regno ++, lo_mask >>= 1)
+ for (regno = 0; regno <= LAST_LO_REGNUM; regno++, lo_mask >>= 1)
{
if (lo_mask & 1)
{
rtx insn;
/* This test is only important for leaf functions. */
- /* assert (! leaf_function_p ()); */
+ /* assert (!leaf_function_p ()); */
/* If we have already decided that far jumps may be used,
do not bother checking again, and always return true even if
/* If this function is not being called from the prologue/epilogue
generation code then it must be being called from the
INITIAL_ELIMINATION_OFFSET macro. */
- if (! in_prologue)
+ if (!in_prologue)
{
/* In this case we know that we are being asked about the elimination
of the arg pointer register. If that register is not being used,
hope that this does not occur too often. */
- if (regs_ever_live [ARG_POINTER_REGNUM])
+ if (regs_ever_live[ARG_POINTER_REGNUM])
cfun->machine->arg_pointer_live = 1;
- else if (! cfun->machine->arg_pointer_live)
+ else if (!cfun->machine->arg_pointer_live)
return 0;
}
return "";
for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
- if (regs_ever_live[regno] && ! call_used_regs[regno]
- && ! (TARGET_SINGLE_PIC_BASE && (regno == arm_pic_register)))
+ if (regs_ever_live[regno] && !call_used_regs[regno]
+ && !(TARGET_SINGLE_PIC_BASE && (regno == arm_pic_register)))
live_regs_mask |= 1 << regno;
for (regno = 8; regno < 13; regno++)
{
- if (regs_ever_live[regno] && ! call_used_regs[regno]
- && ! (TARGET_SINGLE_PIC_BASE && (regno == arm_pic_register)))
- high_regs_pushed ++;
+ if (regs_ever_live[regno] && !call_used_regs[regno]
+ && !(TARGET_SINGLE_PIC_BASE && (regno == arm_pic_register)))
+ high_regs_pushed++;
}
/* The prolog may have pushed some high registers to use as
fatal ("No low registers available for popping high registers");
for (next_hi_reg = 8; next_hi_reg < 13; next_hi_reg++)
- if (regs_ever_live[next_hi_reg] && ! call_used_regs[next_hi_reg]
- && ! (TARGET_SINGLE_PIC_BASE && (next_hi_reg == arm_pic_register)))
+ if (regs_ever_live[next_hi_reg] && !call_used_regs[next_hi_reg]
+ && !(TARGET_SINGLE_PIC_BASE && (next_hi_reg == arm_pic_register)))
break;
while (high_regs_pushed)
regno);
for (next_hi_reg++; next_hi_reg < 13; next_hi_reg++)
- if (regs_ever_live[next_hi_reg] &&
- ! call_used_regs[next_hi_reg]
- && ! (TARGET_SINGLE_PIC_BASE
- && (next_hi_reg == arm_pic_register)))
+ if (regs_ever_live[next_hi_reg]
+ && !call_used_regs[next_hi_reg]
+ && !(TARGET_SINGLE_PIC_BASE
+ && (next_hi_reg == arm_pic_register)))
break;
}
}
}
}
- had_to_push_lr = (live_regs_mask || ! leaf_function
+ had_to_push_lr = (live_regs_mask || !leaf_function
|| thumb_far_jump_used_p (1));
if (TARGET_BACKTRACE
if (current_function_pretend_args_size == 0 || TARGET_BACKTRACE)
{
if (had_to_push_lr
- && ! is_called_in_ARM_mode (current_function_decl)
- && ! eh_ofs)
+ && !is_called_in_ARM_mode (current_function_decl)
+ && !eh_ofs)
live_regs_mask |= 1 << PC_REGNUM;
/* Either no argument registers were pushed or a backtrace
else
{
/* Pop everything but the return address. */
- live_regs_mask &= ~ (1 << PC_REGNUM);
+ live_regs_mask &= ~(1 << PC_REGNUM);
if (live_regs_mask)
thumb_pushpop (asm_out_file, live_regs_mask, FALSE);
reg = gen_reg_rtx (Pmode);
cfun->machine->ra_rtx = reg;
- if (! TARGET_APCS_32)
+ if (!TARGET_APCS_32)
init = gen_rtx_AND (Pmode, gen_rtx_REG (Pmode, LR_REGNUM),
GEN_INT (RETURN_ADDR_MASK26));
else
if (amount < 512)
emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
- GEN_INT (- amount)));
+ GEN_INT (-amount)));
else
{
int regno;
it now. */
for (regno = LAST_ARG_REGNUM + 1; regno <= LAST_LO_REGNUM; regno++)
if (regs_ever_live[regno]
- && ! call_used_regs[regno] /* Paranoia */
- && ! (TARGET_SINGLE_PIC_BASE && (regno == arm_pic_register))
- && ! (frame_pointer_needed
- && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
+ && !call_used_regs[regno] /* Paranoia */
+ && !(TARGET_SINGLE_PIC_BASE && (regno == arm_pic_register))
+ && !(frame_pointer_needed
+ && (regno == THUMB_HARD_FRAME_POINTER_REGNUM)))
break;
if (regno > LAST_LO_REGNUM) /* Very unlikely */
emit_insn (gen_movsi (spare, reg));
/* Decrement the stack. */
- emit_insn (gen_movsi (reg, GEN_INT (- amount)));
+ emit_insn (gen_movsi (reg, GEN_INT (-amount)));
emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
reg));
{
reg = gen_rtx (REG, SImode, regno);
- emit_insn (gen_movsi (reg, GEN_INT (- amount)));
+ emit_insn (gen_movsi (reg, GEN_INT (-amount)));
emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
reg));
}
for (regno = LAST_ARG_REGNUM + 1 - num_pushes;
regno <= LAST_ARG_REGNUM;
- regno ++)
+ regno++)
asm_fprintf (f, "%r%s", regno,
regno == LAST_ARG_REGNUM ? "" : ", ");
current_function_pretend_args_size);
}
- for (regno = 0; regno <= LAST_LO_REGNUM; regno ++)
- if (regs_ever_live[regno] && ! call_used_regs[regno]
- && ! (TARGET_SINGLE_PIC_BASE && (regno == arm_pic_register)))
+ for (regno = 0; regno <= LAST_LO_REGNUM; regno++)
+ if (regs_ever_live[regno] && !call_used_regs[regno]
+ && !(TARGET_SINGLE_PIC_BASE && (regno == arm_pic_register)))
live_regs_mask |= 1 << regno;
- if (live_regs_mask || ! leaf_function_p () || thumb_far_jump_used_p (1))
+ if (live_regs_mask || !leaf_function_p () || thumb_far_jump_used_p (1))
live_regs_mask |= 1 << LR_REGNUM;
if (TARGET_BACKTRACE)
for (regno = 8; regno < 13; regno++)
{
- if (regs_ever_live[regno] && ! call_used_regs[regno]
- && ! (TARGET_SINGLE_PIC_BASE && (regno == arm_pic_register)))
- high_regs_pushed ++;
+ if (regs_ever_live[regno] && !call_used_regs[regno]
+ && !(TARGET_SINGLE_PIC_BASE && (regno == arm_pic_register)))
+ high_regs_pushed++;
}
if (high_regs_pushed)
for (next_hi_reg = 12; next_hi_reg > LAST_LO_REGNUM; next_hi_reg--)
{
- if (regs_ever_live[next_hi_reg] && ! call_used_regs[next_hi_reg]
- && ! (TARGET_SINGLE_PIC_BASE
- && (next_hi_reg == arm_pic_register)))
+ if (regs_ever_live[next_hi_reg] && !call_used_regs[next_hi_reg]
+ && !(TARGET_SINGLE_PIC_BASE
+ && (next_hi_reg == arm_pic_register)))
break;
}
{
/* Desperation time -- this probably will never happen. */
if (regs_ever_live[LAST_ARG_REGNUM]
- || ! call_used_regs[LAST_ARG_REGNUM])
+ || !call_used_regs[LAST_ARG_REGNUM])
asm_fprintf (f, "\tmov\t%r, %r\n", IP_REGNUM, LAST_ARG_REGNUM);
mask = 1 << LAST_ARG_REGNUM;
}
{
asm_fprintf (f, "\tmov\t%r, %r\n", regno, next_hi_reg);
- high_regs_pushed --;
+ high_regs_pushed--;
if (high_regs_pushed)
for (next_hi_reg--; next_hi_reg > LAST_LO_REGNUM;
next_hi_reg--)
{
if (regs_ever_live[next_hi_reg]
- && ! call_used_regs[next_hi_reg]
- && ! (TARGET_SINGLE_PIC_BASE
- && (next_hi_reg == arm_pic_register)))
+ && !call_used_regs[next_hi_reg]
+ && !(TARGET_SINGLE_PIC_BASE
+ && (next_hi_reg == arm_pic_register)))
break;
}
else
{
- mask &= ~ ((1 << regno) - 1);
+ mask &= ~((1 << regno) - 1);
break;
}
}
if (pushable_regs == 0
&& (regs_ever_live[LAST_ARG_REGNUM]
- || ! call_used_regs[LAST_ARG_REGNUM]))
+ || !call_used_regs[LAST_ARG_REGNUM]))
asm_fprintf (f, "\tmov\t%r, %r\n", LAST_ARG_REGNUM, IP_REGNUM);
}
}
/* We mark this here and not in arm_add_gc_roots() to avoid
polluting even more code with ifdefs, and because it never
contains anything useful until we assign to it here. */
- ggc_add_rtx_root (& aof_pic_label, 1);
+ ggc_add_rtx_root (&aof_pic_label, 1);
/* This needs to persist throughout the compilation. */
end_temporary_allocation ();
aof_pic_label = gen_rtx_SYMBOL_REF (Pmode, "x$adcons");
;; 3 A symbol that has been treated properly for pic usage, that is, we
;; will add the pic_register value to it before trying to dereference it.
- ;; Note: sin and cos are no-longer used.
+ ;; Note: sin and cos are no longer used.
+;;
+;; UNSPEC_VOLATILE Usage:
+;; 0 `blockage' insn to prevent scheduling across an insn in the code.
+;; 1 `epilogue' insn, used to represent any part of the instruction epilogue
+;; sequence that isn't expanded into normal RTL. Used for both normal
+;; and sibcall epilogues.
+;; 2 `align' insn. Used at the head of a minipool table for inlined
+;; constants.
+;; 3 `end-of-table'. Used to mark the end of a minipool table.
+;; 4 `pool-entry(1)'. An entry in the constant pool for an 8-bit object.
+;; 5 `pool-entry(2)'. An entry in the constant pool for a 16-bit object.
+;; 6 `pool-entry(4)'. An entry in the constant pool for a 32-bit object.
+;; 7 `pool-entry(8)'. An entry in the constant pool for a 64-bit object.
+;;
\f
;; Attributes
+; IS_THUMB is set to 'yes' when we are generating Thumb code, and 'no' when
+; generating ARM code. This is used to control the length of some insn
+; patterns that share the same RTL in both ARM and Thumb code.
(define_attr "is_thumb" "no,yes" (const (symbol_ref "thumb_code")))
; PROG_MODE attribute is used to determine whether condition codes are
; by the -mapcs-{32,26} flag, and possibly the -mcpu=... option.
(define_attr "prog_mode" "prog26,prog32" (const (symbol_ref "arm_prog_mode")))
+; IS_STRONGARM is set to 'yes' when compiling for StrongARM, it affects
+; scheduling decisions for the load unit and the multiplier.
(define_attr "is_strongarm" "no,yes" (const (symbol_ref "arm_is_strong")))
; Floating Point Unit. If we only have floating point emulation, then there
; is no point in scheduling the floating point insns. (Well, for best
; performance we should try and group them together).
-
(define_attr "fpu" "fpa,fpe2,fpe3" (const (symbol_ref "arm_fpu_attr")))
; LENGTH of an instruction (in bytes)
(define_attr "pool_range" "" (const_int 0))
(define_attr "neg_pool_range" "" (const_int 0))
-; An assembler sequence may clobber the condition codes without us knowing
+; An assembler sequence may clobber the condition codes without us knowing.
(define_asm_attributes
[(set_attr "conds" "clob")])
; condition codes: this one is used by final_prescan_insn to speed up
; conditionalizing instructions. It saves having to scan the rtl to see if
; it uses or alters the condition codes.
-
+;
; USE means that the condition codes are used by the insn in the process of
-; outputting code, this means (at present) that we can't use the insn in
-; inlined branches
-
+; outputting code; this means (at present) that we can't use the insn in
+; inlined branches.
+;
; SET means that the purpose of the insn is to set the condition codes in a
-; well defined manner.
-
+; well defined manner.
+;
; CLOB means that the condition codes are altered in an undefined manner, if
-; they are altered at all
-
+; they are altered at all.
+;
; JUMP_CLOB is used when the condition cannot be represented by a single
-; instruction (UNEQ and LTGT). These cannot be predicated.
-
+; instruction (UNEQ and LTGT). These cannot be predicated.
+;
; NOCOND means that the condition codes are neither altered nor affect the
-; output of this insn
+; output of this insn.
(define_attr "conds" "use,set,clob,jump_clob,nocond"
(if_then_else (eq_attr "type" "call")
(const_string "clob") (const_string "nocond"))
(const_string "nocond")))
+; Predicable means that the insn can be conditionally executed based on
+; an automatically added predicate (additional patterns are generated by
+; gen...). We default to 'no' because no Thumb patterns match this rule
+; and not all ARM patterns do.
(define_attr "predicable" "no,yes" (const_string "no"))
; Only model the write buffer for ARM6 and ARM7. Earlier processors don't
; affect the schedule).
(define_attr "model_wbuf" "no,yes" (const (symbol_ref "arm_is_6_or_7")))
+; WRITE_CONFLICT implies that a read following an unrelated write is likely
+; to stall the processor. Used with model_wbuf above.
(define_attr "write_conflict" "no,yes"
(if_then_else (eq_attr "type"
"block,float_em,f_load,f_store,f_mem_r,r_mem_f,call,load")
(const_string "yes")
(const_string "no")))
+; Classify the insns into those that take one cycle and those that take more
+; than one on the main cpu execution unit.
(define_attr "core_cycles" "single,multi"
(if_then_else (eq_attr "type"
"normal,float,fdivx,fdivd,fdivs,fmul,ffmul,farith,ffarith")
(const_string "multi")))
;; FAR_JUMP is "yes" if a BL instruction is used to generate a branch to a
-;; distant label.
+;; distant label. Only applicable to Thumb code.
(define_attr "far_jump" "yes,no" (const_string "no"))
-
-; The write buffer on some of the arm6 processors is hard to model exactly.
-; There is room in the buffer for up to two addresses and up to eight words
-; of memory, but the two needn't be split evenly. When writing the two
-; addresses are fully pipelined. However, a read from memory that is not
-; currently in the cache will block until the writes have completed.
-; It is normally the case that FCLK and MCLK will be in the ratio 2:1, so
-; writes will take 2 FCLK cycles per word, if FCLK and MCLK are asynchronous
-; (they aren't allowed to be at present) then there is a startup cost of 1MCLK
-; cycle to add as well.
-
;; (define_function_unit {name} {num-units} {n-users} {test}
;; {ready-delay} {issue-delay} [{conflict-list}])
+
+;;--------------------------------------------------------------------
+;; Floating point unit (FPA)
+;;--------------------------------------------------------------------
(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
(eq_attr "type" "fdivx")) 71 69)
(define_function_unit "fpa" 1 0 (and (eq_attr "fpu" "fpa")
(eq_attr "type" "f_2_r")) 1 2)
-;; The fpa10 doesn't really have a memory read unit, but it can start to
-;; speculatively execute the instruction in the pipeline, provided the data
-;; is already loaded, so pretend reads have a delay of 2 (and that the
-;; pipeline is infinite.
+; The fpa10 doesn't really have a memory read unit, but it can start to
+; speculatively execute the instruction in the pipeline, provided the data
+; is already loaded, so pretend reads have a delay of 2 (and that the
+; pipeline is infinite).
(define_function_unit "fpa_mem" 1 0 (and (eq_attr "fpu" "fpa")
(eq_attr "type" "f_load")) 3 1)
;;--------------------------------------------------------------------
;; Write buffer
;;--------------------------------------------------------------------
-;; Strictly we should model a 4-deep write buffer for ARM7xx based chips
+; Strictly, we should model a 4-deep write buffer for ARM7xx based chips
+;
+; The write buffer on some of the arm6 processors is hard to model exactly.
+; There is room in the buffer for up to two addresses and up to eight words
+; of memory, but the two needn't be split evenly. When writing the two
+; addresses are fully pipelined. However, a read from memory that is not
+; currently in the cache will block until the writes have completed.
+; It is normally the case that FCLK and MCLK will be in the ratio 2:1, so
+; writes will take 2 FCLK cycles per word; if FCLK and MCLK are asynchronous
+; (they aren't allowed to be at present) then there is a startup cost of
+; 1 MCLK cycle to add as well.
+
(define_function_unit "write_buf" 1 2
(and (eq_attr "model_wbuf" "yes")
(eq_attr "type" "store1,r_mem_f")) 5 3)
;;--------------------------------------------------------------------
;; Write blockage unit
;;--------------------------------------------------------------------
-;; The write_blockage unit models (partially), the fact that reads will stall
-;; until the write buffer empties.
-;; The f_mem_r and r_mem_f could also block, but they are to the stack,
-;; so we don't model them here
+; The write_blockage unit models (partially) the fact that reads will stall
+; until the write buffer empties.
+; The f_mem_r and r_mem_f could also block, but they are to the stack,
+; so we don't model them here.
(define_function_unit "write_blockage" 1 0 (and (eq_attr "model_wbuf" "yes")
(eq_attr "type" "store1")) 5 5
[(eq_attr "write_conflict" "yes")])
;;--------------------------------------------------------------------
;; Core unit
;;--------------------------------------------------------------------
-;; Everything must spend at least one cycle in the core unit
+; Everything must spend at least one cycle in the core unit
(define_function_unit "core" 1 0 (eq_attr "core_cycles" "single") 1 1)
(define_function_unit "core" 1 0
(and (eq_attr "core_cycles" "multi")
(eq_attr "type" "!mult,load,store1,store2,store3,store4")) 32 32)
\f
+;;---------------------------------------------------------------------------
+
;; Note: For DImode insns, there is normally no reason why operands should
;; not be in the same register, what we don't want is for something being
;; written to partially overlap something that is an input.
;; independently.
(define_split
[(set (match_operand:DI 0 "s_register_operand" "")
- (plus:DI (match_operand:DI 1 "s_register_operand" "")
- (match_operand:DI 2 "s_register_operand" "")))
+ (plus:DI (match_operand:DI 1 "s_register_operand" "")
+ (match_operand:DI 2 "s_register_operand" "")))
(clobber (reg:CC 24))]
"TARGET_ARM && reload_completed"
[(parallel [(set (reg:CC_C 24)
- (compare:CC_C (plus:SI (match_dup 1) (match_dup 2))
- (match_dup 1)))
+ (compare:CC_C (plus:SI (match_dup 1) (match_dup 2))
+ (match_dup 1)))
(set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2)))])
(set (match_dup 3) (plus:SI (ltu:SI (reg:CC_C 24) (const_int 0))
(plus:SI (match_dup 4) (match_dup 5))))]
"
-{
- operands[3] = gen_highpart (SImode, operands[0]);
- operands[0] = gen_lowpart (SImode, operands[0]);
- operands[4] = gen_highpart (SImode, operands[1]);
- operands[1] = gen_lowpart (SImode, operands[1]);
- operands[5] = gen_highpart (SImode, operands[2]);
- operands[2] = gen_lowpart (SImode, operands[2]);
-}")
+ {
+ operands[3] = gen_highpart (SImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[4] = gen_highpart (SImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[5] = gen_highpart (SImode, operands[2]);
+ operands[2] = gen_lowpart (SImode, operands[2]);
+ }"
+)
;; The first insn created by this splitter must set the low part of
;; operand0 as well as the carry bit in the CC register. The second
;; operand 2 from 32 to 64 bits and the high part of operand 1.
(define_split
[(set (match_operand:DI 0 "s_register_operand" "")
- (plus:DI (sign_extend:DI
- (match_operand:SI 2 "s_register_operand" ""))
- (match_operand:DI 1 "s_register_operand" "")))
+ (plus:DI (sign_extend:DI (match_operand:SI 2 "s_register_operand" ""))
+ (match_operand:DI 1 "s_register_operand" "")))
(clobber (reg:CC 24))]
"TARGET_ARM && reload_completed"
[(parallel [(set (reg:CC_C 24)
- (compare:CC_C (plus:SI (match_dup 1) (match_dup 2))
- (match_dup 1)))
+ (compare:CC_C (plus:SI (match_dup 1) (match_dup 2))
+ (match_dup 1)))
(set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2)))])
(set (match_dup 3) (plus:SI (ltu:SI (reg:CC_C 24) (const_int 0))
(plus:SI (ashiftrt:SI (match_dup 2)
(const_int 31))
(match_dup 4))))]
"
-{
- operands[3] = gen_highpart (SImode, operands[0]);
- operands[0] = gen_lowpart (SImode, operands[0]);
- operands[4] = gen_highpart (SImode, operands[1]);
- operands[1] = gen_lowpart (SImode, operands[1]);
- operands[2] = gen_lowpart (SImode, operands[2]);
-}")
+ {
+ operands[3] = gen_highpart (SImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[4] = gen_highpart (SImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_lowpart (SImode, operands[2]);
+ }"
+)
-;; The first insn created by this splitter must set the low part of
-;; operand0 as well as the carry bit in the CC register. The second
-;; insn must compute the sum of the carry bit and the high bits from
-;; operand 1
+; The first insn created by this splitter must set the low part of
+; operand0 as well as the carry bit in the CC register. The second
+; insn must compute the sum of the carry bit and the high bits from
+; operand 1
(define_split
[(set (match_operand:DI 0 "s_register_operand" "")
- (plus:DI (zero_extend:DI
- (match_operand:SI 2 "s_register_operand" ""))
- (match_operand:DI 1 "s_register_operand" "")))
+ (plus:DI (zero_extend:DI (match_operand:SI 2 "s_register_operand" ""))
+ (match_operand:DI 1 "s_register_operand" "")))
(clobber (reg:CC 24))]
"TARGET_ARM && reload_completed"
[(parallel [(set (reg:CC_C 24)
- (compare:CC_C (plus:SI (match_dup 1) (match_dup 2))
- (match_dup 1)))
+ (compare:CC_C (plus:SI (match_dup 1) (match_dup 2))
+ (match_dup 1)))
(set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2)))])
(set (match_dup 3) (plus:SI (ltu:SI (reg:CC_C 24) (const_int 0))
(plus:SI (match_dup 4) (const_int 0))))]
"
-{
- operands[3] = gen_highpart (SImode, operands[0]);
- operands[0] = gen_lowpart (SImode, operands[0]);
- operands[4] = gen_highpart (SImode, operands[1]);
- operands[1] = gen_lowpart (SImode, operands[1]);
- operands[2] = gen_lowpart (SImode, operands[2]);
-}")
+ {
+ operands[3] = gen_highpart (SImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[4] = gen_highpart (SImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_lowpart (SImode, operands[2]);
+ }"
+)
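
The three adddi3 splitters above all implement the standard carry chain: the first insn of each pair adds the low words and records the carry (reg:CC_C 24), and the second adds the high words plus that carry. A standalone C sketch of the arithmetic, not the compiler code:

    #include <stdint.h>

    /* A 64-bit add decomposed the way the splitters above do it: a
       low-word add that produces a carry, then a high-word
       add-with-carry.  */
    static uint64_t
    add64 (uint32_t alo, uint32_t ahi, uint32_t blo, uint32_t bhi)
    {
      uint32_t lo = alo + blo;
      uint32_t carry = lo < alo;		/* The adds' carry-out.  */
      uint32_t hi = ahi + bhi + carry;	/* The adc.  */

      return ((uint64_t) hi << 32) | lo;
    }
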
;; Addition insns.
(define_expand "adddi3"
[(parallel
- [(set (match_operand:DI 0 "s_register_operand" "")
+ [(set (match_operand:DI 0 "s_register_operand" "")
(plus:DI (match_operand:DI 1 "s_register_operand" "")
(match_operand:DI 2 "s_register_operand" "")))
- (clobber (reg:CC 24))
- ])]
+ (clobber (reg:CC 24))])]
"TARGET_EITHER"
"
if (TARGET_THUMB)
(define_insn "*thumb_adddi3"
[(set (match_operand:DI 0 "register_operand" "=l")
(plus:DI (match_operand:DI 1 "register_operand" "%0")
- (match_operand:DI 2 "register_operand" "l")))
+ (match_operand:DI 2 "register_operand" "l")))
- (clobber (reg:CC 24))
- ]
+ (clobber (reg:CC 24))]
"TARGET_THUMB"
(define_insn "*arm_adddi3"
[(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
- (plus:DI (match_operand:DI 1 "s_register_operand" "%0,0")
- (match_operand:DI 2 "s_register_operand" "r,0")))
- (clobber (reg:CC 24))
- ]
+ (plus:DI (match_operand:DI 1 "s_register_operand" "%0, 0")
+ (match_operand:DI 2 "s_register_operand" "r, 0")))
+ (clobber (reg:CC 24))]
"TARGET_ARM"
"#"
[(set_attr "conds" "clob")
(plus:DI (sign_extend:DI
(match_operand:SI 2 "s_register_operand" "r,r"))
(match_operand:DI 1 "s_register_operand" "r,0")))
- (clobber (reg:CC 24))
- ]
+ (clobber (reg:CC 24))]
"TARGET_ARM"
"#"
-[(set_attr "conds" "clob")
- (set_attr "length" "8")])
+ [(set_attr "conds" "clob")
+ (set_attr "length" "8")]
+)
(define_insn "*adddi_zesidi_di"
[(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
(plus:SI (match_operand:SI 1 "s_register_operand" "")
(match_operand:SI 2 "const_int_operand" "")))]
"TARGET_ARM &&
- (! (const_ok_for_arm (INTVAL (operands[2]))
+ (!(const_ok_for_arm (INTVAL (operands[2]))
|| const_ok_for_arm (-INTVAL (operands[2]))))"
[(clobber (const_int 0))]
"
(define_insn "*arm_addsi3"
[(set (match_operand:SI 0 "s_register_operand" "=r,r,r")
- (plus:SI (match_operand:SI 1 "s_register_operand" "r,r,r")
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r, r,r")
(match_operand:SI 2 "reg_or_int_operand" "rI,L,?n")))]
"TARGET_ARM"
"@
(define_insn "*addsi3_compare0"
[(set (reg:CC_NOOV 24)
(compare:CC_NOOV
- (plus:SI (match_operand:SI 1 "s_register_operand" "r,r")
- (match_operand:SI 2 "arm_add_operand" "rI,L"))
+ (plus:SI (match_operand:SI 1 "s_register_operand" "r, r")
+ (match_operand:SI 2 "arm_add_operand" "rI,L"))
(const_int 0)))
(set (match_operand:SI 0 "s_register_operand" "=r,r")
(plus:SI (match_dup 1) (match_dup 2)))]
(define_insn "*addsi3_compare0_scratch"
[(set (reg:CC_NOOV 24)
(compare:CC_NOOV
- (plus:SI (match_operand:SI 0 "s_register_operand" "r,r")
- (match_operand:SI 1 "arm_add_operand" "rI,L"))
+ (plus:SI (match_operand:SI 0 "s_register_operand" "r, r")
+ (match_operand:SI 1 "arm_add_operand" "rI,L"))
(const_int 0)))]
"TARGET_ARM"
"@
add%d2\\t%0, %1, #1
mov%D2\\t%0, %1\;add%d2\\t%0, %1, #1"
[(set_attr "conds" "use")
- (set_attr "length" "4,8")])
+ (set_attr "length" "4,8")]
+)
; If a constant is too big to fit in a single instruction then the constant
; will be pre-loaded into a register taking at least two insns, we might be
(plus:SI (match_operand:SI 1 "s_register_operand" "r")
(match_operand:SI 2 "const_int_operand" "n")))]
"TARGET_ARM
- && (! (const_ok_for_arm (INTVAL (operands[2]))
- || const_ok_for_arm (-INTVAL (operands[2]))))"
+ && (!(const_ok_for_arm (INTVAL (operands[2]))
+ || const_ok_for_arm (-INTVAL (operands[2]))))"
[(set (match_dup 0) (plus:SI (match_dup 1) (match_dup 2)))
(set (match_dup 0) (plus:SI (match_dup 0) (match_dup 3)))]
"
-{
- unsigned int val = (unsigned) INTVAL (operands[2]);
- int i;
- unsigned int temp;
+ {
+ unsigned int val = (unsigned) INTVAL (operands[2]);
+ int i;
+ unsigned int temp;
- /* This code is similar to the approach followed in movsi,
- but it must generate exactly two insns. */
+ /* This code is similar to the approach followed in movsi,
+ but it must generate exactly two insns. */
- for (i = 30; i >= 0; i -= 2)
- {
- if (val & (3 << i))
- {
- i -= 6;
- if (i < 0) i = 0;
- if (const_ok_for_arm (temp = (val & ~(255 << i))))
- {
- val &= 255 << i;
- break;
- }
- /* We might be able to do this as (larger number - small number). */
- temp = ((val >> i) & 255) + 1;
- if (temp > 255 && i < 24)
- {
- i += 2;
- temp = ((val >> i) & 255) + 1;
- }
- if (const_ok_for_arm ((temp << i) - val))
- {
- i = temp << i;
- temp = (unsigned) - (int) (i - val);
- val = i;
- break;
- }
- FAIL;
- }
- }
- /* If we got here, we have found a way of doing it in two instructions.
- the two constants are in val and temp. */
- operands[2] = GEN_INT ((int) val);
- operands[3] = GEN_INT ((int) temp);
-}
-")
+ for (i = 30; i >= 0; i -= 2)
+ {
+ if (val & (3 << i))
+ {
+ i -= 6;
+ if (i < 0)
+ i = 0;
+
+ if (const_ok_for_arm (temp = (val & ~(255 << i))))
+ {
+ val &= 255 << i;
+ break;
+ }
+
+ /* We might be able to do this as (larger number - small
+ number). */
+ temp = ((val >> i) & 255) + 1;
+ if (temp > 255 && i < 24)
+ {
+ i += 2;
+ temp = ((val >> i) & 255) + 1;
+ }
+
+ if (const_ok_for_arm ((temp << i) - val))
+ {
+ i = temp << i;
+ temp = (unsigned) - (int) (i - val);
+ val = i;
+ break;
+ }
+
+ FAIL;
+ }
+ }
+
+ /* If we got here, we have found a way of doing it in two
+ instructions. The two constants are in val and temp. */
+ operands[2] = GEN_INT ((int) val);
+ operands[3] = GEN_INT ((int) temp);
+ }"
+)
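
The C fragment in the splitter above hunts for a split of the constant into two encodable immediates: it finds the top set bits, covers them with an 8-bit window (always encodable, since the window sits at an even bit position), and checks whether the remainder is encodable too, falling back to a larger-number-minus-small-number form. A hedged standalone sketch of the first strategy only, reusing the hypothetical fits_arm_immediate() helper sketched earlier (illustration only, not the GCC algorithm verbatim):

    #include <stdint.h>

    extern int fits_arm_immediate (uint32_t i);

    /* Try VAL = HIGH + LOW with both halves encodable, mirroring the
       first branch of the splitter body above.  Returns 1 on success.  */
    static int
    split_add_constant (uint32_t val, uint32_t *high, uint32_t *low)
    {
      int i;

      for (i = 30; i >= 0; i -= 2)
        if (val & ((uint32_t) 3 << i))
          {
            /* Found the top of the constant; slide down so an 8-bit
               window starting at bit I covers it.  */
            i -= 6;
            if (i < 0)
              i = 0;

            *high = val & ((uint32_t) 255 << i); /* Encodable by construction.  */
            *low = val - *high;
            return fits_arm_immediate (*low);
          }
      return 0;
    }
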
(define_insn "addsf3"
[(set (match_operand:SF 0 "s_register_operand" "=f,f")
[(set (match_operand:DI 0 "s_register_operand" "")
(minus:DI (match_operand:DI 1 "s_register_operand" "")
(match_operand:DI 2 "s_register_operand" "")))
- (clobber (reg:CC 24))
- ])
- ]
+ (clobber (reg:CC 24))])]
"TARGET_EITHER"
"
if (TARGET_THUMB)
[(set (match_operand:DI 0 "s_register_operand" "=&r,&r,&r")
(minus:DI (match_operand:DI 1 "s_register_operand" "0,r,0")
(match_operand:DI 2 "s_register_operand" "r,0,0")))
- (clobber (reg:CC 24))
- ]
+ (clobber (reg:CC 24))]
"TARGET_ARM"
"subs\\t%Q0, %Q1, %Q2\;sbc\\t%R0, %R1, %R2"
[(set_attr "conds" "clob")
[(set (match_operand:DI 0 "register_operand" "=l")
(minus:DI (match_operand:DI 1 "register_operand" "0")
(match_operand:DI 2 "register_operand" "l")))
- (clobber (reg:CC 24))
- ]
+ (clobber (reg:CC 24))]
"TARGET_THUMB"
"sub\\t%Q0, %Q0, %Q2\;sbc\\t%R0, %R0, %R2"
[(set_attr "length" "4")]
(minus:DI (match_operand:DI 1 "s_register_operand" "?r,0")
(zero_extend:DI
(match_operand:SI 2 "s_register_operand" "r,r"))))
- (clobber (reg:CC 24))
- ]
+ (clobber (reg:CC 24))]
"TARGET_ARM"
"subs\\t%Q0, %Q1, %2\;sbc\\t%R0, %R1, #0"
[(set_attr "conds" "clob")
(minus:DI (match_operand:DI 1 "s_register_operand" "r,0")
(sign_extend:DI
(match_operand:SI 2 "s_register_operand" "r,r"))))
- (clobber (reg:CC 24))
- ]
+ (clobber (reg:CC 24))]
"TARGET_ARM"
"subs\\t%Q0, %Q1, %2\;sbc\\t%R0, %R1, %2, asr #31"
[(set_attr "conds" "clob")
(minus:DI (zero_extend:DI
(match_operand:SI 2 "s_register_operand" "r,r"))
(match_operand:DI 1 "s_register_operand" "?r,0")))
- (clobber (reg:CC 24))
- ]
+ (clobber (reg:CC 24))]
"TARGET_ARM"
"rsbs\\t%Q0, %Q1, %2\;rsc\\t%R0, %R1, #0"
[(set_attr "conds" "clob")
(minus:DI (sign_extend:DI
(match_operand:SI 2 "s_register_operand" "r,r"))
(match_operand:DI 1 "s_register_operand" "?r,0")))
- (clobber (reg:CC 24))
- ]
+ (clobber (reg:CC 24))]
"TARGET_ARM"
"rsbs\\t%Q0, %Q1, %2\;rsc\\t%R0, %R1, %2, asr #31"
[(set_attr "conds" "clob")
(match_operand:SI 1 "s_register_operand" "r"))
(zero_extend:DI
(match_operand:SI 2 "s_register_operand" "r"))))
- (clobber (reg:CC 24))
- ]
+ (clobber (reg:CC 24))]
"TARGET_ARM"
"subs\\t%Q0, %1, %2\;rsc\\t%R0, %1, %1"
[(set_attr "conds" "clob")
[(set (match_operand:SI 0 "s_register_operand" "")
(minus:SI (match_operand:SI 1 "const_int_operand" "")
(match_operand:SI 2 "s_register_operand" "")))]
- "TARGET_ARM && (! const_ok_for_arm (INTVAL (operands[1])))"
+ "TARGET_ARM && (!const_ok_for_arm (INTVAL (operands[1])))"
[(clobber (const_int 0))]
"
arm_split_constant (MINUS, SImode, INTVAL (operands[1]), operands[0],
(define_insn "*subsi3_compare0"
[(set (reg:CC_NOOV 24)
- (compare:CC_NOOV (minus:SI (match_operand:SI 1 "arm_rhs_operand" "r,I")
- (match_operand:SI 2 "arm_rhs_operand" "rI,r"))
- (const_int 0)))
+ (compare:CC_NOOV
+ (minus:SI (match_operand:SI 1 "arm_rhs_operand" "r,I")
+ (match_operand:SI 2 "arm_rhs_operand" "rI,r"))
+ (const_int 0)))
(set (match_operand:SI 0 "s_register_operand" "=r,r")
(minus:SI (match_dup 1) (match_dup 2)))]
"TARGET_ARM"
[(match_operand 3 "cc_register" "") (const_int 0)])))]
"TARGET_ARM"
"@
- sub%d2\\t%0, %1, #1
- mov%D2\\t%0, %1\;sub%d2\\t%0, %1, #1"
+ sub%d2\\t%0, %1, #1
+ mov%D2\\t%0, %1\;sub%d2\\t%0, %1, #1"
[(set_attr "conds" "use")
(set_attr "length" "*,8")]
)
(set_attr "predicable" "yes")]
)
-;; Unfortunately with the Thumb the '&'/'0' trick can fails when operands 1 and 2
-;; are the same, because reload will make operand 0 match operand 1 without
-;; realizing that this conflicts with operand 2. We fix this by adding another
-;; alternative to match this case, and then `reload' it ourselves. This
-;; alternative must come first.
+; Unfortunately with the Thumb the '&'/'0' trick can fail when operands
+; 1 and 2 are the same, because reload will make operand 0 match
+; operand 1 without realizing that this conflicts with operand 2. We fix
+; this by adding another alternative to match this case, and then `reload'
+; it ourselves. This alternative must come first.
(define_insn "*thumb_mulsi3"
[(set (match_operand:SI 0 "register_operand" "=&l,&l,&l")
(mult:SI (match_operand:SI 1 "register_operand" "%l,*h,0")
(define_insn "*mulsi3addsi_compare0"
[(set (reg:CC_NOOV 24)
- (compare:CC_NOOV (plus:SI
- (mult:SI
- (match_operand:SI 2 "s_register_operand" "r,r,r,r")
- (match_operand:SI 1 "s_register_operand" "%r,0,r,0"))
- (match_operand:SI 3 "s_register_operand" "?r,r,0,0"))
- (const_int 0)))
+ (compare:CC_NOOV
+ (plus:SI (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "s_register_operand" "%r,0,r,0"))
+ (match_operand:SI 3 "s_register_operand" "?r,r,0,0"))
+ (const_int 0)))
(set (match_operand:SI 0 "s_register_operand" "=&r,&r,&r,&r")
(plus:SI (mult:SI (match_dup 2) (match_dup 1))
(match_dup 3)))]
(define_insn "*mulsi3addsi_compare0_scratch"
[(set (reg:CC_NOOV 24)
- (compare:CC_NOOV (plus:SI
- (mult:SI
- (match_operand:SI 2 "s_register_operand" "r,r,r,r")
- (match_operand:SI 1 "s_register_operand" "%r,0,r,0"))
- (match_operand:SI 3 "s_register_operand" "?r,r,0,0"))
- (const_int 0)))
+ (compare:CC_NOOV
+ (plus:SI (mult:SI
+ (match_operand:SI 2 "s_register_operand" "r,r,r,r")
+ (match_operand:SI 1 "s_register_operand" "%r,0,r,0"))
+ (match_operand:SI 3 "s_register_operand" "?r,r,0,0"))
+ (const_int 0)))
(clobber (match_scratch:SI 0 "=&r,&r,&r,&r"))]
"TARGET_ARM"
"mla%?s\\t%0, %2, %1, %3"
(define_insn "*mulsidi3adddi"
[(set (match_operand:DI 0 "s_register_operand" "=&r,&r,&r")
(plus:DI
- (mult:DI (sign_extend:DI
- (match_operand:SI 2 "s_register_operand" "r,0,1"))
- (sign_extend:DI
- (match_operand:SI 1 "s_register_operand" "%r,r,r")))
- (match_dup 0)))]
+ (mult:DI
+ (sign_extend:DI (match_operand:SI 2 "s_register_operand" "r,0,1"))
+ (sign_extend:DI (match_operand:SI 1 "s_register_operand" "%r,r,r")))
+ (match_dup 0)))]
"TARGET_ARM && arm_fast_multiply"
"smlal%?\\t%Q0, %R0, %1, %2"
[(set_attr "type" "mult")
(define_insn "mulsidi3"
[(set (match_operand:DI 0 "s_register_operand" "=&r")
- (mult:DI (sign_extend:DI
- (match_operand:SI 1 "s_register_operand" "%r"))
- (sign_extend:DI
- (match_operand:SI 2 "s_register_operand" "r"))))]
+ (mult:DI
+ (sign_extend:DI (match_operand:SI 1 "s_register_operand" "%r"))
+ (sign_extend:DI (match_operand:SI 2 "s_register_operand" "r"))))]
"TARGET_ARM && arm_fast_multiply"
"smull%?\\t%Q0, %R0, %1, %2"
[(set_attr "type" "mult")
(define_insn "umulsidi3"
[(set (match_operand:DI 0 "s_register_operand" "=&r")
- (mult:DI (zero_extend:DI
- (match_operand:SI 1 "s_register_operand" "%r"))
- (zero_extend:DI
- (match_operand:SI 2 "s_register_operand" "r"))))]
+ (mult:DI
+ (zero_extend:DI (match_operand:SI 1 "s_register_operand" "%r"))
+ (zero_extend:DI (match_operand:SI 2 "s_register_operand" "r"))))]
"TARGET_ARM && arm_fast_multiply"
"umull%?\\t%Q0, %R0, %1, %2"
[(set_attr "type" "mult")
(define_insn "*umulsidi3adddi"
[(set (match_operand:DI 0 "s_register_operand" "=&r,&r,&r")
(plus:DI
- (mult:DI (zero_extend:DI
- (match_operand:SI 2 "s_register_operand" "r,0,1"))
- (zero_extend:DI
- (match_operand:SI 1 "s_register_operand" "%r,r,r")))
- (match_dup 0)))]
+ (mult:DI
+ (zero_extend:DI (match_operand:SI 2 "s_register_operand" "r,0,1"))
+ (zero_extend:DI (match_operand:SI 1 "s_register_operand" "%r,r,r")))
+ (match_dup 0)))]
"TARGET_ARM && arm_fast_multiply"
"umlal%?\\t%Q0, %R0, %1, %2"
[(set_attr "type" "mult")
[(set (match_operand:SI 0 "s_register_operand" "=&r,&r")
(truncate:SI
(lshiftrt:DI
- (mult:DI (sign_extend:DI
- (match_operand:SI 1 "s_register_operand" "%r,0"))
- (sign_extend:DI
- (match_operand:SI 2 "s_register_operand" "r,r")))
+ (mult:DI
+ (sign_extend:DI (match_operand:SI 1 "s_register_operand" "%r,0"))
+ (sign_extend:DI (match_operand:SI 2 "s_register_operand" "r,r")))
(const_int 32))))
(clobber (match_scratch:SI 3 "=&r,&r"))]
"TARGET_ARM && arm_fast_multiply"
[(set (match_operand:SI 0 "s_register_operand" "=&r,&r")
(truncate:SI
(lshiftrt:DI
- (mult:DI (zero_extend:DI
- (match_operand:SI 1 "s_register_operand" "%r,0"))
- (zero_extend:DI
- (match_operand:SI 2 "s_register_operand" "r,r")))
+ (mult:DI
+ (zero_extend:DI (match_operand:SI 1 "s_register_operand" "%r,0"))
+ (zero_extend:DI (match_operand:SI 2 "s_register_operand" "r,r")))
(const_int 32))))
(clobber (match_scratch:SI 3 "=&r,&r"))]
"TARGET_ARM && arm_fast_multiply"
(define_insn "*muldf_esfdf_esfdf"
[(set (match_operand:DF 0 "s_register_operand" "=f")
- (mult:DF (float_extend:DF
- (match_operand:SF 1 "s_register_operand" "f"))
- (float_extend:DF
- (match_operand:SF 2 "s_register_operand" "f"))))]
+ (mult:DF
+ (float_extend:DF (match_operand:SF 1 "s_register_operand" "f"))
+ (float_extend:DF (match_operand:SF 2 "s_register_operand" "f"))))]
"TARGET_ARM && TARGET_HARD_FLOAT"
"muf%?d\\t%0, %1, %2"
[(set_attr "type" "fmul")
[(set (match_dup 0) (match_op_dup:SI 6 [(match_dup 1) (match_dup 2)]))
(set (match_dup 3) (match_op_dup:SI 6 [(match_dup 4) (match_dup 5)]))]
"
-{
- operands[3] = gen_highpart (SImode, operands[0]);
- operands[0] = gen_lowpart (SImode, operands[0]);
- operands[4] = gen_highpart (SImode, operands[1]);
- operands[1] = gen_lowpart (SImode, operands[1]);
- operands[5] = gen_highpart (SImode, operands[2]);
- operands[2] = gen_lowpart (SImode, operands[2]);
-}")
+ {
+ operands[3] = gen_highpart (SImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[4] = gen_highpart (SImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[5] = gen_highpart (SImode, operands[2]);
+ operands[2] = gen_lowpart (SImode, operands[2]);
+ }"
+)
(define_split
[(set (match_operand:DI 0 "s_register_operand" "")
[(set (match_dup 0) (not:SI (match_dup 1)))
(set (match_dup 2) (not:SI (match_dup 3)))]
"
-{
- operands[2] = gen_highpart (SImode, operands[0]);
- operands[0] = gen_lowpart (SImode, operands[0]);
- operands[3] = gen_highpart (SImode, operands[1]);
- operands[1] = gen_lowpart (SImode, operands[1]);
-}")
+ {
+ operands[2] = gen_highpart (SImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[3] = gen_highpart (SImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ }"
+)
(define_split
[(set (match_operand:DI 0 "s_register_operand" "")
[(set (match_dup 0) (and:SI (not:SI (match_dup 1)) (match_dup 2)))
(set (match_dup 3) (and:SI (not:SI (match_dup 4)) (match_dup 5)))]
"
-{
- operands[3] = gen_highpart (SImode, operands[0]);
- operands[0] = gen_lowpart (SImode, operands[0]);
- operands[4] = gen_highpart (SImode, operands[1]);
- operands[1] = gen_lowpart (SImode, operands[1]);
- operands[5] = gen_highpart (SImode, operands[2]);
- operands[2] = gen_lowpart (SImode, operands[2]);
-}")
+ {
+ operands[3] = gen_highpart (SImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[4] = gen_highpart (SImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[5] = gen_highpart (SImode, operands[2]);
+ operands[2] = gen_lowpart (SImode, operands[2]);
+ }"
+)
(define_split
[(set (match_operand:DI 0 "s_register_operand" "")
[(ashiftrt:SI (match_dup 2) (const_int 31))
(match_dup 4)]))]
"
-{
- operands[3] = gen_highpart (SImode, operands[0]);
- operands[0] = gen_lowpart (SImode, operands[0]);
- operands[4] = gen_highpart (SImode, operands[1]);
- operands[1] = gen_lowpart (SImode, operands[1]);
- operands[5] = gen_highpart (SImode, operands[2]);
- operands[2] = gen_lowpart (SImode, operands[2]);
-}")
+ {
+ operands[3] = gen_highpart (SImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[4] = gen_highpart (SImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[5] = gen_highpart (SImode, operands[2]);
+ operands[2] = gen_lowpart (SImode, operands[2]);
+ }"
+)
(define_split
[(set (match_operand:DI 0 "s_register_operand" "")
(ashiftrt:SI (match_dup 2) (const_int 31)))
(match_dup 4)))]
"
-{
- operands[3] = gen_highpart (SImode, operands[0]);
- operands[0] = gen_lowpart (SImode, operands[0]);
- operands[4] = gen_highpart (SImode, operands[1]);
- operands[1] = gen_lowpart (SImode, operands[1]);
- operands[2] = gen_lowpart (SImode, operands[2]);
-}")
+ {
+ operands[3] = gen_highpart (SImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[4] = gen_highpart (SImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_lowpart (SImode, operands[2]);
+ }"
+)
;; The zero extend of operand 2 clears the high word of the output
;; operand.
[(set (match_dup 0) (and:SI (match_dup 1) (match_dup 2)))
(set (match_dup 3) (const_int 0))]
"
-{
- operands[3] = gen_highpart (SImode, operands[0]);
- operands[0] = gen_lowpart (SImode, operands[0]);
- operands[1] = gen_lowpart (SImode, operands[1]);
-}")
+ {
+ operands[3] = gen_highpart (SImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ }"
+)
;; The zero extend of operand 2 means we can just copy the high part of
;; operand1 into operand0.
[(set (match_dup 0) (ior:SI (match_dup 1) (match_dup 2)))
(set (match_dup 3) (match_dup 4))]
"
-{
- operands[4] = gen_highpart (SImode, operands[1]);
- operands[3] = gen_highpart (SImode, operands[0]);
- operands[0] = gen_lowpart (SImode, operands[0]);
- operands[1] = gen_lowpart (SImode, operands[1]);
-}")
+ {
+ operands[4] = gen_highpart (SImode, operands[1]);
+ operands[3] = gen_highpart (SImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ }"
+)
;; The zero extend of operand 2 means we can just copy the high part of
;; operand1 into operand0.
[(set (match_dup 0) (xor:SI (match_dup 1) (match_dup 2)))
(set (match_dup 3) (match_dup 4))]
"
-{
- operands[4] = gen_highpart (SImode, operands[1]);
- operands[3] = gen_highpart (SImode, operands[0]);
- operands[0] = gen_lowpart (SImode, operands[0]);
- operands[1] = gen_lowpart (SImode, operands[1]);
-}")
+ {
+ operands[4] = gen_highpart (SImode, operands[1]);
+ operands[3] = gen_highpart (SImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ }"
+)
;; (not (zero_extend ...)) allows us to just copy the high word from
;; operand1 to operand0.
[(set (match_dup 0) (and:SI (not:SI (match_dup 1)) (match_dup 2)))
(set (match_dup 3) (match_dup 4))]
"
-{
- operands[3] = gen_highpart (SImode, operands[0]);
- operands[0] = gen_lowpart (SImode, operands[0]);
- operands[4] = gen_highpart (SImode, operands[1]);
- operands[1] = gen_lowpart (SImode, operands[1]);
- operands[2] = gen_lowpart (SImode, operands[2]);
-}")
+ {
+ operands[3] = gen_highpart (SImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[4] = gen_highpart (SImode, operands[1]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_lowpart (SImode, operands[2]);
+ }"
+)
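Each of the zero-extend splits above turns one DImode logical operation into a
single SImode operation plus, at most, a trivial high-word move. A standalone
C sketch of the word-level behaviour, not part of the patch and with
hypothetical helper names:

    #include <stdint.h>

    /* AND with a zero-extended operand zeroes the high word; IOR and XOR
       leave the high word of operand 1 untouched.  */
    static uint64_t and_zesidi (uint64_t a, uint32_t b)
    { return (uint64_t) ((uint32_t) a & b); }            /* high word 0 */

    static uint64_t ior_zesidi (uint64_t a, uint32_t b)
    { return (a & 0xffffffff00000000ull) | ((uint32_t) a | b); }

    static uint64_t xor_zesidi (uint64_t a, uint32_t b)
    { return (a & 0xffffffff00000000ull) | ((uint32_t) a ^ b); }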
(define_insn "anddi3"
[(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
(match_operand:DI 2 "s_register_operand" "r,r")))]
"TARGET_ARM"
"#"
-[(set_attr "length" "8")])
+ [(set_attr "length" "8")]
+)
(define_insn "*anddi_zesidi_di"
[(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
(match_operand:DI 1 "s_register_operand" "?r,0")))]
"TARGET_ARM"
"#"
-[(set_attr "length" "8")])
+ [(set_attr "length" "8")]
+)
(define_insn "*anddi_sesdi_di"
[(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
{
int i;
- if (((unsigned HOST_WIDE_INT) ~ INTVAL (operands[2])) < 256)
+ if (((unsigned HOST_WIDE_INT) ~INTVAL (operands[2])) < 256)
{
- operands[2] = force_reg (SImode, GEN_INT (~INTVAL (operands[2])));
+ operands[2] = force_reg (SImode,
+ GEN_INT (~INTVAL (operands[2])));
emit_insn (gen_bicsi3 (operands[0], operands[2], operands[1]));
const0_rtx));
DONE;
}
- else if ((((HOST_WIDE_INT) 1) << i) - 1 == ~ INTVAL (operands[2]))
+ else if ((((HOST_WIDE_INT) 1) << i) - 1
+ == ~INTVAL (operands[2]))
{
rtx shift = GEN_INT (i);
rtx reg = gen_reg_rtx (SImode);
operands[2] = force_reg (SImode, operands[2]);
}
- }"
+ }
+ "
)
(define_insn "*arm_andsi3_insn"
(and:SI (match_operand:SI 1 "s_register_operand" "")
(match_operand:SI 2 "const_int_operand" "")))]
"TARGET_ARM
- && (! (const_ok_for_arm ( INTVAL (operands[2]))
- || const_ok_for_arm (~ INTVAL (operands[2]))))"
+ && (!(const_ok_for_arm (INTVAL (operands[2]))
+ || const_ok_for_arm (~INTVAL (operands[2]))))"
[(clobber (const_int 0))]
"
arm_split_constant (AND, SImode, INTVAL (operands[2]), operands[0],
operands[1], 0);
DONE;
-")
+ "
+)
(define_insn "*andsi3_compare0"
[(set (reg:CC_NOOV 24)
return \"movne\\t%0, #1\";
"
[(set_attr "conds" "clob")
- (set_attr "length" "8")])
+ (set_attr "length" "8")]
+)
;;; ??? This pattern is bogus. If operand3 has bits outside the range
;;; represented by the bitfield, then this will produce incorrect results.
(match_operand:SI 3 "nonmemory_operand" ""))]
"TARGET_ARM"
"
-{
- int start_bit = INTVAL (operands[2]);
- int width = INTVAL (operands[1]);
- HOST_WIDE_INT mask = (((HOST_WIDE_INT)1) << width) - 1;
- rtx target, subtarget;
-
- target = operands[0];
- /* Avoid using a subreg as a subtarget, and avoid writing a paradoxical
- subreg as the final target. */
- if (GET_CODE (target) == SUBREG)
- {
- subtarget = gen_reg_rtx (SImode);
- if (GET_MODE_SIZE (GET_MODE (SUBREG_REG (target)))
- < GET_MODE_SIZE (SImode))
- target = SUBREG_REG (target);
- }
- else
- subtarget = target;
-
- if (GET_CODE (operands[3]) == CONST_INT)
- {
- /* Since we are inserting a known constant, we may be able to
- reduce the number of bits that we have to clear so that
- the mask becomes simple. */
- /* ??? This code does not check to see if the new mask is actually
- simpler. It may not be. */
- rtx op1 = gen_reg_rtx (SImode);
- /* ??? Truncate operand3 to fit in the bitfield. See comment before
- start of this pattern. */
- HOST_WIDE_INT op3_value = mask & INTVAL (operands[3]);
- HOST_WIDE_INT mask2 = ((mask & ~op3_value) << start_bit);
-
- emit_insn (gen_andsi3 (op1, operands[0], GEN_INT (~mask2)));
- emit_insn (gen_iorsi3 (subtarget, op1,
- GEN_INT (op3_value << start_bit)));
- }
- else if (start_bit == 0
- && ! (const_ok_for_arm (mask)
- || const_ok_for_arm (~mask)))
- {
- /* A Trick, since we are setting the bottom bits in the word,
- we can shift operand[3] up, operand[0] down, OR them together
- and rotate the result back again. This takes 3 insns, and
- the third might be mergable into another op. */
- /* The shift up copes with the possibility that operand[3] is
- wider than the bitfield. */
- rtx op0 = gen_reg_rtx (SImode);
- rtx op1 = gen_reg_rtx (SImode);
-
- emit_insn (gen_ashlsi3 (op0, operands[3], GEN_INT (32 - width)));
- emit_insn (gen_lshrsi3 (op1, operands[0], operands[1]));
- emit_insn (gen_iorsi3 (op1, op1, op0));
- emit_insn (gen_rotlsi3 (subtarget, op1, operands[1]));
- }
- else if ((width + start_bit == 32)
- && ! (const_ok_for_arm (mask)
- || const_ok_for_arm (~mask)))
- {
- /* Similar trick, but slightly less efficient. */
+ {
+ int start_bit = INTVAL (operands[2]);
+ int width = INTVAL (operands[1]);
+ HOST_WIDE_INT mask = (((HOST_WIDE_INT)1) << width) - 1;
+ rtx target, subtarget;
+
+ target = operands[0];
+ /* Avoid using a subreg as a subtarget, and avoid writing a paradoxical
+ subreg as the final target. */
+ if (GET_CODE (target) == SUBREG)
+ {
+ subtarget = gen_reg_rtx (SImode);
+ if (GET_MODE_SIZE (GET_MODE (SUBREG_REG (target)))
+ < GET_MODE_SIZE (SImode))
+ target = SUBREG_REG (target);
+ }
+ else
+ subtarget = target;
- rtx op0 = gen_reg_rtx (SImode);
- rtx op1 = gen_reg_rtx (SImode);
+ if (GET_CODE (operands[3]) == CONST_INT)
+ {
+ /* Since we are inserting a known constant, we may be able to
+ reduce the number of bits that we have to clear so that
+ the mask becomes simple. */
+ /* ??? This code does not check to see if the new mask is actually
+ simpler. It may not be. */
+ rtx op1 = gen_reg_rtx (SImode);
+ /* ??? Truncate operand3 to fit in the bitfield. See comment before
+ start of this pattern. */
+ HOST_WIDE_INT op3_value = mask & INTVAL (operands[3]);
+ HOST_WIDE_INT mask2 = ((mask & ~op3_value) << start_bit);
+
+ emit_insn (gen_andsi3 (op1, operands[0], GEN_INT (~mask2)));
+ emit_insn (gen_iorsi3 (subtarget, op1,
+ GEN_INT (op3_value << start_bit)));
+ }
+ else if (start_bit == 0
+ && !(const_ok_for_arm (mask)
+ || const_ok_for_arm (~mask)))
+ {
+ /* A trick: since we are setting the bottom bits in the word,
+ we can shift operand[3] up, operand[0] down, OR them together
+ and rotate the result back again. This takes 3 insns, and
+ the third might be mergeable into another op. */
+ /* The shift up copes with the possibility that operand[3] is
+ wider than the bitfield. */
+ rtx op0 = gen_reg_rtx (SImode);
+ rtx op1 = gen_reg_rtx (SImode);
+
+ emit_insn (gen_ashlsi3 (op0, operands[3], GEN_INT (32 - width)));
+ emit_insn (gen_lshrsi3 (op1, operands[0], operands[1]));
+ emit_insn (gen_iorsi3 (op1, op1, op0));
+ emit_insn (gen_rotlsi3 (subtarget, op1, operands[1]));
+ }
+ else if ((width + start_bit == 32)
+ && !(const_ok_for_arm (mask)
+ || const_ok_for_arm (~mask)))
+ {
+ /* Similar trick, but slightly less efficient. */
- emit_insn (gen_ashlsi3 (op0, operands[3], GEN_INT (32 - width)));
- emit_insn (gen_ashlsi3 (op1, operands[0], operands[1]));
- emit_insn (gen_lshrsi3 (op1, op1, operands[1]));
- emit_insn (gen_iorsi3 (subtarget, op1, op0));
- }
- else
- {
- rtx op0 = GEN_INT (mask);
- rtx op1 = gen_reg_rtx (SImode);
- rtx op2 = gen_reg_rtx (SImode);
+ rtx op0 = gen_reg_rtx (SImode);
+ rtx op1 = gen_reg_rtx (SImode);
- if (! (const_ok_for_arm (mask) || const_ok_for_arm (~mask)))
- {
- rtx tmp = gen_reg_rtx (SImode);
+ emit_insn (gen_ashlsi3 (op0, operands[3], GEN_INT (32 - width)));
+ emit_insn (gen_ashlsi3 (op1, operands[0], operands[1]));
+ emit_insn (gen_lshrsi3 (op1, op1, operands[1]));
+ emit_insn (gen_iorsi3 (subtarget, op1, op0));
+ }
+ else
+ {
+ rtx op0 = GEN_INT (mask);
+ rtx op1 = gen_reg_rtx (SImode);
+ rtx op2 = gen_reg_rtx (SImode);
- emit_insn (gen_movsi (tmp, op0));
- op0 = tmp;
- }
+ if (!(const_ok_for_arm (mask) || const_ok_for_arm (~mask)))
+ {
+ rtx tmp = gen_reg_rtx (SImode);
- /* Mask out any bits in operand[3] that are not needed. */
- emit_insn (gen_andsi3 (op1, operands[3], op0));
+ emit_insn (gen_movsi (tmp, op0));
+ op0 = tmp;
+ }
- if (GET_CODE (op0) == CONST_INT
- && (const_ok_for_arm (mask << start_bit)
- || const_ok_for_arm (~ (mask << start_bit))))
- {
- op0 = GEN_INT (~(mask << start_bit));
- emit_insn (gen_andsi3 (op2, operands[0], op0));
- }
- else
- {
- if (GET_CODE (op0) == CONST_INT)
- {
- rtx tmp = gen_reg_rtx (SImode);
+ /* Mask out any bits in operand[3] that are not needed. */
+ emit_insn (gen_andsi3 (op1, operands[3], op0));
- emit_insn (gen_movsi (tmp, op0));
- op0 = tmp;
- }
+ if (GET_CODE (op0) == CONST_INT
+ && (const_ok_for_arm (mask << start_bit)
+ || const_ok_for_arm (~(mask << start_bit))))
+ {
+ op0 = GEN_INT (~(mask << start_bit));
+ emit_insn (gen_andsi3 (op2, operands[0], op0));
+ }
+ else
+ {
+ if (GET_CODE (op0) == CONST_INT)
+ {
+ rtx tmp = gen_reg_rtx (SImode);
- if (start_bit != 0)
- emit_insn (gen_ashlsi3 (op0, op0, operands[2]));
+ emit_insn (gen_movsi (tmp, op0));
+ op0 = tmp;
+ }
+
+ if (start_bit != 0)
+ emit_insn (gen_ashlsi3 (op0, op0, operands[2]));
- emit_insn (gen_andsi_notsi_si (op2, operands[0], op0));
- }
+ emit_insn (gen_andsi_notsi_si (op2, operands[0], op0));
+ }
- if (start_bit != 0)
- emit_insn (gen_ashlsi3 (op1, op1, operands[2]));
+ if (start_bit != 0)
+ emit_insn (gen_ashlsi3 (op1, op1, operands[2]));
- emit_insn (gen_iorsi3 (subtarget, op1, op2));
- }
+ emit_insn (gen_iorsi3 (subtarget, op1, op2));
+ }
- if (subtarget != target)
- {
- /* If TARGET is still a SUBREG, then it must be wider than a word,
- so we must be careful only to set the subword we were asked to. */
- if (GET_CODE (target) == SUBREG)
- emit_move_insn (target, subtarget);
- else
- emit_move_insn (target, gen_lowpart (GET_MODE (target), subtarget));
- }
+ if (subtarget != target)
+ {
+ /* If TARGET is still a SUBREG, then it must be wider than a word,
+ so we must be careful only to set the subword we were asked to. */
+ if (GET_CODE (target) == SUBREG)
+ emit_move_insn (target, subtarget);
+ else
+ emit_move_insn (target, gen_lowpart (GET_MODE (target), subtarget));
+ }
- DONE;
-}
-")
+ DONE;
+ }"
+)
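The shift/OR/rotate trick in the expander above avoids having to load an
awkward mask constant. A standalone C sketch of the start_bit == 0 case, not
part of the patch; insert_low_bits is a hypothetical helper, valid for
1 <= width <= 31:

    #include <assert.h>
    #include <stdint.h>

    /* Insert the low WIDTH bits of VAL into the low WIDTH bits of WORD
       using only shifts, an OR and a rotate, mirroring the ashl/lshr/
       ior/rotl sequence emitted by the expander.  */
    static uint32_t
    insert_low_bits (uint32_t word, uint32_t val, int width)
    {
      uint32_t hi  = val << (32 - width);   /* value moved to the top */
      uint32_t lo  = word >> width;         /* old low bits discarded */
      uint32_t mix = hi | lo;
      /* Rotate left by WIDTH to put everything back in place.  */
      return (mix << width) | (mix >> (32 - width));
    }

    int
    main (void)
    {
      assert (insert_low_bits (0xdeadbeef, 0x5, 4) == 0xdeadbee5);
      assert (insert_low_bits (0xffffffff, 0x0, 8) == 0xffffff00);
      return 0;
    }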
-;; constants for op 2 will never be given to these patterns.
+; constants for op 2 will never be given to these patterns.
(define_insn "*anddi_notdi_di"
[(set (match_operand:DI 0 "s_register_operand" "=&r,&r")
(and:DI (not:DI (match_operand:DI 2 "s_register_operand" "r,0"))
[(set (match_operand:SI 0 "s_register_operand" "")
(ior:SI (match_operand:SI 1 "s_register_operand" "")
(match_operand:SI 2 "const_int_operand" "")))]
- "TARGET_ARM && (! const_ok_for_arm (INTVAL (operands[2])))"
+ "TARGET_ARM && (!const_ok_for_arm (INTVAL (operands[2])))"
[(clobber (const_int 0))]
"
arm_split_constant (IOR, SImode, INTVAL (operands[2]), operands[0],
operands[1], 0);
DONE;
-")
+ "
+)
(define_insn "*iorsi3_compare0"
[(set (reg:CC_NOOV 24)
[(set_attr "conds" "set")]
)
-;; by splitting (IOR (AND (NOT A) (NOT B)) C) as D = AND (IOR A B) (NOT C),
-;; (NOT D) we can sometimes merge the final NOT into one of the following
-;; insns
+; By splitting (IOR (AND (NOT A) (NOT B)) C) as D = AND (IOR A B) (NOT C)
+; followed by (NOT D), we can sometimes merge the final NOT into one of
+; the following insns.
(define_split
[(set (match_operand:SI 0 "s_register_operand" "=r")
|| (code != PLUS && code != MINUS && code != IOR && code != XOR))
output_asm_insn (\"%i4%D5\\t%0, %1, %3\", operands);
return \"\";
- }
- "
+ }"
[(set_attr "conds" "clob")
(set_attr "length" "12")]
)
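The rewrite described in the comment above is plain De Morgan:
(~A & ~B) | C == ~((A | B) & ~C). A throwaway C check, not part of the patch:

    #include <assert.h>
    #include <stdint.h>

    int
    main (void)
    {
      /* Spot-check the identity behind the split on a few values.  */
      uint32_t v[] = { 0, 1, 0x80000000u, 0xffffffffu, 0x12345678u };
      for (int i = 0; i < 5; i++)
        for (int j = 0; j < 5; j++)
          for (int k = 0; k < 5; k++)
            {
              uint32_t a = v[i], b = v[j], c = v[k];
              uint32_t d = (a | b) & ~c;
              assert (((~a & ~b) | c) == (uint32_t) ~d);
            }
      return 0;
    }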
(match_operand:SI 2 "const_int_operand" "")))
(set (match_operand:SI 0 "register_operand" "")
(lshiftrt:SI (match_dup 4)
- (match_operand:SI 3 "const_int_operand" "")))
- ]
+ (match_operand:SI 3 "const_int_operand" "")))]
"TARGET_THUMB"
"
{
operands[2] = GEN_INT (lshift);
operands[4] = gen_reg_rtx (SImode);
- }
- "
+ }"
)
\f
[(parallel
[(set (match_operand:DI 0 "s_register_operand" "")
(neg:DI (match_operand:DI 1 "s_register_operand" "")))
- (clobber (reg:CC 24))
- ])
- ]
+ (clobber (reg:CC 24))])]
"TARGET_EITHER"
"
if (TARGET_THUMB)
if (GET_CODE (operands[1]) != REG)
operands[1] = force_reg (SImode, operands[1]);
}
- "
+ "
)
;; The constraints here are to prevent a *partial* overlap (where %Q0 == %R1).
(define_insn "*arm_negdi2"
[(set (match_operand:DI 0 "s_register_operand" "=&r,r")
(neg:DI (match_operand:DI 1 "s_register_operand" "?r,0")))
- (clobber (reg:CC 24))
- ]
+ (clobber (reg:CC 24))]
"TARGET_ARM"
"rsbs\\t%Q0, %Q1, #0\;rsc\\t%R0, %R1, #0"
[(set_attr "conds" "clob")
(define_insn "*thumb_negdi2"
[(set (match_operand:DI 0 "register_operand" "=&l")
(neg:DI (match_operand:DI 1 "register_operand" "l")))
- (clobber (reg:CC 24))
- ]
+ (clobber (reg:CC 24))]
"TARGET_THUMB"
"mov\\t%R0, #0\;neg\\t%Q0, %Q1\;sbc\\t%R0, %R1"
[(set_attr "length" "6")]
rather than an LDR instruction, so we cannot get an unaligned
word access. */
emit_insn (gen_rtx_SET (VOIDmode, operands[0],
- gen_rtx_ZERO_EXTEND (SImode, operands[1])));
+ gen_rtx_ZERO_EXTEND (SImode,
+ operands[1])));
DONE;
}
if (TARGET_MMU_TRAPS && GET_CODE (operands[1]) == MEM)
emit_insn (gen_movhi_bytes (operands[0], operands[1]));
DONE;
}
- if (! s_register_operand (operands[1], HImode))
+ if (!s_register_operand (operands[1], HImode))
operands[1] = copy_to_mode_reg (HImode, operands[1]);
operands[1] = gen_lowpart (SImode, operands[1]);
operands[2] = gen_reg_rtx (SImode);
{
rtx ops[3];
- if (! s_register_operand (operands[1], HImode))
+ if (!s_register_operand (operands[1], HImode))
operands[1] = copy_to_mode_reg (HImode, operands[1]);
operands[1] = gen_lowpart (SImode, operands[1]);
operands[2] = gen_reg_rtx (SImode);
[(set (match_operand:SI 0 "s_register_operand" "")
(zero_extend:SI (match_operand:HI 1 "alignable_memory_operand" "")))
(clobber (match_operand:SI 2 "s_register_operand" ""))]
- "TARGET_ARM && (! arm_arch4)"
+ "TARGET_ARM && (!arm_arch4)"
[(set (match_dup 2) (match_dup 1))
(set (match_dup 0) (lshiftrt:SI (match_dup 2) (const_int 16)))]
"
-{
if ((operands[1] = arm_gen_rotated_half_load (operands[1])) == NULL)
FAIL;
-}")
+ "
+)
(define_split
[(set (match_operand:SI 0 "s_register_operand" "")
[(zero_extend:SI (match_operand:HI 1 "alignable_memory_operand" ""))
(match_operand:SI 4 "s_register_operand" "")]))
(clobber (match_operand:SI 2 "s_register_operand" ""))]
- "TARGET_ARM && (! arm_arch4)"
+ "TARGET_ARM && (!arm_arch4)"
[(set (match_dup 2) (match_dup 1))
(set (match_dup 0)
(match_op_dup 3
[(lshiftrt:SI (match_dup 2) (const_int 16)) (match_dup 4)]))]
"
-{
if ((operands[1] = arm_gen_rotated_half_load (operands[1])) == NULL)
FAIL;
-}")
+ "
+)
(define_expand "zero_extendqisi2"
[(set (match_operand:SI 0 "s_register_operand" "")
{
if (TARGET_ARM)
{
- emit_insn (gen_andsi3 (operands[0], gen_lowpart (SImode, operands[1]),
+ emit_insn (gen_andsi3 (operands[0],
+ gen_lowpart (SImode, operands[1]),
GEN_INT (255)));
}
else /* TARGET_THUMB */
ops[1] = operands[1];
ops[2] = GEN_INT (24);
- emit_insn (gen_rtx_SET (VOIDmode, ops[0], gen_rtx_ASHIFT (SImode, ops[1], ops[2])));
+ emit_insn (gen_rtx_SET (VOIDmode, ops[0],
+ gen_rtx_ASHIFT (SImode, ops[1], ops[2])));
ops[0] = operands[0];
ops[1] = temp;
ops[2] = GEN_INT (24);
- emit_insn (gen_rtx_SET (VOIDmode, ops[0], gen_rtx_LSHIFTRT (SImode, ops[1], ops[2])));
+ emit_insn (gen_rtx_SET (VOIDmode, ops[0],
+ gen_rtx_LSHIFTRT (SImode, ops[1], ops[2])));
}
DONE;
}
-")
+ "
+)
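The expander above masks with AND #255 on ARM but, for Thumb, shifts up by 24
and back down, since Thumb cannot encode the mask as an immediate; the
sign-extending patterns later in the file use the same shift pair with an
arithmetic right shift. A standalone C sketch, not part of the patch:

    #include <assert.h>
    #include <stdint.h>

    static uint32_t zext_qi       (uint32_t x) { return x & 255; }
    static uint32_t zext_qi_thumb (uint32_t x) { return (x << 24) >> 24; }
    /* Arithmetic shift back down gives the sign-extending variant.  */
    static int32_t  sext_qi_thumb (uint32_t x)
    { return (int32_t) (x << 24) >> 24; }

    int
    main (void)
    {
      assert (zext_qi (0x1ff) == zext_qi_thumb (0x1ff));
      assert (sext_qi_thumb (0x80) == -128);
      return 0;
    }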
(define_insn "*thumb_zero_extendqisi2"
[(set (match_operand:SI 0 "register_operand" "=l")
emit_insn (gen_extendhisi2_mem (operands[0], operands[1]));
DONE;
}
- if (! s_register_operand (operands[1], HImode))
+ if (!s_register_operand (operands[1], HImode))
operands[1] = copy_to_mode_reg (HImode, operands[1]);
operands[1] = gen_lowpart (SImode, operands[1]);
operands[2] = gen_reg_rtx (SImode);
(ior:SI (ashiftrt:SI (match_dup 6) (const_int 16)) (match_dup 5)))]
"TARGET_ARM"
"
-{
- rtx mem1, mem2;
- rtx addr = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
-
- mem1 = gen_rtx_MEM (QImode, addr);
- MEM_COPY_ATTRIBUTES (mem1, operands[1]);
- mem2 = gen_rtx_MEM (QImode, plus_constant (addr, 1));
- MEM_COPY_ATTRIBUTES (mem2, operands[1]);
- operands[0] = gen_lowpart (SImode, operands[0]);
- operands[1] = mem1;
- operands[2] = gen_reg_rtx (SImode);
- operands[3] = gen_reg_rtx (SImode);
- operands[6] = gen_reg_rtx (SImode);
- operands[7] = mem2;
+ {
+ rtx mem1, mem2;
+ rtx addr = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+
+ mem1 = gen_rtx_MEM (QImode, addr);
+ MEM_COPY_ATTRIBUTES (mem1, operands[1]);
+ mem2 = gen_rtx_MEM (QImode, plus_constant (addr, 1));
+ MEM_COPY_ATTRIBUTES (mem2, operands[1]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = mem1;
+ operands[2] = gen_reg_rtx (SImode);
+ operands[3] = gen_reg_rtx (SImode);
+ operands[6] = gen_reg_rtx (SImode);
+ operands[7] = mem2;
- if (BYTES_BIG_ENDIAN)
- {
- operands[4] = operands[2];
- operands[5] = operands[3];
- }
- else
- {
- operands[4] = operands[3];
- operands[5] = operands[2];
- }
-}
-")
+ if (BYTES_BIG_ENDIAN)
+ {
+ operands[4] = operands[2];
+ operands[5] = operands[3];
+ }
+ else
+ {
+ operands[4] = operands[3];
+ operands[5] = operands[2];
+ }
+ }"
+)
(define_insn "*arm_extendhisi_insn"
[(set (match_operand:SI 0 "s_register_operand" "=r")
[(set (match_operand:SI 0 "s_register_operand" "")
(sign_extend:SI (match_operand:HI 1 "alignable_memory_operand" "")))
(clobber (match_operand:SI 2 "s_register_operand" ""))]
- "TARGET_ARM && (! arm_arch4)"
+ "TARGET_ARM && (!arm_arch4)"
[(set (match_dup 2) (match_dup 1))
(set (match_dup 0) (ashiftrt:SI (match_dup 2) (const_int 16)))]
"
[(sign_extend:SI (match_operand:HI 1 "alignable_memory_operand" ""))
(match_operand:SI 4 "s_register_operand" "")]))
(clobber (match_operand:SI 2 "s_register_operand" ""))]
- "TARGET_ARM && (! arm_arch4)"
+ "TARGET_ARM && (!arm_arch4)"
[(set (match_dup 2) (match_dup 1))
(set (match_dup 0)
(match_op_dup 3
(const_int 24)))]
"TARGET_ARM"
"
-{
- if (arm_arch4 && GET_CODE (operands[1]) == MEM)
- {
- emit_insn (gen_rtx_SET (VOIDmode,
- operands[0],
- gen_rtx_SIGN_EXTEND (HImode, operands[1])));
- DONE;
- }
- if (! s_register_operand (operands[1], QImode))
- operands[1] = copy_to_mode_reg (QImode, operands[1]);
- operands[0] = gen_lowpart (SImode, operands[0]);
- operands[1] = gen_lowpart (SImode, operands[1]);
- operands[2] = gen_reg_rtx (SImode);
-}")
+ {
+ if (arm_arch4 && GET_CODE (operands[1]) == MEM)
+ {
+ emit_insn (gen_rtx_SET (VOIDmode,
+ operands[0],
+ gen_rtx_SIGN_EXTEND (HImode, operands[1])));
+ DONE;
+ }
+ if (!s_register_operand (operands[1], QImode))
+ operands[1] = copy_to_mode_reg (QImode, operands[1]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = gen_lowpart (SImode, operands[1]);
+ operands[2] = gen_reg_rtx (SImode);
+ }"
+)
; Rather than restricting all byte accesses to memory addresses that ldrsb
; can handle, we fix up the ones that ldrsb can't grok with a split.
operands[1] = XEXP (operands[1], 0);
if (GET_CODE (operands[1]) == PLUS
&& GET_CODE (XEXP (operands[1], 1)) == CONST_INT
- && ! (const_ok_for_arm (offset = INTVAL (XEXP (operands[1], 1)))
- || const_ok_for_arm (-offset)))
+ && !(const_ok_for_arm (offset = INTVAL (XEXP (operands[1], 1)))
+ || const_ok_for_arm (-offset)))
{
HOST_WIDE_INT low = (offset > 0
? (offset & 0xff) : -((-offset) & 0xff));
/* Ensure the sum is in correct canonical form */
else if (GET_CODE (operands[1]) == PLUS
&& GET_CODE (XEXP (operands[1], 1)) != CONST_INT
- && ! s_register_operand (XEXP (operands[1], 1), VOIDmode))
+ && !s_register_operand (XEXP (operands[1], 1), VOIDmode))
operands[1] = gen_rtx_PLUS (GET_MODE (operands[1]),
XEXP (operands[1], 1),
XEXP (operands[1], 0));
- }
-")
+ }"
+)
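The fixup above peels an encodable low part off an out-of-range offset and
folds the remainder into the base register first. A standalone C sketch, not
part of the patch; split_offset is my name for it:

    #include <assert.h>

    /* LOW is at most 255 in magnitude and has the same sign as OFFSET,
       so OFFSET == *rest + *low always holds.  */
    static void
    split_offset (long offset, long *low, long *rest)
    {
      *low  = offset > 0 ? (offset & 0xff) : -((-offset) & 0xff);
      *rest = offset - *low;
    }

    int
    main (void)
    {
      long lo, rest;
      split_offset (0x12345, &lo, &rest);
      assert (lo == 0x45 && rest == 0x12300);
      split_offset (-0x12345, &lo, &rest);
      assert (lo == -0x45 && rest == -0x12300);
      return 0;
    }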
(define_expand "extendqisi2"
[(set (match_dup 2)
gen_rtx_SIGN_EXTEND (SImode, operands[1])));
DONE;
}
- if (! s_register_operand (operands[1], QImode))
+ if (!s_register_operand (operands[1], QImode))
operands[1] = copy_to_mode_reg (QImode, operands[1]);
operands[1] = gen_lowpart (SImode, operands[1]);
operands[2] = gen_reg_rtx (SImode);
ops[1] = operands[1];
ops[2] = GEN_INT (24);
- emit_insn (gen_rtx_SET (VOIDmode, ops[0], gen_rtx_ASHIFT (SImode, ops[1], ops[2])));
+ emit_insn (gen_rtx_SET (VOIDmode, ops[0],
+ gen_rtx_ASHIFT (SImode, ops[1], ops[2])));
ops[0] = operands[0];
ops[1] = operands[2];
ops[2] = GEN_INT (24);
- emit_insn (gen_rtx_SET (VOIDmode, ops[0], gen_rtx_ASHIFTRT (SImode, ops[1], ops[2])));
+ emit_insn (gen_rtx_SET (VOIDmode, ops[0],
+ gen_rtx_ASHIFTRT (SImode, ops[1], ops[2])));
DONE;
}
operands[1] = XEXP (operands[1], 0);
if (GET_CODE (operands[1]) == PLUS
&& GET_CODE (XEXP (operands[1], 1)) == CONST_INT
- && ! (const_ok_for_arm (offset = INTVAL (XEXP (operands[1], 1)))
- || const_ok_for_arm (-offset)))
+ && !(const_ok_for_arm (offset = INTVAL (XEXP (operands[1], 1)))
+ || const_ok_for_arm (-offset)))
{
HOST_WIDE_INT low = (offset > 0
? (offset & 0xff) : -((-offset) & 0xff));
/* Ensure the sum is in correct canonical form */
else if (GET_CODE (operands[1]) == PLUS
&& GET_CODE (XEXP (operands[1], 1)) != CONST_INT
- && ! s_register_operand (XEXP (operands[1], 1), VOIDmode))
+ && !s_register_operand (XEXP (operands[1], 1), VOIDmode))
operands[1] = gen_rtx_PLUS (GET_MODE (operands[1]),
XEXP (operands[1], 1),
XEXP (operands[1], 0));
- }
-")
+ }"
+)
(define_insn "*thumb_extendqisi2_insn"
[(set (match_operand:SI 0 "register_operand" "=l,l")
if (GET_CODE (b) == REG)
output_asm_insn (\"ldrsb\\t%0, [%1, %2]\", ops);
else if (REGNO (a) == REGNO (ops[0]))
- output_asm_insn (\"ldrb\\t%0, [%1, %2]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ {
+ output_asm_insn (\"ldrb\\t%0, [%1, %2]\", ops);
+ output_asm_insn (\"lsl\\t%0, %0, #24\", ops);
+ output_asm_insn (\"asr\\t%0, %0, #24\", ops);
+ }
else
output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
}
else
{
if (REGNO (b) == REGNO (ops[0]))
- output_asm_insn (\"ldrb\\t%0, [%2, %1]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ {
+ output_asm_insn (\"ldrb\\t%0, [%2, %1]\", ops);
+ output_asm_insn (\"lsl\\t%0, %0, #24\", ops);
+ output_asm_insn (\"asr\\t%0, %0, #24\", ops);
+ }
else
output_asm_insn (\"mov\\t%0, %2\;ldrsb\\t%0, [%1, %0]\", ops);
}
}
else if (GET_CODE (mem) == REG && REGNO (ops[0]) == REGNO (mem))
{
- output_asm_insn (\"ldrb\\t%0, [%0, #0]\;lsl\\t%0, %0, #24\;asr\\t%0, %0, #24\", ops);
+ output_asm_insn (\"ldrb\\t%0, [%0, #0]\", ops);
+ output_asm_insn (\"lsl\\t%0, %0, #24\", ops);
+ output_asm_insn (\"asr\\t%0, %0, #24\", ops);
}
else
{
"
if (TARGET_THUMB)
{
- if (! no_new_pseudos)
+ if (!no_new_pseudos)
{
if (GET_CODE (operands[0]) != REG)
operands[1] = force_reg (DImode, operands[1]);
)
(define_insn "*arm_movdi"
- [(set (match_operand:DI 0 "nonimmediate_di_operand" "=r,r,o<>")
- (match_operand:DI 1 "di_operand" "rIK,mi,r"))]
+ [(set (match_operand:DI 0 "nonimmediate_di_operand" "=r, r, o<>")
+ (match_operand:DI 1 "di_operand" "rIK,mi,r"))]
"TARGET_ARM"
"*
return (output_move_double (operands));
;;; ??? The 'i' constraint looks funny, but it should always be replaced by
;;; thumb_reorg with a memory reference.
(define_insn "*thumb_movdi_insn"
- [(set (match_operand:DI 0 "nonimmediate_operand" "=l,l,l,l,>,l,m,*r")
- (match_operand:DI 1 "general_operand" "l,I,J,>,l,mi,l,*r"))]
+ [(set (match_operand:DI 0 "nonimmediate_operand" "=l,l,l,l,>,l, m,*r")
+ (match_operand:DI 1 "general_operand" "l, I,J,>,l,mi,l,*r"))]
"TARGET_THUMB
&& ( register_operand (operands[0], DImode)
|| register_operand (operands[1], DImode))"
case 5:
return thumb_load_double_from_address (operands);
case 6:
- operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[0], 0), 4));
+ operands[2] = gen_rtx (MEM, SImode,
+ plus_constant (XEXP (operands[0], 0), 4));
output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
return \"\";
case 7:
}
else /* TARGET_THUMB.... */
{
- if (! no_new_pseudos)
+ if (!no_new_pseudos)
{
if (GET_CODE (operands[0]) != REG)
operands[1] = force_reg (SImode, operands[1]);
|| label_mentioned_p (operands[1])))
operands[1] = legitimize_pic_address (operands[1], SImode,
(no_new_pseudos ? operands[0] : 0));
-")
+ "
+)
(define_insn "*arm_movsi_insn"
- [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,m")
- (match_operand:SI 1 "general_operand" "rI,K,mi,r"))]
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r, m")
+ (match_operand:SI 1 "general_operand" "rI,K,mi,r"))]
"TARGET_ARM
&& ( register_operand (operands[0], SImode)
|| register_operand (operands[1], SImode))"
[(set (match_operand:SI 0 "s_register_operand" "")
(match_operand:SI 1 "const_int_operand" ""))]
"TARGET_ARM
- && (! ( const_ok_for_arm (INTVAL (operands[1]))
- || const_ok_for_arm (~INTVAL (operands[1]))))"
+ && (!(const_ok_for_arm (INTVAL (operands[1]))
+ || const_ok_for_arm (~INTVAL (operands[1]))))"
[(clobber (const_int 0))]
"
arm_split_constant (SET, SImode, INTVAL (operands[1]), operands[0],
NULL_RTX, 0);
DONE;
-")
+ "
+)
(define_insn "*thumb_movsi_insn"
- [(set (match_operand:SI 0 "nonimmediate_operand" "=l,l,l,l,l,>,l,m,*lh")
- (match_operand:SI 1 "general_operand" "l,I,J,K,>,l,mi,l,*lh"))]
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=l,l,l,l,l,>,l, m,*lh")
+ (match_operand:SI 1 "general_operand" "l, I,J,K,>,l,mi,l,*lh"))]
"TARGET_THUMB
&& ( register_operand (operands[0], SImode)
|| register_operand (operands[1], SImode))"
(set (match_dup 4) (subreg:QI (match_dup 2) 0))] ;explicit subreg safe
"TARGET_ARM"
"
-{
- rtx addr = XEXP (operands[1], 0);
- enum rtx_code code = GET_CODE (addr);
-
- if ((code == PLUS && GET_CODE (XEXP (addr, 1)) != CONST_INT)
- || code == MINUS)
- addr = force_reg (SImode, addr);
-
- operands[4] = change_address (operands[1], QImode, plus_constant (addr, 1));
- operands[1] = change_address (operands[1], QImode, NULL_RTX);
- operands[3] = gen_lowpart (QImode, operands[0]);
- operands[0] = gen_lowpart (SImode, operands[0]);
- operands[2] = gen_reg_rtx (SImode);
-}
-")
+ {
+ rtx addr = XEXP (operands[1], 0);
+ enum rtx_code code = GET_CODE (addr);
+
+ if ((code == PLUS && GET_CODE (XEXP (addr, 1)) != CONST_INT)
+ || code == MINUS)
+ addr = force_reg (SImode, addr);
+
+ operands[4] = change_address (operands[1], QImode,
+ plus_constant (addr, 1));
+ operands[1] = change_address (operands[1], QImode, NULL_RTX);
+ operands[3] = gen_lowpart (QImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[2] = gen_reg_rtx (SImode);
+ }"
+)
(define_expand "storehi_bigend"
[(set (match_dup 4) (match_dup 3))
(set (match_operand 1 "" "") (subreg:QI (match_dup 2) 0))]
"TARGET_ARM"
"
-{
- rtx addr = XEXP (operands[1], 0);
- enum rtx_code code = GET_CODE (addr);
-
- if ((code == PLUS && GET_CODE (XEXP (addr, 1)) != CONST_INT)
- || code == MINUS)
- addr = force_reg (SImode, addr);
-
- operands[4] = change_address (operands[1], QImode, plus_constant (addr, 1));
- operands[1] = change_address (operands[1], QImode, NULL_RTX);
- operands[3] = gen_lowpart (QImode, operands[0]);
- operands[0] = gen_lowpart (SImode, operands[0]);
- operands[2] = gen_reg_rtx (SImode);
-}
-")
+ {
+ rtx addr = XEXP (operands[1], 0);
+ enum rtx_code code = GET_CODE (addr);
+
+ if ((code == PLUS && GET_CODE (XEXP (addr, 1)) != CONST_INT)
+ || code == MINUS)
+ addr = force_reg (SImode, addr);
+
+ operands[4] = change_address (operands[1], QImode,
+ plus_constant (addr, 1));
+ operands[1] = change_address (operands[1], QImode, NULL_RTX);
+ operands[3] = gen_lowpart (QImode, operands[0]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[2] = gen_reg_rtx (SImode);
+ }"
+)
;; Subroutine to store a half word integer constant into memory.
(define_expand "storeinthi"
(set (match_dup 3) (subreg:QI (match_dup 2) 0))]
"TARGET_ARM"
"
-{
- HOST_WIDE_INT value = INTVAL (operands[1]);
- rtx addr = XEXP (operands[0], 0);
- enum rtx_code code = GET_CODE (addr);
+ {
+ HOST_WIDE_INT value = INTVAL (operands[1]);
+ rtx addr = XEXP (operands[0], 0);
+ enum rtx_code code = GET_CODE (addr);
- if ((code == PLUS && GET_CODE (XEXP (addr, 1)) != CONST_INT)
- || code == MINUS)
- addr = force_reg (SImode, addr);
+ if ((code == PLUS && GET_CODE (XEXP (addr, 1)) != CONST_INT)
+ || code == MINUS)
+ addr = force_reg (SImode, addr);
- operands[1] = gen_reg_rtx (SImode);
- if (BYTES_BIG_ENDIAN)
- {
- emit_insn (gen_movsi (operands[1], GEN_INT ((value >> 8) & 255)));
- if ((value & 255) == ((value >> 8) & 255))
- operands[2] = operands[1];
- else
- {
- operands[2] = gen_reg_rtx (SImode);
- emit_insn (gen_movsi (operands[2], GEN_INT (value & 255)));
- }
- }
- else
- {
- emit_insn (gen_movsi (operands[1], GEN_INT (value & 255)));
- if ((value & 255) == ((value >> 8) & 255))
- operands[2] = operands[1];
- else
- {
- operands[2] = gen_reg_rtx (SImode);
- emit_insn (gen_movsi (operands[2], GEN_INT ((value >> 8) & 255)));
- }
- }
+ operands[1] = gen_reg_rtx (SImode);
+ if (BYTES_BIG_ENDIAN)
+ {
+ emit_insn (gen_movsi (operands[1], GEN_INT ((value >> 8) & 255)));
+ if ((value & 255) == ((value >> 8) & 255))
+ operands[2] = operands[1];
+ else
+ {
+ operands[2] = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (operands[2], GEN_INT (value & 255)));
+ }
+ }
+ else
+ {
+ emit_insn (gen_movsi (operands[1], GEN_INT (value & 255)));
+ if ((value & 255) == ((value >> 8) & 255))
+ operands[2] = operands[1];
+ else
+ {
+ operands[2] = gen_reg_rtx (SImode);
+ emit_insn (gen_movsi (operands[2], GEN_INT ((value >> 8) & 255)));
+ }
+ }
- operands[3] = change_address (operands[0], QImode, plus_constant (addr, 1));
- operands[0] = change_address (operands[0], QImode, NULL_RTX);
-}
-")
+ operands[3] = change_address (operands[0], QImode,
+ plus_constant (addr, 1));
+ operands[0] = change_address (operands[0], QImode, NULL_RTX);
+ }"
+)
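storeinthi above lowers a halfword-constant store to two byte stores, reusing
one register when the two bytes happen to be equal. A standalone C model of
the byte ordering, not part of the patch:

    #include <stdint.h>

    /* The byte order follows BYTES_BIG_ENDIAN, as in the expander.  */
    static void
    store_halfword_const (uint8_t *addr, int value, int big_endian)
    {
      uint8_t lo = value & 255;
      uint8_t hi = (value >> 8) & 255;
      addr[0] = big_endian ? hi : lo;
      addr[1] = big_endian ? lo : hi;
    }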
(define_expand "storehi_single_op"
[(set (match_operand:HI 0 "memory_operand" "")
(match_operand:HI 1 "general_operand" ""))]
"TARGET_ARM && arm_arch4"
"
- if (! s_register_operand (operands[1], HImode))
+ if (!s_register_operand (operands[1], HImode))
operands[1] = copy_to_mode_reg (HImode, operands[1]);
-")
+ "
+)
(define_expand "movhi"
[(set (match_operand:HI 0 "general_operand" "")
"
if (TARGET_ARM)
{
- if (! no_new_pseudos)
+ if (!no_new_pseudos)
{
if (GET_CODE (operands[0]) == MEM)
{
HOST_WIDE_INT val = INTVAL (operands[1]) & 0xffff;
/* If the constant is already valid, leave it alone. */
- if (! const_ok_for_arm (val))
+ if (!const_ok_for_arm (val))
{
/* If setting all the top bits will make the constant
loadable in a single instruction, then set them.
Otherwise, sign extend the number. */
- if (const_ok_for_arm (~ (val | ~0xffff)))
+ if (const_ok_for_arm (~(val | ~0xffff)))
val |= ~0xffff;
else if (val & 0x8000)
val |= ~0xffff;
emit_insn (gen_movsi (reg, GEN_INT (val)));
operands[1] = gen_rtx_SUBREG (HImode, reg, 0);
}
- else if (! arm_arch4)
+ else if (!arm_arch4)
{
/* Note: We do not have to worry about TARGET_MMU_TRAPS
for v4 and up architectures because LDRH instructions will
if ((GET_CODE (base = XEXP (operands[1], 0)) == REG
|| (GET_CODE (base) == PLUS
- && GET_CODE (offset = XEXP (base, 1)) == CONST_INT
+ && (GET_CODE (offset = XEXP (base, 1))
+ == CONST_INT)
&& ((INTVAL(offset) & 1) != 1)
&& GET_CODE (base = XEXP (base, 0)) == REG))
&& REGNO_POINTER_ALIGN (REGNO (base)) >= 32)
{
rtx reg2 = gen_reg_rtx (SImode);
- emit_insn (gen_lshrsi3 (reg2, reg, GEN_INT (16)));
+ emit_insn (gen_lshrsi3 (reg2, reg,
+ GEN_INT (16)));
reg = reg2;
}
}
if ((GET_CODE (base = XEXP (operands[1], 0)) == REG
|| (GET_CODE (base) == PLUS
- && GET_CODE (offset = XEXP (base, 1)) == CONST_INT
+ && (GET_CODE (offset = XEXP (base, 1))
+ == CONST_INT)
&& GET_CODE (base = XEXP (base, 0)) == REG))
&& REGNO_POINTER_ALIGN (REGNO (base)) >= 32)
{
{
HOST_WIDE_INT new_offset = INTVAL (offset) ^ 2;
new = gen_rtx_MEM (SImode,
- plus_constant (base, new_offset));
+ plus_constant (base,
+ new_offset));
MEM_COPY_ATTRIBUTES (new, operands[1]);
emit_insn (gen_movsi (reg, new));
}
else
{
- new = gen_rtx_MEM (SImode, XEXP (operands[1], 0));
+ new = gen_rtx_MEM (SImode,
+ XEXP (operands[1], 0));
MEM_COPY_ATTRIBUTES (new, operands[1]);
emit_insn (gen_rotated_loadsi (reg, new));
}
}
else
{
- emit_insn (gen_movhi_bigend (operands[0], operands[1]));
+ emit_insn (gen_movhi_bigend (operands[0],
+ operands[1]));
DONE;
}
}
}
/* Handle loading a large integer during reload */
else if (GET_CODE (operands[1]) == CONST_INT
- && ! const_ok_for_arm (INTVAL (operands[1]))
- && ! const_ok_for_arm (~INTVAL (operands[1])))
+ && !const_ok_for_arm (INTVAL (operands[1]))
+ && !const_ok_for_arm (~INTVAL (operands[1])))
{
/* Writing a constant to memory needs a scratch, which should
be handled with SECONDARY_RELOADs. */
}
else /* TARGET_THUMB */
{
- if (! no_new_pseudos)
+ if (!no_new_pseudos)
{
if (GET_CODE (operands[0]) != REG)
operands[1] = force_reg (HImode, operands[1]);
/* ??? We shouldn't really get invalid addresses here, but this can
- happen if we are passed a SP (never OK for HImode/QImode) or virtual
- register (rejected by GO_IF_LEGITIMATE_ADDRESS for HImode/QImode)
- relative address. */
+ happen if we are passed a SP (never OK for HImode/QImode) or
+ virtual register (rejected by GO_IF_LEGITIMATE_ADDRESS for
+ HImode/QImode) relative address. */
/* ??? This should perhaps be fixed elsewhere, for instance, in
fixup_stack_1, by checking for other kinds of invalid addresses,
e.g. a bare reference to a virtual register. This may confuse the
alpha though, which must handle this case differently. */
if (GET_CODE (operands[0]) == MEM
- && ! memory_address_p (GET_MODE (operands[0]),
- XEXP (operands[0], 0)))
+ && !memory_address_p (GET_MODE (operands[0]),
+ XEXP (operands[0], 0)))
{
rtx temp = copy_to_reg (XEXP (operands[0], 0));
operands[0] = change_address (operands[0], VOIDmode, temp);
}
if (GET_CODE (operands[1]) == MEM
- && ! memory_address_p (GET_MODE (operands[1]),
- XEXP (operands[1], 0)))
+ && !memory_address_p (GET_MODE (operands[1]),
+ XEXP (operands[1], 0)))
{
rtx temp = copy_to_reg (XEXP (operands[1], 0));
operands[1] = change_address (operands[1], VOIDmode, temp);
}
/* Handle loading a large integer during reload */
else if (GET_CODE (operands[1]) == CONST_INT
- && ! CONST_OK_FOR_THUMB_LETTER (INTVAL (operands[1]), 'I'))
+ && !CONST_OK_FOR_THUMB_LETTER (INTVAL (operands[1]), 'I'))
{
/* Writing a constant to memory needs a scratch, which should
be handled with SECONDARY_RELOADs. */
)
(define_insn "*thumb_movhi_insn"
- [(set (match_operand:HI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l")
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=l,l, m,*r,*h,l")
(match_operand:HI 1 "general_operand" "l,mn,l,*h,*r,I"))]
"TARGET_THUMB
&& ( register_operand (operands[0], HImode)
[(set (match_operand:SI 0 "s_register_operand" "=r")
(rotate:SI (match_operand:SI 1 "offsettable_memory_operand" "o")
(const_int 16)))]
- "TARGET_ARM && (! TARGET_MMU_TRAPS)"
+ "TARGET_ARM && (!TARGET_MMU_TRAPS)"
"*
-{
- rtx ops[2];
+ {
+ rtx ops[2];
- ops[0] = operands[0];
- ops[1] = gen_rtx_MEM (SImode, plus_constant (XEXP (operands[1], 0), 2));
- output_asm_insn (\"ldr%?\\t%0, %1\\t%@ load-rotate\", ops);
- return \"\";
+ ops[0] = operands[0];
+ ops[1] = gen_rtx_MEM (SImode, plus_constant (XEXP (operands[1], 0), 2));
+ output_asm_insn (\"ldr%?\\t%0, %1\\t%@ load-rotate\", ops);
+ return \"\";
}"
[(set_attr "type" "load")
(set_attr "predicable" "yes")]
(ior:SI (ashift:SI (match_dup 4) (const_int 8)) (match_dup 5)))]
"TARGET_ARM"
"
-{
- rtx mem1, mem2;
- rtx addr = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
-
- mem1 = gen_rtx_MEM (QImode, addr);
- MEM_COPY_ATTRIBUTES (mem1, operands[1]);
- mem2 = gen_rtx_MEM (QImode, plus_constant (addr, 1));
- MEM_COPY_ATTRIBUTES (mem2, operands[1]);
- operands[0] = gen_lowpart (SImode, operands[0]);
- operands[1] = mem1;
- operands[2] = gen_reg_rtx (SImode);
- operands[3] = gen_reg_rtx (SImode);
- operands[6] = mem2;
+ {
+ rtx mem1, mem2;
+ rtx addr = copy_to_mode_reg (SImode, XEXP (operands[1], 0));
+
+ mem1 = gen_rtx_MEM (QImode, addr);
+ MEM_COPY_ATTRIBUTES (mem1, operands[1]);
+ mem2 = gen_rtx_MEM (QImode, plus_constant (addr, 1));
+ MEM_COPY_ATTRIBUTES (mem2, operands[1]);
+ operands[0] = gen_lowpart (SImode, operands[0]);
+ operands[1] = mem1;
+ operands[2] = gen_reg_rtx (SImode);
+ operands[3] = gen_reg_rtx (SImode);
+ operands[6] = mem2;
- if (BYTES_BIG_ENDIAN)
- {
- operands[4] = operands[2];
- operands[5] = operands[3];
- }
- else
- {
- operands[4] = operands[3];
- operands[5] = operands[2];
- }
-}
-")
+ if (BYTES_BIG_ENDIAN)
+ {
+ operands[4] = operands[2];
+ operands[5] = operands[3];
+ }
+ else
+ {
+ operands[4] = operands[3];
+ operands[5] = operands[2];
+ }
+ }"
+)
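movhi_bytes above is the load-side counterpart: two byte loads combined with a
shift and an OR, ordered by BYTES_BIG_ENDIAN. A standalone C sketch, not part
of the patch:

    #include <stdint.h>

    static unsigned
    load_halfword_bytes (const uint8_t *addr, int big_endian)
    {
      unsigned b0 = addr[0], b1 = addr[1];
      return big_endian ? (b0 << 8) | b1 : (b1 << 8) | b0;
    }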
(define_expand "movhi_bigend"
[(set (match_dup 2)
"
operands[2] = gen_reg_rtx (SImode);
operands[3] = gen_reg_rtx (SImode);
-")
+ "
+)
;; Pattern to recognise insn generated default case above
(define_insn "*movhi_insn_arch4"
[(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,m,r")
- (match_operand:HI 1 "general_operand" "rI,K,r,m"))]
+ (match_operand:HI 1 "general_operand" "rI,K,r,m"))]
"TARGET_ARM
&& arm_arch4
&& (GET_CODE (operands[1]) != CONST_INT
[(set (match_operand:HI 0 "s_register_operand" "=r,r,r")
(match_operand:HI 1 "general_operand" "rI,K,m"))]
"TARGET_ARM
- && ! arm_arch4
- && ! BYTES_BIG_ENDIAN
- && ! TARGET_MMU_TRAPS
+ && !arm_arch4
+ && !BYTES_BIG_ENDIAN
+ && !TARGET_MMU_TRAPS
&& (GET_CODE (operands[1]) != CONST_INT
|| const_ok_for_arm (INTVAL (operands[1]))
|| const_ok_for_arm (~INTVAL (operands[1])))"
[(set (match_operand:HI 0 "s_register_operand" "=r,r,r")
(match_operand:HI 1 "general_operand" "rI,K,m"))]
"TARGET_ARM
- && ! arm_arch4
+ && !arm_arch4
&& BYTES_BIG_ENDIAN
- && ! TARGET_MMU_TRAPS
+ && !TARGET_MMU_TRAPS
&& (GET_CODE (operands[1]) != CONST_INT
|| const_ok_for_arm (INTVAL (operands[1]))
|| const_ok_for_arm (~INTVAL (operands[1])))"
(const_int 16)))]
"TARGET_ARM
&& BYTES_BIG_ENDIAN
- && ! TARGET_MMU_TRAPS"
+ && !TARGET_MMU_TRAPS"
"ldr%?\\t%0, %1\\t%@ movhi_bigend"
[(set_attr "type" "load")
(set_attr "predicable" "yes")
;; to take any notice of the "o" constraints on reload_memory_operand operand.
(define_expand "reload_outhi"
[(parallel [(match_operand:HI 0 "arm_reload_memory_operand" "=o")
- (match_operand:HI 1 "s_register_operand" "r")
- (match_operand:DI 2 "s_register_operand" "=&l")])]
+ (match_operand:HI 1 "s_register_operand" "r")
+ (match_operand:DI 2 "s_register_operand" "=&l")])]
"TARGET_EITHER"
"if (TARGET_ARM)
arm_reload_out_hi (operands);
}
else /* TARGET_THUMB */
{
- if (! no_new_pseudos)
+ if (!no_new_pseudos)
{
if (GET_CODE (operands[0]) != REG)
operands[1] = force_reg (QImode, operands[1]);
/* ??? We shouldn't really get invalid addresses here, but this can
- happen if we are passed a SP (never OK for HImode/QImode) or virtual
- register (rejected by GO_IF_LEGITIMATE_ADDRESS for HImode/QImode)
- relative address. */
+ happen if we are passed a SP (never OK for HImode/QImode) or
+ virtual register (rejected by GO_IF_LEGITIMATE_ADDRESS for
+ HImode/QImode) relative address. */
/* ??? This should perhaps be fixed elsewhere, for instance, in
fixup_stack_1, by checking for other kinds of invalid addresses,
e.g. a bare reference to a virtual register. This may confuse the
alpha though, which must handle this case differently. */
if (GET_CODE (operands[0]) == MEM
- && ! memory_address_p (GET_MODE (operands[0]),
+ && !memory_address_p (GET_MODE (operands[0]),
XEXP (operands[0], 0)))
{
rtx temp = copy_to_reg (XEXP (operands[0], 0));
operands[0] = change_address (operands[0], VOIDmode, temp);
}
- if (GET_CODE (operands[1]) == MEM
- && ! memory_address_p (GET_MODE (operands[1]),
+ if (GET_CODE (operands[1]) == MEM
+ && !memory_address_p (GET_MODE (operands[1]),
XEXP (operands[1], 0)))
- {
- rtx temp = copy_to_reg (XEXP (operands[1], 0));
- operands[1] = change_address (operands[1], VOIDmode, temp);
- }
+ {
+ rtx temp = copy_to_reg (XEXP (operands[1], 0));
+ operands[1] = change_address (operands[1], VOIDmode, temp);
+ }
}
/* Handle loading a large integer during reload */
else if (GET_CODE (operands[1]) == CONST_INT
- && ! CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I'))
+ && !CONST_OK_FOR_LETTER_P (INTVAL (operands[1]), 'I'))
{
/* Writing a constant to memory needs a scratch, which should
be handled with SECONDARY_RELOADs. */
(define_insn "*thumb_movqi_insn"
[(set (match_operand:QI 0 "nonimmediate_operand" "=l,l,m,*r,*h,l")
- (match_operand:QI 1 "general_operand" "l,m,l,*h,*r,I"))]
+ (match_operand:QI 1 "general_operand" "l, m,l,*h,*r,I"))]
"TARGET_THUMB
&& ( register_operand (operands[0], QImode)
|| register_operand (operands[1], QImode))"
}
else /* TARGET_THUMB */
{
- if (! no_new_pseudos)
+ if (!no_new_pseudos)
{
if (GET_CODE (operands[0]) != REG)
operands[1] = force_reg (SFmode, operands[1]);
[(set (match_operand:SF 0 "nonimmediate_operand" "")
(match_operand:SF 1 "immediate_operand" ""))]
"TARGET_ARM
- && ! TARGET_HARD_FLOAT
+ && !TARGET_HARD_FLOAT
&& reload_completed
&& GET_CODE (operands[1]) == CONST_DOUBLE"
[(set (match_dup 2) (match_dup 3))]
operands[3] = gen_lowpart (SImode, operands[1]);
if (operands[2] == 0 || operands[3] == 0)
FAIL;
-")
+ "
+)
(define_insn "*arm_movsf_hard_insn"
- [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,f,m,f,r,r,r,m")
- (match_operand:SF 1 "general_operand" "fG,H,mE,f,r,f,r,mE,r"))]
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,f, m,f,r,r,r, m")
+ (match_operand:SF 1 "general_operand" "fG,H,mE,f,r,f,r,mE,r"))]
"TARGET_ARM
&& TARGET_HARD_FLOAT
- && (GET_CODE (operands[0]) != MEM || register_operand (operands[1], SFmode))"
+ && (GET_CODE (operands[0]) != MEM
+ || register_operand (operands[1], SFmode))"
"@
mvf%?s\\t%0, %1
mnf%?s\\t%0, #%N1
(match_operand:SF 1 "general_operand" "r,mE,r"))]
"TARGET_ARM
&& TARGET_SOFT_FLOAT
- && (GET_CODE (operands[0]) != MEM || register_operand (operands[1], SFmode))"
+ && (GET_CODE (operands[0]) != MEM
+ || register_operand (operands[1], SFmode))"
"@
mov%?\\t%0, %1
ldr%?\\t%0, %1\\t%@ float
;;; ??? This should have alternatives for constants.
(define_insn "*thumb_movsf_insn"
- [(set (match_operand:SF 0 "nonimmediate_operand" "=l,l,>,l,m,*r,*h")
- (match_operand:SF 1 "general_operand" "l,>,l,mF,l,*h,*r"))]
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=l,l,>,l, m,*r,*h")
+ (match_operand:SF 1 "general_operand" "l, >,l,mF,l,*h,*r"))]
"TARGET_THUMB
&& ( register_operand (operands[0], SFmode)
|| register_operand (operands[1], SFmode))"
}
else /* TARGET_THUMB */
{
- if (! no_new_pseudos)
+ if (!no_new_pseudos)
{
if (GET_CODE (operands[0]) != REG)
operands[1] = force_reg (DFmode, operands[1]);
(match_operand:SI 2 "s_register_operand" "=&r")]
"TARGET_ARM"
"
-{
- enum rtx_code code = GET_CODE (XEXP (operands[0], 0));
+ {
+ enum rtx_code code = GET_CODE (XEXP (operands[0], 0));
- if (code == REG)
- operands[2] = XEXP (operands[0], 0);
- else if (code == POST_INC || code == PRE_DEC)
- {
- operands[0] = gen_rtx_SUBREG (DImode, operands[0], 0);
- operands[1] = gen_rtx_SUBREG (DImode, operands[1], 0);
- emit_insn (gen_movdi (operands[0], operands[1]));
- DONE;
- }
- else if (code == PRE_INC)
- {
- rtx reg = XEXP (XEXP (operands[0], 0), 0);
- emit_insn (gen_addsi3 (reg, reg, GEN_INT (8)));
- operands[2] = reg;
- }
- else if (code == POST_DEC)
- operands[2] = XEXP (XEXP (operands[0], 0), 0);
- else
- emit_insn (gen_addsi3 (operands[2], XEXP (XEXP (operands[0], 0), 0),
- XEXP (XEXP (operands[0], 0), 1)));
+ if (code == REG)
+ operands[2] = XEXP (operands[0], 0);
+ else if (code == POST_INC || code == PRE_DEC)
+ {
+ operands[0] = gen_rtx_SUBREG (DImode, operands[0], 0);
+ operands[1] = gen_rtx_SUBREG (DImode, operands[1], 0);
+ emit_insn (gen_movdi (operands[0], operands[1]));
+ DONE;
+ }
+ else if (code == PRE_INC)
+ {
+ rtx reg = XEXP (XEXP (operands[0], 0), 0);
- emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (DFmode, operands[2]),
- operands[1]));
+ emit_insn (gen_addsi3 (reg, reg, GEN_INT (8)));
+ operands[2] = reg;
+ }
+ else if (code == POST_DEC)
+ operands[2] = XEXP (XEXP (operands[0], 0), 0);
+ else
+ emit_insn (gen_addsi3 (operands[2], XEXP (XEXP (operands[0], 0), 0),
+ XEXP (XEXP (operands[0], 0), 1)));
- if (code == POST_DEC)
- emit_insn (gen_addsi3 (operands[2], operands[2], GEN_INT (-8)));
+ emit_insn (gen_rtx_SET (VOIDmode, gen_rtx_MEM (DFmode, operands[2]),
+ operands[1]));
- DONE;
-}
-")
+ if (code == POST_DEC)
+ emit_insn (gen_addsi3 (operands[2], operands[2], GEN_INT (-8)));
+
+ DONE;
+ }"
+)
(define_insn "*movdf_hard_insn"
- [(set (match_operand:DF 0 "nonimmediate_operand" "=r,Q,r,m,r,f,f,f,m,!f,!r")
- (match_operand:DF 1 "general_operand" "Q,r,r,r,mF,fG,H,mF,f,r,f"))]
+ [(set (match_operand:DF 0 "nonimmediate_operand"
+ "=r,Q,r,m,r, f, f,f, m,!f,!r")
+ (match_operand:DF 1 "general_operand"
+ "Q, r,r,r,mF,fG,H,mF,f,r, f"))]
"TARGET_ARM
&& TARGET_HARD_FLOAT
&& (GET_CODE (operands[0]) != MEM
;;; ??? The 'F' constraint looks funny, but it should always be replaced by
;;; thumb_reorg with a memory reference.
(define_insn "*thumb_movdf_insn"
- [(set (match_operand:DF 0 "nonimmediate_operand" "=l,l,>,l,m,*r")
- (match_operand:DF 1 "general_operand" "l,>,l,mF,l,*r"))]
+ [(set (match_operand:DF 0 "nonimmediate_operand" "=l,l,>,l, m,*r")
+ (match_operand:DF 1 "general_operand" "l, >,l,mF,l,*r"))]
"TARGET_THUMB
&& ( register_operand (operands[0], DFmode)
|| register_operand (operands[1], DFmode))"
case 3:
return thumb_load_double_from_address (operands);
case 4:
- operands[2] = gen_rtx (MEM, SImode, plus_constant (XEXP (operands[0], 0), 4));
+ operands[2] = gen_rtx (MEM, SImode,
+ plus_constant (XEXP (operands[0], 0), 4));
output_asm_insn (\"str\\t%1, %0\;str\\t%H1, %2\", operands);
return \"\";
case 5:
TRUE, FALSE, RTX_UNCHANGING_P(operands[1]),
MEM_IN_STRUCT_P(operands[1]),
MEM_SCALAR_P (operands[1]));
-")
+ "
+)
;; Load multiple with write-back
TRUE, FALSE, RTX_UNCHANGING_P (operands[0]),
MEM_IN_STRUCT_P(operands[0]),
MEM_SCALAR_P (operands[0]));
-")
+ "
+)
;; Store multiple with write-back
"TARGET_THUMB"
"* return thumb_output_move_mem_multiple (3, operands);"
[(set_attr "length" "4")
-;; This isn't entirely accurate... It loads as well, but in terms of
-;; scheduling the following insn it is better to consider it as a store
+ ; This isn't entirely accurate: it loads as well, but in terms of
+ ; scheduling the following insn it is better to consider it as a store.
(set_attr "type" "store3")]
)
"TARGET_THUMB"
"* return thumb_output_move_mem_multiple (2, operands);"
[(set_attr "length" "4")
-;; This isn't entirely accurate... It loads as well, but in terms of
-;; scheduling the following insn it is better to consider it as a store
+ ; This isn't entirely accurate: it loads as well, but in terms of
+ ; scheduling the following insn it is better to consider it as a store.
(set_attr "type" "store2")]
)
(define_insn "*negated_cbranchsi4"
[(set (pc)
(if_then_else
- (match_operator 0 "arm_comparison_operator"
- [(match_operand:SI 1 "register_operand" "l")
- (neg:SI (match_operand:SI 2 "nonmemory_operand" "l"))])
- (label_ref (match_operand 3 "" ""))
- (pc)))]
+ (match_operator 0 "arm_comparison_operator"
+ [(match_operand:SI 1 "register_operand" "l")
+ (neg:SI (match_operand:SI 2 "nonmemory_operand" "l"))])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
"TARGET_THUMB"
"*
output_asm_insn (\"cmn\\t%1, %2\", operands);
(match_operand:SF 1 "fpu_rhs_operand" "")]
"TARGET_ARM && TARGET_HARD_FLOAT"
"
-{
arm_compare_op0 = operands[0];
arm_compare_op1 = operands[1];
DONE;
-}
-")
+ "
+)
(define_expand "cmpdf"
[(match_operand:DF 0 "s_register_operand" "")
(match_operand:DF 1 "fpu_rhs_operand" "")]
"TARGET_ARM && TARGET_HARD_FLOAT"
"
-{
arm_compare_op0 = operands[0];
arm_compare_op1 = operands[1];
DONE;
-}
-")
+ "
+)
(define_expand "cmpxf"
[(match_operand:XF 0 "s_register_operand" "")
(match_operand:XF 1 "fpu_rhs_operand" "")]
"TARGET_ARM && ENABLE_XF_PATTERNS && TARGET_HARD_FLOAT"
- "{
- arm_compare_op0 = operands[0];
- arm_compare_op1 = operands[1];
- DONE;
- }"
+ "
+ arm_compare_op0 = operands[0];
+ arm_compare_op1 = operands[1];
+ DONE;
+ "
)
(define_insn "*arm_cmpsi_insn"
(match_operand:SI 3 "arm_not_operand" "")))]
"TARGET_ARM"
"
-{
- enum rtx_code code = GET_CODE (operands[1]);
- rtx ccreg = arm_gen_compare_reg (code, arm_compare_op0, arm_compare_op1);
+ {
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx ccreg = arm_gen_compare_reg (code, arm_compare_op0, arm_compare_op1);
- operands[1] = gen_rtx (code, VOIDmode, ccreg, const0_rtx);
-}")
+ operands[1] = gen_rtx (code, VOIDmode, ccreg, const0_rtx);
+ }"
+)
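;; As a sketch of the end result: for "x = c ? a : b", an earlier cmp
;; expander has stashed the comparison operands, the expander above
;; rewrites operands[1] into a test of the CC register, and
;; *movsicc_insn below can then emit something like
;;     cmp     rC, #0
;;     movne   rX, rA
;;     moveq   rX, rB
;; (register names illustrative).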
(define_expand "movsfcc"
[(set (match_operand:SF 0 "s_register_operand" "")
(match_operand:SF 3 "nonmemory_operand" "")))]
"TARGET_ARM"
"
-{
- enum rtx_code code = GET_CODE (operands[1]);
- rtx ccreg;
-
- /* When compiling for SOFT_FLOAT, ensure both arms are in registers.
- Otherwise, ensure it is a valid FP add operand */
- if ((! TARGET_HARD_FLOAT)
- || (! fpu_add_operand (operands[3], SFmode)))
- operands[3] = force_reg (SFmode, operands[3]);
+ {
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx ccreg;
- ccreg = arm_gen_compare_reg (code, arm_compare_op0, arm_compare_op1);
+ /* When compiling for SOFT_FLOAT, ensure both arms are in registers.
+ Otherwise, ensure it is a valid FP add operand.  */
+ if ((!TARGET_HARD_FLOAT)
+ || (!fpu_add_operand (operands[3], SFmode)))
+ operands[3] = force_reg (SFmode, operands[3]);
- operands[1] = gen_rtx (code, VOIDmode, ccreg, const0_rtx);
-}")
+ ccreg = arm_gen_compare_reg (code, arm_compare_op0, arm_compare_op1);
+ operands[1] = gen_rtx (code, VOIDmode, ccreg, const0_rtx);
+ }"
+)
(define_expand "movdfcc"
[(set (match_operand:DF 0 "s_register_operand" "")
(match_operand:DF 3 "fpu_add_operand" "")))]
"TARGET_ARM && TARGET_HARD_FLOAT"
"
-{
- enum rtx_code code = GET_CODE (operands[1]);
- rtx ccreg = arm_gen_compare_reg (code, arm_compare_op0, arm_compare_op1);
+ {
+ enum rtx_code code = GET_CODE (operands[1]);
+ rtx ccreg = arm_gen_compare_reg (code, arm_compare_op0, arm_compare_op1);
- operands[1] = gen_rtx (code, VOIDmode, ccreg, const0_rtx);
-}")
+ operands[1] = gen_rtx (code, VOIDmode, ccreg, const0_rtx);
+ }"
+)
(define_insn "*movsicc_insn"
[(set (match_operand:SI 0 "s_register_operand" "=r,r,r,r,r,r,r,r")
mvn%d3\\t%0, #%B1\;mov%D3\\t%0, %2
mvn%d3\\t%0, #%B1\;mvn%D3\\t%0, #%B2"
[(set_attr "length" "4,4,4,4,8,8,8,8")
- (set_attr "conds" "use")])
+ (set_attr "conds" "use")]
+)
(define_insn "*movsfcc_hard_insn"
[(set (match_operand:SF 0 "s_register_operand" "=f,f,f,f,f,f,f,f")
mnf%d3s\\t%0, #%N1\;mnf%D3s\\t%0, #%N2"
[(set_attr "length" "4,4,4,4,8,8,8,8")
(set_attr "type" "ffarith")
- (set_attr "conds" "use")])
+ (set_attr "conds" "use")]
+)
(define_insn "*movsfcc_soft_insn"
[(set (match_operand:SF 0 "s_register_operand" "=r,r")
mnf%d3d\\t%0, #%N1\;mnf%D3d\\t%0, #%N2"
[(set_attr "length" "4,4,4,4,8,8,8,8")
(set_attr "type" "ffarith")
- (set_attr "conds" "use")])
+ (set_attr "conds" "use")]
+)
+
\f
;; Jump and linkage insns
(clobber (reg:SI 14))]
"TARGET_ARM"
"*
- return output_call (& operands[1]);
+ return output_call (&operands[1]);
"
[(set_attr "length" "12")
(set_attr "type" "call")]
(match_operand 2 "" "")))
(use (match_operand 3 "" ""))
(clobber (reg:SI 14))]
- "TARGET_ARM && (! CONSTANT_ADDRESS_P (XEXP (operands[1], 0)))"
+ "TARGET_ARM && (!CONSTANT_ADDRESS_P (XEXP (operands[1], 0)))"
"*
- return output_call_mem (& operands[1]);
+ return output_call_mem (&operands[1]);
"
[(set_attr "length" "12")
(set_attr "type" "call")]
(clobber (reg:SI 14))]
"TARGET_ARM
&& (GET_CODE (operands[0]) == SYMBOL_REF)
- && ! arm_is_longcall_p (operands[0], INTVAL (operands[2]), 1)"
+ && !arm_is_longcall_p (operands[0], INTVAL (operands[2]), 1)"
"*
{
return NEED_PLT_RELOC ? \"bl%?\\t%a0(PLT)\" : \"bl%?\\t%a0\";
(clobber (reg:SI 14))]
"TARGET_ARM
&& (GET_CODE (operands[1]) == SYMBOL_REF)
- && ! arm_is_longcall_p (operands[1], INTVAL (operands[3]), 1)"
+ && !arm_is_longcall_p (operands[1], INTVAL (operands[3]), 1)"
"*
{
return NEED_PLT_RELOC ? \"bl%?\\t%a1(PLT)\" : \"bl%?\\t%a1\";
(match_operand 2 "" "")])]
"TARGET_ARM"
"
-{
- int i;
+ {
+ int i;
- emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, const0_rtx));
+ emit_call_insn (GEN_CALL (operands[0], const0_rtx, NULL, const0_rtx));
- for (i = 0; i < XVECLEN (operands[2], 0); i++)
- {
- rtx set = XVECEXP (operands[2], 0, i);
- emit_move_insn (SET_DEST (set), SET_SRC (set));
- }
+ for (i = 0; i < XVECLEN (operands[2], 0); i++)
+ {
+ rtx set = XVECEXP (operands[2], 0, i);
- /* The optimizer does not know that the call sets the function value
- registers we stored in the result block. We avoid problems by
- claiming that all hard registers are used and clobbered at this
- point. */
- emit_insn (gen_blockage ());
+ emit_move_insn (SET_DEST (set), SET_SRC (set));
+ }
- DONE;
-}")
+ /* The optimizer does not know that the call sets the function value
+ registers we stored in the result block. We avoid problems by
+ claiming that all hard registers are used and clobbered at this
+ point. */
+ emit_insn (gen_blockage ());
+
+ DONE;
+ }"
+)
;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
;; all of memory. This blocks insns from being moved across this point.
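;; A minimal pattern of that sort is an empty volatile unspec which
;; emits no code; as a sketch (the unspec index 0 here is an assumption,
;; not necessarily the number this port uses):
;;
;; (define_insn "blockage"
;;   [(unspec_volatile [(const_int 0)] 0)]
;;   "TARGET_EITHER"
;;   ""
;;   [(set_attr "length" "0")
;;    (set_attr "type" "block")]
;; )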
(match_operand:SI 4 "" "")] ; Out of range label
"TARGET_ARM"
"
-{
- rtx reg;
- if (operands[1] != const0_rtx)
- {
- reg = gen_reg_rtx (SImode);
- emit_insn (gen_addsi3 (reg, operands[0],
- GEN_INT (-INTVAL (operands[1]))));
- operands[0] = reg;
- }
+ {
+ rtx reg;
+ if (operands[1] != const0_rtx)
+ {
+ reg = gen_reg_rtx (SImode);
- if (! const_ok_for_arm (INTVAL (operands[2])))
- operands[2] = force_reg (SImode, operands[2]);
+ emit_insn (gen_addsi3 (reg, operands[0],
+ GEN_INT (-INTVAL (operands[1]))));
+ operands[0] = reg;
+ }
- emit_jump_insn (gen_casesi_internal (operands[0], operands[2], operands[3],
- operands[4]));
- DONE;
-}")
+ if (!const_ok_for_arm (INTVAL (operands[2])))
+ operands[2] = force_reg (SImode, operands[2]);
+
+ emit_jump_insn (gen_casesi_internal (operands[0], operands[2], operands[3],
+ operands[4]));
+ DONE;
+ }"
+)
;; The USE in this pattern is needed to tell flow analysis that this is
;; a CASESI insn. It has no other purpose.
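;; (Without the USE, flow analysis would see no reference to the
;; dispatch table's label and could discard the table as dead code.)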
operands[2] = operands[1];
operands[1] = operands[0];
return output_add_immediate (operands);
-"
-; we have no idea how long the add_immediate is, it could be up to 4.
-[(set_attr "length" "20")])
+ "
+ [
+ ; we have no idea how long the add_immediate is; it could be up to 4 insns.
+ (set_attr "length" "20")]
+)
(define_insn "*reload_mulsi_compare0"
[(set (reg:CC_NOOV 24)
operands[2] = operands[4];
operands[1] = operands[0];
return output_add_immediate (operands);
-"
-[(set_attr "length" "20")
- (set_attr "type" "mult")])
+ "
+ [(set_attr "length" "20")
+ (set_attr "type" "mult")]
+)
(define_insn "*reload_muladdsi_compare0"
[(set (reg:CC_NOOV 24)
(const_int 0)))]
"TARGET_ARM"
"*
-{
- static const char * const opcodes[4][2] =
- {
- {\"cmp\\t%0, %1\;cmp%d4\\t%2, %3\", \"cmp\\t%2, %3\;cmp%D5\\t%0, %1\"},
- {\"cmn\\t%0, #%n1\;cmp%d4\\t%2, %3\", \"cmp\\t%2, %3\;cmn%D5\\t%0, #%n1\"},
- {\"cmp\\t%0, %1\;cmn%d4\\t%2, #%n3\", \"cmn\\t%2, #%n3\;cmp%D5\\t%0, %1\"},
- {\"cmn\\t%0, #%n1\;cmn%d4\\t%2, #%n3\",
- \"cmn\\t%2, #%n3\;cmn%D5\\t%0, #%n1\"}
- };
- int swap =
- comparison_dominates_p (GET_CODE (operands[5]),
- reverse_condition (GET_CODE (operands[4])));
-
- return opcodes[which_alternative][swap];
-}
-"
+ {
+ static const char * const opcodes[4][2] =
+ {
+ {\"cmp\\t%0, %1\;cmp%d4\\t%2, %3\",
+ \"cmp\\t%2, %3\;cmp%D5\\t%0, %1\"},
+ {\"cmn\\t%0, #%n1\;cmp%d4\\t%2, %3\",
+ \"cmp\\t%2, %3\;cmn%D5\\t%0, #%n1\"},
+ {\"cmp\\t%0, %1\;cmn%d4\\t%2, #%n3\",
+ \"cmn\\t%2, #%n3\;cmp%D5\\t%0, %1\"},
+ {\"cmn\\t%0, #%n1\;cmn%d4\\t%2, #%n3\",
+ \"cmn\\t%2, #%n3\;cmn%D5\\t%0, #%n1\"}
+ };
+ int swap =
+ comparison_dominates_p (GET_CODE (operands[5]),
+ reverse_condition (GET_CODE (operands[4])));
+
+ return opcodes[which_alternative][swap];
+ }"
[(set_attr "conds" "set")
(set_attr "length" "8")]
)
output_asm_insn (\"cmp\\t%1, %2\", operands);
output_asm_insn (\"mov%D3\\t%0, #0\", operands);
return \"mvn%d3\\t%0, #0\";
-"
+ "
[(set_attr "conds" "clob")
(set_attr "length" "12")]
)
if (which_alternative != 1)
output_asm_insn (\"mov%D5\\t%0, %2\", operands);
return \"\";
-"
+ "
[(set_attr "conds" "clob")
(set_attr "length" "8,8,12")]
)
if (which_alternative != 0)
return \"mov%D6\\t%0, %1\";
return \"\";
-"
+ "
[(set_attr "conds" "clob")
(set_attr "length" "8,12")]
)
if (which_alternative != 0)
output_asm_insn (\"mov%d6\\t%0, %1\", operands);
return \"%I7%D6\\t%0, %2, %3\";
-"
+ "
[(set_attr "conds" "clob")
(set_attr "length" "8,12")]
)
(clobber (match_scratch:SI 4 "=r"))]
"TARGET_ARM && adjacent_mem_locations (operands[2], operands[3])"
"*
-{
- rtx ldm[3];
- rtx arith[4];
- int val1 = 0, val2 = 0;
+ {
+ rtx ldm[3];
+ rtx arith[4];
+ int val1 = 0, val2 = 0;
- if (REGNO (operands[0]) > REGNO (operands[4]))
- {
- ldm[1] = operands[4];
- ldm[2] = operands[0];
- }
- else
- {
- ldm[1] = operands[0];
- ldm[2] = operands[4];
- }
- if (GET_CODE (XEXP (operands[2], 0)) != REG)
- val1 = INTVAL (XEXP (XEXP (operands[2], 0), 1));
- if (GET_CODE (XEXP (operands[3], 0)) != REG)
- val2 = INTVAL (XEXP (XEXP (operands[3], 0), 1));
- arith[0] = operands[0];
- arith[3] = operands[1];
- if (val1 < val2)
- {
- arith[1] = ldm[1];
- arith[2] = ldm[2];
- }
- else
- {
- arith[1] = ldm[2];
- arith[2] = ldm[1];
- }
- if (val1 && val2)
- {
- rtx ops[3];
- ldm[0] = ops[0] = operands[4];
- ops[1] = XEXP (XEXP (operands[2], 0), 0);
- ops[2] = XEXP (XEXP (operands[2], 0), 1);
- output_add_immediate (ops);
- if (val1 < val2)
- output_asm_insn (\"ldm%?ia\\t%0, {%1, %2}\", ldm);
- else
- output_asm_insn (\"ldm%?da\\t%0, {%1, %2}\", ldm);
- }
- else if (val1)
- {
- ldm[0] = XEXP (operands[3], 0);
- if (val1 < val2)
- output_asm_insn (\"ldm%?da\\t%0, {%1, %2}\", ldm);
- else
- output_asm_insn (\"ldm%?ia\\t%0, {%1, %2}\", ldm);
- }
- else
- {
- ldm[0] = XEXP (operands[2], 0);
- if (val1 < val2)
- output_asm_insn (\"ldm%?ia\\t%0, {%1, %2}\", ldm);
- else
- output_asm_insn (\"ldm%?da\\t%0, {%1, %2}\", ldm);
- }
- output_asm_insn (\"%I3%?\\t%0, %1, %2\", arith);
- return \"\";
-}
-"
-[(set_attr "length" "12")
- (set_attr "predicable" "yes")
- (set_attr "type" "load")])
+ if (REGNO (operands[0]) > REGNO (operands[4]))
+ {
+ ldm[1] = operands[4];
+ ldm[2] = operands[0];
+ }
+ else
+ {
+ ldm[1] = operands[0];
+ ldm[2] = operands[4];
+ }
+ if (GET_CODE (XEXP (operands[2], 0)) != REG)
+ val1 = INTVAL (XEXP (XEXP (operands[2], 0), 1));
+ if (GET_CODE (XEXP (operands[3], 0)) != REG)
+ val2 = INTVAL (XEXP (XEXP (operands[3], 0), 1));
+ arith[0] = operands[0];
+ arith[3] = operands[1];
+ if (val1 < val2)
+ {
+ arith[1] = ldm[1];
+ arith[2] = ldm[2];
+ }
+ else
+ {
+ arith[1] = ldm[2];
+ arith[2] = ldm[1];
+ }
+ if (val1 && val2)
+ {
+ rtx ops[3];
+ ldm[0] = ops[0] = operands[4];
+ ops[1] = XEXP (XEXP (operands[2], 0), 0);
+ ops[2] = XEXP (XEXP (operands[2], 0), 1);
+ output_add_immediate (ops);
+ if (val1 < val2)
+ output_asm_insn (\"ldm%?ia\\t%0, {%1, %2}\", ldm);
+ else
+ output_asm_insn (\"ldm%?da\\t%0, {%1, %2}\", ldm);
+ }
+ else if (val1)
+ {
+ ldm[0] = XEXP (operands[3], 0);
+ if (val1 < val2)
+ output_asm_insn (\"ldm%?da\\t%0, {%1, %2}\", ldm);
+ else
+ output_asm_insn (\"ldm%?ia\\t%0, {%1, %2}\", ldm);
+ }
+ else
+ {
+ ldm[0] = XEXP (operands[2], 0);
+ if (val1 < val2)
+ output_asm_insn (\"ldm%?ia\\t%0, {%1, %2}\", ldm);
+ else
+ output_asm_insn (\"ldm%?da\\t%0, {%1, %2}\", ldm);
+ }
+ output_asm_insn (\"%I3%?\\t%0, %1, %2\", arith);
+ return \"\";
+ }"
+ [(set_attr "length" "12")
+ (set_attr "predicable" "yes")
+ (set_attr "type" "load")]
+)
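;; As a worked example of the peephole above: an arithmetic op on two
;; adjacent words loaded into ascending registers can collapse to
;;     ldmia   r1, {r2, r3}
;;     add     r0, r2, r3
;; with ldmda used instead when the two offsets run the other way
;; (register numbers illustrative).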
;; the arm can support extended pre-inc instructions
(set (match_operand:SI 0 "s_register_operand" "=r")
(plus:SI (match_dup 1) (match_dup 2)))]
"TARGET_ARM
- && (! BYTES_BIG_ENDIAN)
- && ! TARGET_MMU_TRAPS
+ && !BYTES_BIG_ENDIAN
+ && !TARGET_MMU_TRAPS
&& REGNO (operands[0]) != FRAME_POINTER_REGNUM
&& REGNO (operands[1]) != FRAME_POINTER_REGNUM
&& (GET_CODE (operands[2]) != REG
(set (match_operand:SI 0 "s_register_operand" "=r")
(minus:SI (match_dup 1) (match_dup 2)))]
"TARGET_ARM
- && (!BYTES_BIG_ENDIAN)
- && ! TARGET_MMU_TRAPS
+ && !BYTES_BIG_ENDIAN
+ && !TARGET_MMU_TRAPS
&& REGNO (operands[0]) != FRAME_POINTER_REGNUM
&& REGNO (operands[1]) != FRAME_POINTER_REGNUM
&& (GET_CODE (operands[2]) != REG
(plus:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
(match_dup 1)))]
"TARGET_ARM
- && (! BYTES_BIG_ENDIAN)
- && ! TARGET_MMU_TRAPS
+ && !BYTES_BIG_ENDIAN
+ && !TARGET_MMU_TRAPS
&& REGNO (operands[0]) != FRAME_POINTER_REGNUM
&& REGNO (operands[1]) != FRAME_POINTER_REGNUM
&& REGNO (operands[3]) != FRAME_POINTER_REGNUM"
(minus:SI (match_dup 1) (match_op_dup 2 [(match_dup 3)
(match_dup 4)])))]
"TARGET_ARM
- && (! BYTES_BIG_ENDIAN)
- && ! TARGET_MMU_TRAPS
+ && !BYTES_BIG_ENDIAN
+ && !TARGET_MMU_TRAPS
&& REGNO (operands[0]) != FRAME_POINTER_REGNUM
&& REGNO (operands[1]) != FRAME_POINTER_REGNUM
&& REGNO (operands[3]) != FRAME_POINTER_REGNUM"
(plus:SI (match_dup 0) (match_operand:SI 1 "index_operand" "rJ")))]
"TARGET_ARM
&& (REGNO (operands[2]) != REGNO (operands[0]))
- && (GET_CODE (operands[1]) != REG || (REGNO (operands[1]) != REGNO (operands[0])))"
- "str%?b\\t%2, [%0], %1")
+ && (GET_CODE (operands[1]) != REG
+ || (REGNO (operands[1]) != REGNO (operands[0])))"
+ "str%?b\\t%2, [%0], %1"
+)
(define_peephole
[(set (match_operand:QI 0 "s_register_operand" "=r")
(plus:SI (match_dup 1) (match_operand:SI 2 "index_operand" "rJ")))]
"TARGET_ARM
&& REGNO (operands[0]) != REGNO(operands[1])
- && (GET_CODE (operands[2]) != REG || REGNO(operands[0]) != REGNO (operands[2]))"
- "ldr%?b\\t%0, [%1], %2")
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[0]) != REGNO (operands[2]))"
+ "ldr%?b\\t%0, [%1], %2"
+)
(define_peephole
[(set (mem:SI (match_operand:SI 0 "s_register_operand" "+r"))
(plus:SI (match_dup 0) (match_operand:SI 1 "index_operand" "rJ")))]
"TARGET_ARM
&& (REGNO (operands[2]) != REGNO (operands[0]))
- && (GET_CODE (operands[1]) != REG || (REGNO (operands[1]) != REGNO (operands[0])))"
- "str%?\\t%2, [%0], %1")
+ && (GET_CODE (operands[1]) != REG
+ || (REGNO (operands[1]) != REGNO (operands[0])))"
+ "str%?\\t%2, [%0], %1"
+)
(define_peephole
[(set (match_operand:HI 0 "s_register_operand" "=r")
(set (match_dup 1)
(plus:SI (match_dup 1) (match_operand:SI 2 "index_operand" "rJ")))]
"TARGET_ARM
- && (! BYTES_BIG_ENDIAN)
- && ! TARGET_MMU_TRAPS
+ && !BYTES_BIG_ENDIAN
+ && !TARGET_MMU_TRAPS
&& REGNO (operands[0]) != REGNO(operands[1])
- && (GET_CODE (operands[2]) != REG || REGNO(operands[0]) != REGNO (operands[2]))"
- "ldr%?\\t%0, [%1], %2\\t%@ loadhi")
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[0]) != REGNO (operands[2]))"
+ "ldr%?\\t%0, [%1], %2\\t%@ loadhi"
+)
(define_peephole
[(set (match_operand:SI 0 "s_register_operand" "=r")
(plus:SI (match_dup 1) (match_operand:SI 2 "index_operand" "rJ")))]
"TARGET_ARM
&& REGNO (operands[0]) != REGNO(operands[1])
- && (GET_CODE (operands[2]) != REG || REGNO(operands[0]) != REGNO (operands[2]))"
- "ldr%?\\t%0, [%1], %2")
+ && (GET_CODE (operands[2]) != REG
+ || REGNO (operands[0]) != REGNO (operands[2]))"
+ "ldr%?\\t%0, [%1], %2"
+)
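;; The peepholes above fuse a load or store with the following address
;; update into a single post-indexed access: "ldr r0, [r1], r2" loads
;; from [r1] and then adds r2 to r1 in one instruction (illustrative
;; registers; the rJ constraint also admits suitable immediate indexes).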
(define_peephole
[(set (mem:QI (plus:SI (match_operand:SI 0 "s_register_operand" "+r")
(set (match_dup 0) (plus:SI (match_dup 0) (match_dup 1)))]
"TARGET_ARM
&& (REGNO (operands[2]) != REGNO (operands[0]))
- && (GET_CODE (operands[1]) != REG || (REGNO (operands[1]) != REGNO (operands[0])))"
- "str%?b\\t%2, [%0, %1]!")
+ && (GET_CODE (operands[1]) != REG
+ || (REGNO (operands[1]) != REGNO (operands[0])))"
+ "str%?b\\t%2, [%0, %1]!"
+)
(define_peephole
[(set (mem:QI (plus:SI (match_operator:SI 4 "shift_operator"
"TARGET_ARM
&& (REGNO (operands[3]) != REGNO (operands[2]))
&& (REGNO (operands[0]) != REGNO (operands[2]))"
- "str%?b\\t%3, [%2, %0%S4]!")
+ "str%?b\\t%3, [%2, %0%S4]!"
+)
; This pattern is never tried by combine, so do it as a peephole
"TARGET_ARM && load_multiple_sequence (operands, 4, NULL, NULL, NULL)"
"*
return emit_ldm_seq (operands, 4);
-")
+ "
+)
(define_peephole
[(set (match_operand:SI 0 "s_register_operand" "=r")
"TARGET_ARM && load_multiple_sequence (operands, 3, NULL, NULL, NULL)"
"*
return emit_ldm_seq (operands, 3);
-")
+ "
+)
(define_peephole
[(set (match_operand:SI 0 "s_register_operand" "=r")
"TARGET_ARM && load_multiple_sequence (operands, 2, NULL, NULL, NULL)"
"*
return emit_ldm_seq (operands, 2);
-")
+ "
+)
(define_peephole
[(set (match_operand:SI 4 "memory_operand" "=m")
"TARGET_ARM && store_multiple_sequence (operands, 4, NULL, NULL, NULL)"
"*
return emit_stm_seq (operands, 4);
-")
+ "
+)
(define_peephole
[(set (match_operand:SI 3 "memory_operand" "=m")
"TARGET_ARM && store_multiple_sequence (operands, 3, NULL, NULL, NULL)"
"*
return emit_stm_seq (operands, 3);
-")
+ "
+)
(define_peephole
[(set (match_operand:SI 2 "memory_operand" "=m")
"TARGET_ARM && store_multiple_sequence (operands, 2, NULL, NULL, NULL)"
"*
return emit_stm_seq (operands, 2);
-")
+ "
+)
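;; Each peephole above hands a run of two to four loads or stores to
;; emit_ldm_seq/emit_stm_seq once *_multiple_sequence has verified that
;; the addresses are consecutive, so e.g. four word loads can become a
;; single "ldmia r0, {r1, r2, r3, r4}" (illustrative registers).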
(define_split
[(set (match_operand:SI 0 "s_register_operand" "")
[(set (match_dup 5) (not:SI (ashiftrt:SI (match_dup 1) (const_int 31))))
(set (match_dup 0) (and:SI (match_op_dup 2 [(match_dup 3) (match_dup 4)])
(match_dup 5)))]
- "")
+ ""
+)
;; This split can be used because CC_Z mode implies that the following
;; branch will be an equality, or an unsigned inequality, so the sign
(set (reg:CC 24) (compare:CC (match_dup 2) (match_dup 1)))]
"
operands[1] = GEN_INT (((unsigned long) INTVAL (operands[1])) >> 24);
-")
+ "
+)
(define_expand "prologue"
[(clobber (const_int 0))]
else /* TARGET_THUMB */
return thumb_unexpanded_epilogue ();
"
-;; Length is absolute worst case
+ ; Length is absolute worst case
[(set_attr "length" "44")
(set_attr "type" "block")]
)
(use (match_operand:SI 2 "register_operand" "r"))]
"TARGET_EITHER"
"
-{
- cfun->machine->eh_epilogue_sp_ofs = operands[1];
- if (GET_CODE (operands[2]) != REG || REGNO (operands[2]) != 2)
- {
- rtx ra = gen_rtx_REG (Pmode, 2);
- emit_move_insn (ra, operands[2]);
- operands[2] = ra;
- }
-}")
+ {
+ cfun->machine->eh_epilogue_sp_ofs = operands[1];
+ if (GET_CODE (operands[2]) != REG || REGNO (operands[2]) != 2)
+ {
+ rtx ra = gen_rtx_REG (Pmode, 2);
+
+ emit_move_insn (ra, operands[2]);
+ operands[2] = ra;
+ }
+ }"
+)
;; This split is only used during output to reduce the number of patterns
;; that need assembler instructions adding to them. We allowed the setting
(match_dup 4)
(match_dup 5)))]
"
-{
- enum machine_mode mode = SELECT_CC_MODE (GET_CODE (operands[1]), operands[2],
- operands[3]);
+ {
+ enum machine_mode mode = SELECT_CC_MODE (GET_CODE (operands[1]),
+ operands[2], operands[3]);
- operands[6] = gen_rtx_REG (mode, 24);
- operands[7] = gen_rtx_COMPARE (mode, operands[2], operands[3]);
-}
-")
+ operands[6] = gen_rtx_REG (mode, 24);
+ operands[7] = gen_rtx_COMPARE (mode, operands[2], operands[3]);
+ }"
+)
(define_split
[(set (match_operand:SI 0 "s_register_operand" "")
(match_dup 4)
(not:SI (match_dup 5))))]
"
-{
- enum machine_mode mode = SELECT_CC_MODE (GET_CODE (operands[1]), operands[2],
- operands[3]);
+ {
+ enum machine_mode mode = SELECT_CC_MODE (GET_CODE (operands[1]),
+ operands[2], operands[3]);
- operands[6] = gen_rtx_REG (mode, 24);
- operands[7] = gen_rtx (COMPARE, mode, operands[2], operands[3]);
-}
-")
+ operands[6] = gen_rtx_REG (mode, 24);
+ operands[7] = gen_rtx (COMPARE, mode, operands[2], operands[3]);
+ }"
+)
(define_insn "*cond_move_not"
[(set (match_operand:SI 0 "s_register_operand" "=r,r")
(unspec:BLK [(match_operand:SI 1 "s_register_operand" "r")] 2))])]
"TARGET_ARM"
"*
-{
- int num_saves = XVECLEN (operands[2], 0);
+ {
+ int num_saves = XVECLEN (operands[2], 0);
- /* For the StrongARM at least it is faster to
- use STR to store only a single register. */
- if (num_saves == 2)
- output_asm_insn (\"str\\t%1, [%m0, #-4]!\", operands);
- else
- {
- int i;
- char pattern[100];
+ /* For the StrongARM at least it is faster to
+ use STR to store only a single register. */
+ if (num_saves == 2)
+ output_asm_insn (\"str\\t%1, [%m0, #-4]!\", operands);
+ else
+ {
+ int i;
+ char pattern[100];
- strcpy (pattern, \"stmfd\\t%m0!, {%1\");
-
- /* We skip the first register, since we can extract that directly from
- the pattern. */
- for (i = 2; i < num_saves; i++)
- {
- strcat (pattern, \", %|\");
- strcat (pattern, reg_names[REGNO (XEXP (XVECEXP (operands[2], 0, i),
- 0))]);
- }
-
- strcat (pattern, \"}\");
- output_asm_insn (pattern, operands);
- }
-
- return \"\";
-}"
-[(set_attr "type" "store4")])
+ strcpy (pattern, \"stmfd\\t%m0!, {%1\");
+
+ /* We skip the first register, since we can extract that directly from
+ the pattern. */
+ for (i = 2; i < num_saves; i++)
+ {
+ strcat (pattern, \", %|\");
+ strcat (pattern,
+ reg_names[REGNO (XEXP (XVECEXP (operands[2], 0, i), 0))]);
+ }
+
+ strcat (pattern, \"}\");
+ output_asm_insn (pattern, operands);
+ }
+
+ return \"\";
+ }"
+ [(set_attr "type" "store4")]
+)
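;; So saving a single register (num_saves == 2: the SP update plus one
;; register) comes out as, e.g., "str lr, [sp, #-4]!" rather than
;; "stmfd sp!, {lr}" (lr here is only an example).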
;; Similarly for the floating point registers
(define_insn "*push_fp_multi"
(unspec:BLK [(match_operand:XF 1 "f_register_operand" "f")] 2))])]
"TARGET_ARM"
"*
-{
- char pattern[100];
+ {
+ char pattern[100];
- sprintf (pattern, \"sfmfd\\t%%1, %d, [%%m0]!\", XVECLEN (operands[2], 0));
- output_asm_insn (pattern, operands);
- return \"\";
-}"
-[(set_attr "type" "f_store")])
+ sprintf (pattern, \"sfmfd\\t%%1, %d, [%%m0]!\", XVECLEN (operands[2], 0));
+ output_asm_insn (pattern, operands);
+ return \"\";
+ }"
+ [(set_attr "type" "f_store")]
+)
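;; The register count comes from the parallel's length, so pushing three
;; FPA registers starting at f4 would emit roughly "sfmfd f4, 3, [sp]!"
;; (illustrative; the starting register is taken from operand 1).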
;; Special patterns for dealing with the constant pool