+2015-11-06 Ramana Radhakrishnan <ramana.radhakrishnan@arm.com>
+
+ * config/arm/arm-ldmstm.ml: Rewrite to generate unified asm templates.
+ * config/arm/arm.c (arm_asm_trampoline_template): Make unified asm safe.
+ (arm_output_multireg_pop): Likewise.
+ (output_move_double): Likewise.
+ (output_move_quad): Likewise.
+ (output_return_instruction): Likewise.
+ (arm_print_operand): Remove support for %( and %) print modifiers.
+ (arm_output_shift): Make unified asm.
+ (arm_declare_function_name): Likewise.
+ * config/arm/arm.h (TARGET_UNIFIED_ASM): Delete.
+ (ASM_APP_OFF): Adjust.
+ (ASM_OUTPUT_REG_PUSH): Undo special casing for TARGET_ARM.
+ (ASM_OUTPUT_REG_POP): Likewise.
+ * config/arm/arm.md: Adjust uses of %., %( and %).
+ * config/arm/sync.md: Likewise.
+ * config/arm/thumb2.md: Likewise.
+ * config/arm/ldmstm.md: Regenerate.
+ * config/arm/arm.opt (masm-syntax-unified): Do not special case Thumb.
+ * doc/invoke.texi (masm-syntax-unified): Update documentation.
+
2015-11-06 David Malcolm <dmalcolm@redhat.com>
* input.c (dump_line_table_statistics): Dump stats on adhoc table.
type optype = IN | OUT | INOUT
-let rec string_of_addrmode addrmode =
+let rec string_of_addrmode addrmode thumb update =
+ if thumb || update
+then
match addrmode with
- IA -> "ia" | IB -> "ib" | DA -> "da" | DB -> "db"
+ IA -> "ia"
+ | IB -> "ib"
+ | DA -> "da"
+ | DB -> "db"
+else
+ match addrmode with
+ IA -> ""
+ | IB -> "ib"
+ | DA -> "da"
+ | DB -> "db"
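In unified syntax "ia" is the default addressing mode for a
non-writeback multi-register transfer, so the suffix is spelled out
only for Thumb1 patterns or when writeback is used. For illustration,
the selection above reduces to the following C sketch (hypothetical
helper, mirroring the OCaml):

    /* Sketch only: addrmode values follow the generator's
       IA/IB/DA/DB order.  */
    enum addrmode { IA, IB, DA, DB };

    static const char *
    addrmode_suffix (enum addrmode am, int thumb, int update)
    {
      switch (am)
        {
        case IA: return (thumb || update) ? "ia" : "";
        case IB: return "ib";
        case DA: return "da";
        case DB: return "db";
        }
      return "";
    }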
let rec initial_offset addrmode nregs =
match addrmode with
| _, _ -> raise (InvalidAddrMode "ERROR: Invalid Addressing mode for Thumb1.")
let write_pattern_1 name ls addrmode nregs write_set_fn update thumb =
- let astr = string_of_addrmode addrmode in
+ let astr = string_of_addrmode addrmode thumb update in
Printf.printf "(define_insn \"*%s%s%d_%s%s\"\n"
(if thumb then "thumb_" else "") name nregs astr
(if update then "_update" else "");
Printf.printf ")]\n \"%s && XVECLEN (operands[0], 0) == %d\"\n"
(target addrmode thumb)
(if update then nregs + 1 else nregs);
- Printf.printf " \"%s%%(%s%%)\\t%%%d%s, {"
- name astr (nregs + 1) (if update then "!" else "");
+ if thumb then
+ Printf.printf " \"%s%s\\t%%%d%s, {" name astr (nregs + 1) (if update then "!" else "")
+ else
+ Printf.printf " \"%s%s%%?\\t%%%d%s, {" name astr (nregs + 1) (if update then "!" else "");
for n = 1 to nregs; do
Printf.printf "%%%d%s" n (if n < nregs then ", " else "")
done;
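For example, with name = "ldm", nregs = 4 and update = true, the 32-bit
branch now prints the template "ldmia%?\t%5!, {%1, %2, %3, %4}", while
the Thumb1 branch prints the same template without the "%?" predication
marker; compare the regenerated ldmstm.md hunks below.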
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
};
-#define ARM_LSL_NAME (TARGET_UNIFIED_ASM ? "lsl" : "asl")
+#define ARM_LSL_NAME "lsl"
#define streq(string1, string2) (strcmp (string1, string2) == 0)
#define THUMB2_WORK_REGS (0xff & ~( (1 << THUMB_HARD_FRAME_POINTER_REGNUM) \
static void
arm_asm_trampoline_template (FILE *f)
{
- if (TARGET_UNIFIED_ASM)
- fprintf (f, "\t.syntax unified\n");
- else
- fprintf (f, "\t.syntax divided\n");
+ fprintf (f, "\t.syntax unified\n");
if (TARGET_ARM)
{
}
conditional = reverse ? "%?%D0" : "%?%d0";
- if ((regno_base == SP_REGNUM) && TARGET_THUMB)
+ if ((regno_base == SP_REGNUM) && update)
{
- /* Output pop (not stmfd) because it has a shorter encoding. */
- gcc_assert (update);
sprintf (pattern, "pop%s\t{", conditional);
}
else
/* Output ldmfd when the base register is SP, otherwise output ldmia.
It's just a convention, their semantics are identical. */
if (regno_base == SP_REGNUM)
- sprintf (pattern, "ldm%sfd\t", conditional);
- else if (TARGET_UNIFIED_ASM)
- sprintf (pattern, "ldmia%s\t", conditional);
+ /* update is never true here, hence there is no need to handle
+ pop here. */
+ sprintf (pattern, "ldmfd%s", conditional);
+
+ if (update)
+ sprintf (pattern, "ldmia%s\t", conditional);
else
- sprintf (pattern, "ldm%sia\t", conditional);
+ sprintf (pattern, "ldm%s\t", conditional);
strcat (pattern, reg_names[regno_base]);
if (update)
{
if (TARGET_LDRD
&& !(fix_cm3_ldrd && reg0 == REGNO(XEXP (operands[1], 0))))
- output_asm_insn ("ldr%(d%)\t%0, [%m1]", operands);
+ output_asm_insn ("ldrd%?\t%0, [%m1]", operands);
else
- output_asm_insn ("ldm%(ia%)\t%m1, %M0", operands);
+ output_asm_insn ("ldmia%?\t%m1, %M0", operands);
}
break;
case PRE_INC:
gcc_assert (TARGET_LDRD);
if (emit)
- output_asm_insn ("ldr%(d%)\t%0, [%m1, #8]!", operands);
+ output_asm_insn ("ldrd%?\t%0, [%m1, #8]!", operands);
break;
case PRE_DEC:
if (emit)
{
if (TARGET_LDRD)
- output_asm_insn ("ldr%(d%)\t%0, [%m1, #-8]!", operands);
+ output_asm_insn ("ldrd%?\t%0, [%m1, #-8]!", operands);
else
- output_asm_insn ("ldm%(db%)\t%m1!, %M0", operands);
+ output_asm_insn ("ldmdb%?\t%m1!, %M0", operands);
}
break;
if (emit)
{
if (TARGET_LDRD)
- output_asm_insn ("ldr%(d%)\t%0, [%m1], #8", operands);
+ output_asm_insn ("ldrd%?\t%0, [%m1], #8", operands);
else
- output_asm_insn ("ldm%(ia%)\t%m1!, %M0", operands);
+ output_asm_insn ("ldmia%?\t%m1!, %M0", operands);
}
break;
case POST_DEC:
gcc_assert (TARGET_LDRD);
if (emit)
- output_asm_insn ("ldr%(d%)\t%0, [%m1], #-8", operands);
+ output_asm_insn ("ldrd%?\t%0, [%m1], #-8", operands);
break;
case PRE_MODIFY:
if (emit)
{
output_asm_insn ("add%?\t%1, %1, %2", otherops);
- output_asm_insn ("ldr%(d%)\t%0, [%1] @split", otherops);
+ output_asm_insn ("ldrd%?\t%0, [%1] @split", otherops);
}
if (count)
*count = 2;
&& INTVAL (otherops[2]) < 256))
{
if (emit)
- output_asm_insn ("ldr%(d%)\t%0, [%1, %2]!", otherops);
+ output_asm_insn ("ldrd%?\t%0, [%1, %2]!", otherops);
}
else
{
&& INTVAL (otherops[2]) < 256))
{
if (emit)
- output_asm_insn ("ldr%(d%)\t%0, [%1], %2", otherops);
+ output_asm_insn ("ldrd%?\t%0, [%1], %2", otherops);
}
else
{
if (emit)
{
if (TARGET_LDRD)
- output_asm_insn ("ldr%(d%)\t%0, [%1]", operands);
+ output_asm_insn ("ldrd%?\t%0, [%1]", operands);
else
- output_asm_insn ("ldm%(ia%)\t%1, %M0", operands);
+ output_asm_insn ("ldmia%?\t%1, %M0", operands);
}
if (count)
{
case -8:
if (emit)
- output_asm_insn ("ldm%(db%)\t%1, %M0", otherops);
+ output_asm_insn ("ldmdb%?\t%1, %M0", otherops);
return "";
case -4:
if (TARGET_THUMB2)
break;
if (emit)
- output_asm_insn ("ldm%(da%)\t%1, %M0", otherops);
+ output_asm_insn ("ldmda%?\t%1, %M0", otherops);
return "";
case 4:
if (TARGET_THUMB2)
break;
if (emit)
- output_asm_insn ("ldm%(ib%)\t%1, %M0", otherops);
+ output_asm_insn ("ldmib%?\t%1, %M0", otherops);
return "";
}
}
if (emit)
{
output_asm_insn ("add%?\t%0, %1, %2", otherops);
- output_asm_insn ("ldr%(d%)\t%0, [%1]", operands);
+ output_asm_insn ("ldrd%?\t%0, [%1]", operands);
}
if (count)
*count = 2;
{
otherops[0] = operands[0];
if (emit)
- output_asm_insn ("ldr%(d%)\t%0, [%1, %2]", otherops);
+ output_asm_insn ("ldrd%?\t%0, [%1, %2]", otherops);
}
return "";
}
*count = 2;
if (TARGET_LDRD)
- return "ldr%(d%)\t%0, [%1]";
+ return "ldrd%?\t%0, [%1]";
- return "ldm%(ia%)\t%1, %M0";
+ return "ldmia%?\t%1, %M0";
}
else
{
if (emit)
{
if (TARGET_LDRD)
- output_asm_insn ("str%(d%)\t%1, [%m0]", operands);
+ output_asm_insn ("strd%?\t%1, [%m0]", operands);
else
- output_asm_insn ("stm%(ia%)\t%m0, %M1", operands);
+ output_asm_insn ("stm%?\t%m0, %M1", operands);
}
break;
case PRE_INC:
gcc_assert (TARGET_LDRD);
if (emit)
- output_asm_insn ("str%(d%)\t%1, [%m0, #8]!", operands);
+ output_asm_insn ("strd%?\t%1, [%m0, #8]!", operands);
break;
case PRE_DEC:
if (emit)
{
if (TARGET_LDRD)
- output_asm_insn ("str%(d%)\t%1, [%m0, #-8]!", operands);
+ output_asm_insn ("strd%?\t%1, [%m0, #-8]!", operands);
else
- output_asm_insn ("stm%(db%)\t%m0!, %M1", operands);
+ output_asm_insn ("stmdb%?\t%m0!, %M1", operands);
}
break;
if (emit)
{
if (TARGET_LDRD)
- output_asm_insn ("str%(d%)\t%1, [%m0], #8", operands);
+ output_asm_insn ("strd%?\t%1, [%m0], #8", operands);
else
- output_asm_insn ("stm%(ia%)\t%m0!, %M1", operands);
+ output_asm_insn ("stm%?\t%m0!, %M1", operands);
}
break;
case POST_DEC:
gcc_assert (TARGET_LDRD);
if (emit)
- output_asm_insn ("str%(d%)\t%1, [%m0], #-8", operands);
+ output_asm_insn ("strd%?\t%1, [%m0], #-8", operands);
break;
case PRE_MODIFY:
else if (GET_CODE (XEXP (operands[0], 0)) == PRE_MODIFY)
{
if (emit)
- output_asm_insn ("str%(d%)\t%0, [%1, %2]!", otherops);
+ output_asm_insn ("strd%?\t%0, [%1, %2]!", otherops);
}
else
{
if (emit)
- output_asm_insn ("str%(d%)\t%0, [%1], %2", otherops);
+ output_asm_insn ("strd%?\t%0, [%1], %2", otherops);
}
break;
{
case -8:
if (emit)
- output_asm_insn ("stm%(db%)\t%m0, %M1", operands);
+ output_asm_insn ("stmdb%?\t%m0, %M1", operands);
return "";
case -4:
if (TARGET_THUMB2)
break;
if (emit)
- output_asm_insn ("stm%(da%)\t%m0, %M1", operands);
+ output_asm_insn ("stmda%?\t%m0, %M1", operands);
return "";
case 4:
if (TARGET_THUMB2)
break;
if (emit)
- output_asm_insn ("stm%(ib%)\t%m0, %M1", operands);
+ output_asm_insn ("stmib%?\t%m0, %M1", operands);
return "";
}
}
otherops[0] = operands[1];
otherops[1] = XEXP (XEXP (operands[0], 0), 0);
if (emit)
- output_asm_insn ("str%(d%)\t%0, [%1, %2]", otherops);
+ output_asm_insn ("strd%?\t%0, [%1, %2]", otherops);
return "";
}
/* Fall through */
switch (GET_CODE (XEXP (operands[1], 0)))
{
case REG:
- output_asm_insn ("ldm%(ia%)\t%m1, %M0", operands);
+ output_asm_insn ("ldmia%?\t%m1, %M0", operands);
break;
case LABEL_REF:
case CONST:
output_asm_insn ("adr%?\t%0, %1", operands);
- output_asm_insn ("ldm%(ia%)\t%0, %M0", operands);
+ output_asm_insn ("ldmia%?\t%0, %M0", operands);
break;
default:
switch (GET_CODE (XEXP (operands[0], 0)))
{
case REG:
- output_asm_insn ("stm%(ia%)\t%m0, %M1", operands);
+ output_asm_insn ("stm%?\t%m0, %M1", operands);
break;
default:
gcc_assert (stack_adjust == 0 || stack_adjust == 4);
if (stack_adjust && arm_arch5 && TARGET_ARM)
- if (TARGET_UNIFIED_ASM)
sprintf (instr, "ldmib%s\t%%|sp, {", conditional);
- else
- sprintf (instr, "ldm%sib\t%%|sp, {", conditional);
else
{
/* If we can't use ldmib (SA110 bug),
if (stack_adjust)
live_regs_mask |= 1 << 3;
- if (TARGET_UNIFIED_ASM)
- sprintf (instr, "ldmfd%s\t%%|sp, {", conditional);
- else
- sprintf (instr, "ldm%sfd\t%%|sp, {", conditional);
+ sprintf (instr, "ldmfd%s\t%%|sp, {", conditional);
}
}
else
- if (TARGET_UNIFIED_ASM)
sprintf (instr, "pop%s\t{", conditional);
- else
- sprintf (instr, "ldm%sfd\t%%|sp!, {", conditional);
p = instr + strlen (instr);
arm_print_condition (stream);
return;
- case '(':
- /* Nothing in unified syntax, otherwise the current condition code. */
- if (!TARGET_UNIFIED_ASM)
- arm_print_condition (stream);
- break;
-
- case ')':
- /* The current condition code in unified syntax, otherwise nothing. */
- if (TARGET_UNIFIED_ASM)
- arm_print_condition (stream);
- break;
-
case '.':
/* The current condition code for a condition code setting instruction.
Preceded by 's' in unified syntax, otherwise followed by 's'. */
- if (TARGET_UNIFIED_ASM)
- {
- fputc('s', stream);
- arm_print_condition (stream);
- }
- else
- {
- arm_print_condition (stream);
- fputc('s', stream);
- }
+ fputc('s', stream);
+ arm_print_condition (stream);
return;
case '!':
/* If the instruction is conditionally executed then print
the current condition code, otherwise print 's'. */
- gcc_assert (TARGET_THUMB2 && TARGET_UNIFIED_ASM);
+ gcc_assert (TARGET_THUMB2);
if (current_insn_predicate)
arm_print_condition (stream);
else
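With the divided branch gone, %. always prints the unified ordering: a
flag-setting add executed under an EQ condition comes out as "addseq"
rather than the divided-syntax "addeqs". The .md changes below drop %.
from the templates altogether, writing the 's' into the mnemonic and
using %? for the condition (e.g. "adds%?").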
char c;
c = flag_chars[set_flags];
- if (TARGET_UNIFIED_ASM)
+ shift = shift_op(operands[3], &val);
+ if (shift)
{
- shift = shift_op(operands[3], &val);
- if (shift)
- {
- if (val != -1)
- operands[2] = GEN_INT(val);
- sprintf (pattern, "%s%%%c\t%%0, %%1, %%2", shift, c);
- }
- else
- sprintf (pattern, "mov%%%c\t%%0, %%1", c);
+ if (val != -1)
+ operands[2] = GEN_INT(val);
+ sprintf (pattern, "%s%%%c\t%%0, %%1, %%2", shift, c);
}
else
- sprintf (pattern, "mov%%%c\t%%0, %%1%%S3", c);
+ sprintf (pattern, "mov%%%c\t%%0, %%1", c);
+
output_asm_insn (pattern, operands);
return "";
}
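For example, a flag-setting left shift by two that divided syntax
printed as "movs r0, r1, asl #2" is now emitted as "lsls r0, r1, #2",
matching the ARM_LSL_NAME change above.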
void
arm_declare_function_name (FILE *stream, const char *name, tree decl)
{
- if (TARGET_UNIFIED_ASM)
- fprintf (stream, "\t.syntax unified\n");
- else
- fprintf (stream, "\t.syntax divided\n");
+
+ fprintf (stream, "\t.syntax unified\n");
if (TARGET_THUMB)
{
&& (arm_disable_literal_pool \
|| (!optimize_size && !current_tune->prefer_constant_pool)))
-/* We could use unified syntax for arm mode, but for now we just use it
- for thumb mode. */
-#define TARGET_UNIFIED_ASM (TARGET_THUMB)
-
/* Nonzero if this chip provides the DMB instruction. */
#define TARGET_HAVE_DMB (arm_arch6m || arm_arch7)
"\t.syntax divided\n")
#undef ASM_APP_OFF
-#define ASM_APP_OFF (TARGET_ARM ? "\t.arm\n\t.syntax divided\n" : \
- "\t.thumb\n\t.syntax unified\n")
+#define ASM_APP_OFF (TARGET_ARM ? "\t.arm\n\t.syntax unified\n" : \
+ "\t.thumb\n\t.syntax unified\n")
/* Output a push or a pop instruction (only used when profiling).
We can't push STATIC_CHAIN_REGNUM (r12) directly with Thumb-1. We know
#define ASM_OUTPUT_REG_PUSH(STREAM, REGNO) \
do \
{ \
- if (TARGET_ARM) \
- asm_fprintf (STREAM,"\tstmfd\t%r!,{%r}\n", \
- STACK_POINTER_REGNUM, REGNO); \
- else if (TARGET_THUMB1 \
+ if (TARGET_THUMB1 \
&& (REGNO) == STATIC_CHAIN_REGNUM) \
{ \
asm_fprintf (STREAM, "\tpush\t{r7}\n"); \
#define ASM_OUTPUT_REG_POP(STREAM, REGNO) \
do \
{ \
- if (TARGET_ARM) \
- asm_fprintf (STREAM, "\tldmfd\t%r!,{%r}\n", \
- STACK_POINTER_REGNUM, REGNO); \
- else if (TARGET_THUMB1 \
- && (REGNO) == STATIC_CHAIN_REGNUM) \
+ if (TARGET_THUMB1 \
+ && (REGNO) == STATIC_CHAIN_REGNUM) \
{ \
asm_fprintf (STREAM, "\tpop\t{r7}\n"); \
asm_fprintf (STREAM, "\tmov\t%r, r7\n", REGNO);\
(plus:SI (match_dup 1) (match_dup 2)))]
"TARGET_ARM"
"@
- add%.\\t%0, %1, %2
- sub%.\\t%0, %1, #%n2
- add%.\\t%0, %1, %2"
+ adds%?\\t%0, %1, %2
+ subs%?\\t%0, %1, #%n2
+ adds%?\\t%0, %1, %2"
[(set_attr "conds" "set")
(set_attr "type" "alus_imm,alus_imm,alus_sreg")]
)
(match_operand:SI 3 "arm_addimm_operand" "I,L")))]
"TARGET_32BIT && INTVAL (operands[2]) == -INTVAL (operands[3])"
"@
- add%.\\t%0, %1, %3
- sub%.\\t%0, %1, #%n3"
+ adds%?\\t%0, %1, %3
+ subs%?\\t%0, %1, #%n3"
[(set_attr "conds" "set")
(set_attr "type" "alus_sreg")]
)
(plus:SI (match_dup 1) (match_dup 2)))]
"TARGET_32BIT"
"@
- add%.\\t%0, %1, %2
- sub%.\\t%0, %1, #%n2
- add%.\\t%0, %1, %2"
+ adds%?\\t%0, %1, %2
+ subs%?\\t%0, %1, #%n2
+ adds%?\\t%0, %1, %2"
[(set_attr "conds" "set")
(set_attr "type" "alus_imm,alus_imm,alus_sreg")]
)
(plus:SI (match_dup 1) (match_dup 2)))]
"TARGET_32BIT"
"@
- add%.\\t%0, %1, %2
- add%.\\t%0, %1, %2
- sub%.\\t%0, %1, #%n2"
+ adds%?\\t%0, %1, %2
+ adds%?\\t%0, %1, %2
+ subs%?\\t%0, %1, #%n2"
[(set_attr "conds" "set")
(set_attr "type" "alus_imm,alus_imm,alus_sreg")]
)
(LTUGEU:SI (reg:<cnb> CC_REGNUM) (const_int 0))))
(clobber (reg:CC CC_REGNUM))]
"TARGET_32BIT"
- "adc%.\\t%0, %1, %2"
+ "adcs%?\\t%0, %1, %2"
[(set_attr "conds" "set")
(set_attr "type" "adcs_reg")]
)
(minus:SI (match_dup 1) (match_dup 2)))]
"TARGET_32BIT"
"@
- sub%.\\t%0, %1, %2
- sub%.\\t%0, %1, %2
- rsb%.\\t%0, %2, %1"
+ subs%?\\t%0, %1, %2
+ subs%?\\t%0, %1, %2
+ rsbs%?\\t%0, %2, %1"
[(set_attr "conds" "set")
(set_attr "type" "alus_imm,alus_sreg,alus_sreg")]
)
(minus:SI (match_dup 1) (match_dup 2)))]
"TARGET_32BIT"
"@
- sub%.\\t%0, %1, %2
- sub%.\\t%0, %1, %2
- rsb%.\\t%0, %2, %1"
+ subs%?\\t%0, %1, %2
+ subs%?\\t%0, %1, %2
+ rsbs%?\\t%0, %2, %1"
[(set_attr "conds" "set")
(set_attr "type" "alus_imm,alus_sreg,alus_sreg")]
)
(set (match_operand:SI 0 "s_register_operand" "=&r,&r")
(mult:SI (match_dup 2) (match_dup 1)))]
"TARGET_ARM && !arm_arch6"
- "mul%.\\t%0, %2, %1"
+ "muls%?\\t%0, %2, %1"
[(set_attr "conds" "set")
(set_attr "type" "muls")]
)
(set (match_operand:SI 0 "s_register_operand" "=r")
(mult:SI (match_dup 2) (match_dup 1)))]
"TARGET_ARM && arm_arch6 && optimize_size"
- "mul%.\\t%0, %2, %1"
+ "muls%?\\t%0, %2, %1"
[(set_attr "conds" "set")
(set_attr "type" "muls")]
)
(const_int 0)))
(clobber (match_scratch:SI 0 "=&r,&r"))]
"TARGET_ARM && !arm_arch6"
- "mul%.\\t%0, %2, %1"
+ "muls%?\\t%0, %2, %1"
[(set_attr "conds" "set")
(set_attr "type" "muls")]
)
(const_int 0)))
(clobber (match_scratch:SI 0 "=r"))]
"TARGET_ARM && arm_arch6 && optimize_size"
- "mul%.\\t%0, %2, %1"
+ "muls%?\\t%0, %2, %1"
[(set_attr "conds" "set")
(set_attr "type" "muls")]
)
(plus:SI (mult:SI (match_dup 2) (match_dup 1))
(match_dup 3)))]
"TARGET_ARM && arm_arch6"
- "mla%.\\t%0, %2, %1, %3"
+ "mlas%?\\t%0, %2, %1, %3"
[(set_attr "conds" "set")
(set_attr "type" "mlas")]
)
(plus:SI (mult:SI (match_dup 2) (match_dup 1))
(match_dup 3)))]
"TARGET_ARM && arm_arch6 && optimize_size"
- "mla%.\\t%0, %2, %1, %3"
+ "mlas%?\\t%0, %2, %1, %3"
[(set_attr "conds" "set")
(set_attr "type" "mlas")]
)
(const_int 0)))
(clobber (match_scratch:SI 0 "=&r,&r,&r,&r"))]
"TARGET_ARM && !arm_arch6"
- "mla%.\\t%0, %2, %1, %3"
+ "mlas%?\\t%0, %2, %1, %3"
[(set_attr "conds" "set")
(set_attr "type" "mlas")]
)
(const_int 0)))
(clobber (match_scratch:SI 0 "=r"))]
"TARGET_ARM && arm_arch6 && optimize_size"
- "mla%.\\t%0, %2, %1, %3"
+ "mlas%?\\t%0, %2, %1, %3"
[(set_attr "conds" "set")
(set_attr "type" "mlas")]
)
(and:SI (match_dup 1) (match_dup 2)))]
"TARGET_32BIT"
"@
- and%.\\t%0, %1, %2
- bic%.\\t%0, %1, #%B2
- and%.\\t%0, %1, %2"
+ ands%?\\t%0, %1, %2
+ bics%?\\t%0, %1, #%B2
+ ands%?\\t%0, %1, %2"
[(set_attr "conds" "set")
(set_attr "type" "logics_imm,logics_imm,logics_reg")]
)
"TARGET_32BIT"
"@
tst%?\\t%0, %1
- bic%.\\t%2, %0, #%B1
+ bics%?\\t%2, %0, #%B1
tst%?\\t%0, %1"
[(set_attr "conds" "set")
(set_attr "type" "logics_imm,logics_imm,logics_reg")]
(const_int 0)))
(clobber (match_scratch:SI 4 "=r"))]
"TARGET_ARM || (TARGET_THUMB2 && CONST_INT_P (operands[2]))"
- "bic%.%?\\t%4, %3, %1%S0"
+ "bics%?\\t%4, %3, %1%S0"
[(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")
(set_attr "conds" "set")
(match_dup 2)]))
(match_dup 3)))])]
"TARGET_ARM || (TARGET_THUMB2 && CONST_INT_P (operands[2]))"
- "bic%.%?\\t%4, %3, %1%S0"
+ "bics%?\\t%4, %3, %1%S0"
[(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")
(set_attr "conds" "set")
(set (match_operand:SI 0 "s_register_operand" "=r")
(and:SI (not:SI (match_dup 2)) (match_dup 1)))]
"TARGET_32BIT"
- "bic%.\\t%0, %1, %2"
+ "bics\\t%0, %1, %2"
[(set_attr "conds" "set")
(set_attr "type" "logics_shift_reg")]
)
(const_int 0)))
(clobber (match_scratch:SI 0 "=r"))]
"TARGET_32BIT"
- "bic%.\\t%0, %1, %2"
+ "bics\\t%0, %1, %2"
[(set_attr "conds" "set")
(set_attr "type" "logics_shift_reg")]
)
(set (match_operand:SI 0 "s_register_operand" "=r,r")
(ior:SI (match_dup 1) (match_dup 2)))]
"TARGET_32BIT"
- "orr%.\\t%0, %1, %2"
+ "orrs%?\\t%0, %1, %2"
[(set_attr "conds" "set")
(set_attr "type" "logics_imm,logics_reg")]
)
(const_int 0)))
(clobber (match_scratch:SI 0 "=r,r"))]
"TARGET_32BIT"
- "orr%.\\t%0, %1, %2"
+ "orrs%?\\t%0, %1, %2"
[(set_attr "conds" "set")
(set_attr "type" "logics_imm,logics_reg")]
)
(set (match_operand:SI 0 "s_register_operand" "=r,r")
(xor:SI (match_dup 1) (match_dup 2)))]
"TARGET_32BIT"
- "eor%.\\t%0, %1, %2"
+ "eors%?\\t%0, %1, %2"
[(set_attr "conds" "set")
(set_attr "type" "logics_imm,logics_reg")]
)
(set (match_operand:SI 0 "s_register_operand" "=r,r")
(not:SI (match_op_dup 3 [(match_dup 1) (match_dup 2)])))]
"TARGET_32BIT"
- "mvn%.\\t%0, %1%S3"
+ "mvns%?\\t%0, %1%S3"
[(set_attr "conds" "set")
(set_attr "shift" "1")
(set_attr "arch" "32,a")
(const_int 0)))
(clobber (match_scratch:SI 0 "=r,r"))]
"TARGET_32BIT"
- "mvn%.\\t%0, %1%S3"
+ "mvns%?\\t%0, %1%S3"
[(set_attr "conds" "set")
(set_attr "shift" "1")
(set_attr "arch" "32,a")
(unspec:HI [(match_operand:HI 1 "memory_operand" "Uw,Uh")]
UNSPEC_UNALIGNED_LOAD)))]
"unaligned_access && TARGET_32BIT"
- "ldr%(sh%)\t%0, %1\t@ unaligned"
+ "ldrsh%?\t%0, %1\t@ unaligned"
[(set_attr "arch" "t2,any")
(set_attr "length" "2,4")
(set_attr "predicable" "yes")
(unspec:HI [(match_operand:HI 1 "memory_operand" "Uw,m")]
UNSPEC_UNALIGNED_LOAD)))]
"unaligned_access && TARGET_32BIT"
- "ldr%(h%)\t%0, %1\t@ unaligned"
+ "ldrh%?\t%0, %1\t@ unaligned"
[(set_attr "arch" "t2,any")
(set_attr "length" "2,4")
(set_attr "predicable" "yes")
(unspec:HI [(match_operand:HI 1 "s_register_operand" "l,r")]
UNSPEC_UNALIGNED_STORE))]
"unaligned_access && TARGET_32BIT"
- "str%(h%)\t%1, %0\t@ unaligned"
+ "strh%?\t%1, %0\t@ unaligned"
[(set_attr "arch" "t2,any")
(set_attr "length" "2,4")
(set_attr "predicable" "yes")
(set (match_operand:SI 0 "s_register_operand" "=r")
(not:SI (match_dup 1)))]
"TARGET_32BIT"
- "mvn%.\\t%0, %1"
+ "mvns%?\\t%0, %1"
[(set_attr "conds" "set")
(set_attr "type" "mvn_reg")]
)
(const_int 0)))
(clobber (match_scratch:SI 0 "=r"))]
"TARGET_32BIT"
- "mvn%.\\t%0, %1"
+ "mvns%?\\t%0, %1"
[(set_attr "conds" "set")
(set_attr "type" "mvn_reg")]
)
"TARGET_ARM && arm_arch4 && !arm_arch6"
"@
#
- ldr%(h%)\\t%0, %1"
+ ldrh%?\\t%0, %1"
[(set_attr "type" "alu_shift_reg,load_byte")
(set_attr "predicable" "yes")]
)
"TARGET_ARM && arm_arch6"
"@
uxth%?\\t%0, %1
- ldr%(h%)\\t%0, %1"
+ ldrh%?\\t%0, %1"
[(set_attr "predicable" "yes")
(set_attr "type" "extend,load_byte")]
)
"TARGET_ARM && !arm_arch6"
"@
#
- ldr%(b%)\\t%0, %1\\t%@ zero_extendqisi2"
+ ldrb%?\\t%0, %1\\t%@ zero_extendqisi2"
[(set_attr "length" "8,4")
(set_attr "type" "alu_shift_reg,load_byte")
(set_attr "predicable" "yes")]
(zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "r,Uh")))]
"TARGET_ARM && arm_arch6"
"@
- uxtb%(%)\\t%0, %1
- ldr%(b%)\\t%0, %1\\t%@ zero_extendqisi2"
+ uxtb%?\\t%0, %1
+ ldrb%?\\t%0, %1\\t%@ zero_extendqisi2"
[(set_attr "type" "extend,load_byte")
(set_attr "predicable" "yes")]
)
"TARGET_ARM && arm_arch4 && !arm_arch6"
"@
#
- ldr%(sh%)\\t%0, %1"
+ ldrsh%?\\t%0, %1"
[(set_attr "length" "8,4")
(set_attr "type" "alu_shift_reg,load_byte")
(set_attr "predicable" "yes")]
"TARGET_32BIT && arm_arch6"
"@
sxth%?\\t%0, %1
- ldr%(sh%)\\t%0, %1"
+ ldrsh%?\\t%0, %1"
[(set_attr "type" "extend,load_byte")
(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")]
[(set (match_operand:HI 0 "s_register_operand" "=r")
(sign_extend:HI (match_operand:QI 1 "arm_extendqisi_mem_op" "Uq")))]
"TARGET_ARM && arm_arch4"
- "ldr%(sb%)\\t%0, %1"
+ "ldrsb%?\\t%0, %1"
[(set_attr "type" "load_byte")
(set_attr "predicable" "yes")]
)
"TARGET_ARM && arm_arch4 && !arm_arch6"
"@
#
- ldr%(sb%)\\t%0, %1"
+ ldrsb%?\\t%0, %1"
[(set_attr "length" "8,4")
(set_attr "type" "alu_shift_reg,load_byte")
(set_attr "predicable" "yes")]
"TARGET_ARM && arm_arch6"
"@
sxtb%?\\t%0, %1
- ldr%(sb%)\\t%0, %1"
+ ldrsb%?\\t%0, %1"
[(set_attr "type" "extend,load_byte")
(set_attr "predicable" "yes")]
)
"TARGET_32BIT"
"@
cmp%?\\t%0, #0
- sub%.\\t%0, %1, #0"
+ subs%?\\t%0, %1, #0"
[(set_attr "conds" "set")
(set_attr "type" "alus_imm,alus_imm")]
)
mov%?\\t%0, %1\\t%@ movhi
mvn%?\\t%0, #%B1\\t%@ movhi
movw%?\\t%0, %L1\\t%@ movhi
- str%(h%)\\t%1, %0\\t%@ movhi
- ldr%(h%)\\t%0, %1\\t%@ movhi"
+ strh%?\\t%1, %0\\t%@ movhi
+ ldrh%?\\t%0, %1\\t%@ movhi"
[(set_attr "predicable" "yes")
(set_attr "pool_range" "*,*,*,*,256")
(set_attr "neg_pool_range" "*,*,*,*,244")
mov%?\\t%0, %1
mov%?\\t%0, %1
mvn%?\\t%0, #%B1
- ldr%(b%)\\t%0, %1
- str%(b%)\\t%1, %0
- ldr%(b%)\\t%0, %1
- str%(b%)\\t%1, %0"
+ ldrb%?\\t%0, %1
+ strb%?\\t%1, %0
+ ldrb%?\\t%0, %1
+ strb%?\\t%1, %0"
[(set_attr "type" "mov_reg,mov_reg,mov_imm,mov_imm,mvn_imm,load1,store1,load1,store1")
(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "yes,yes,yes,no,no,no,no,no,no")
switch (which_alternative)
{
case 0: /* ARM register from memory */
- return \"ldr%(h%)\\t%0, %1\\t%@ __fp16\";
+ return \"ldrh%?\\t%0, %1\\t%@ __fp16\";
case 1: /* memory from ARM register */
- return \"str%(h%)\\t%1, %0\\t%@ __fp16\";
+ return \"strh%?\\t%1, %0\\t%@ __fp16\";
case 2: /* ARM register from ARM register */
return \"mov%?\\t%0, %1\\t%@ __fp16\";
case 3: /* ARM register from constant */
(const_int 0)))
(clobber (match_scratch:SI 1 "=r"))]
"TARGET_32BIT"
- "orr%.\\t%1, %Q0, %R0"
+ "orrs%?\\t%1, %Q0, %R0"
[(set_attr "conds" "set")
(set_attr "type" "logics_reg")]
)
(define_insn "nop"
[(const_int 0)]
"TARGET_EITHER"
- "*
- if (TARGET_UNIFIED_ASM)
- return \"nop\";
- if (TARGET_ARM)
- return \"mov%?\\t%|r0, %|r0\\t%@ nop\";
- return \"mov\\tr8, r8\";
- "
+ "nop"
[(set (attr "length")
(if_then_else (eq_attr "is_thumb" "yes")
(const_int 2)
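The old template hand-picked "mov r0, r0" (ARM) or "mov r8, r8" (Thumb)
when unified syntax was off; with .syntax unified in force everywhere,
the assembler accepts the canonical "nop" mnemonic in both states.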
(match_op_dup 1 [(match_op_dup 3 [(match_dup 4) (match_dup 5)])
(match_dup 2)]))]
"TARGET_32BIT"
- "%i1%.\\t%0, %2, %4%S3"
+ "%i1s%?\\t%0, %2, %4%S3"
[(set_attr "conds" "set")
(set_attr "shift" "4")
(set_attr "arch" "32,a")
(const_int 0)))
(clobber (match_scratch:SI 0 "=r,r"))]
"TARGET_32BIT"
- "%i1%.\\t%0, %2, %4%S3"
+ "%i1s%?\\t%0, %2, %4%S3"
[(set_attr "conds" "set")
(set_attr "shift" "4")
(set_attr "arch" "32,a")
(minus:SI (match_dup 1)
(match_op_dup 2 [(match_dup 3) (match_dup 4)])))]
"TARGET_32BIT"
- "sub%.\\t%0, %1, %3%S2"
+ "subs%?\\t%0, %1, %3%S2"
[(set_attr "conds" "set")
(set_attr "shift" "3")
(set_attr "arch" "32,a,a")
(const_int 0)))
(clobber (match_scratch:SI 0 "=r,r,r"))]
"TARGET_32BIT"
- "sub%.\\t%0, %1, %3%S2"
+ "subs%?\\t%0, %1, %3%S2"
[(set_attr "conds" "set")
(set_attr "shift" "3")
(set_attr "arch" "32,a,a")
if (val1 == 4 || val2 == 4)
/* Other val must be 8, since we know they are adjacent and neither
is zero. */
- output_asm_insn (\"ldm%(ib%)\\t%0, {%1, %2}\", ldm);
+ output_asm_insn (\"ldmib%?\\t%0, {%1, %2}\", ldm);
else if (const_ok_for_arm (val1) || const_ok_for_arm (-val1))
{
ldm[0] = ops[0] = operands[4];
ops[2] = GEN_INT (val1);
output_add_immediate (ops);
if (val1 < val2)
- output_asm_insn (\"ldm%(ia%)\\t%0, {%1, %2}\", ldm);
+ output_asm_insn (\"ldmia%?\\t%0, {%1, %2}\", ldm);
else
- output_asm_insn (\"ldm%(da%)\\t%0, {%1, %2}\", ldm);
+ output_asm_insn (\"ldmda%?\\t%0, {%1, %2}\", ldm);
}
else
{
else if (val1 != 0)
{
if (val1 < val2)
- output_asm_insn (\"ldm%(da%)\\t%0, {%1, %2}\", ldm);
+ output_asm_insn (\"ldmda%?\\t%0, {%1, %2}\", ldm);
else
- output_asm_insn (\"ldm%(ia%)\\t%0, {%1, %2}\", ldm);
+ output_asm_insn (\"ldmia%?\\t%0, {%1, %2}\", ldm);
}
else
{
if (val1 < val2)
- output_asm_insn (\"ldm%(ia%)\\t%0, {%1, %2}\", ldm);
+ output_asm_insn (\"ldmia%?\\t%0, {%1, %2}\", ldm);
else
- output_asm_insn (\"ldm%(da%)\\t%0, {%1, %2}\", ldm);
+ output_asm_insn (\"ldmda%?\\t%0, {%1, %2}\", ldm);
}
output_asm_insn (\"%I3%?\\t%0, %1, %2\", arith);
return \"\";
int i;
char pattern[100];
- if (TARGET_ARM)
- strcpy (pattern, \"stm%(fd%)\\t%m0!, {%1\");
- else if (TARGET_THUMB2)
+ if (TARGET_32BIT)
strcpy (pattern, \"push%?\\t{%1\");
else
strcpy (pattern, \"push\\t{%1\");
masm-syntax-unified
Target Report Var(inline_asm_unified) Init(0) Save
-Assume unified syntax for Thumb inline assembly code.
+Assume unified syntax for inline assembly code.
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
-(define_insn "*ldm4_ia"
+(define_insn "*ldm4_"
[(match_parallel 0 "load_multiple_operation"
[(set (match_operand:SI 1 "arm_hard_general_register_operand" "")
(mem:SI (match_operand:SI 5 "s_register_operand" "rk")))
(mem:SI (plus:SI (match_dup 5)
(const_int 12))))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 4"
- "ldm%(ia%)\t%5, {%1, %2, %3, %4}"
+ "ldm%?\t%5, {%1, %2, %3, %4}"
[(set_attr "type" "load4")
(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
(mem:SI (plus:SI (match_dup 5)
(const_int 12))))])]
"TARGET_THUMB1 && XVECLEN (operands[0], 0) == 4"
- "ldm%(ia%)\t%5, {%1, %2, %3, %4}"
+ "ldmia\t%5, {%1, %2, %3, %4}"
[(set_attr "type" "load4")])
(define_insn "*ldm4_ia_update"
(mem:SI (plus:SI (match_dup 5)
(const_int 12))))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 5"
- "ldm%(ia%)\t%5!, {%1, %2, %3, %4}"
+ "ldmia%?\t%5!, {%1, %2, %3, %4}"
[(set_attr "type" "load4")
(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
(mem:SI (plus:SI (match_dup 5)
(const_int 12))))])]
"TARGET_THUMB1 && XVECLEN (operands[0], 0) == 5"
- "ldm%(ia%)\t%5!, {%1, %2, %3, %4}"
+ "ldmia\t%5!, {%1, %2, %3, %4}"
[(set_attr "type" "load4")])
-(define_insn "*stm4_ia"
+(define_insn "*stm4_"
[(match_parallel 0 "store_multiple_operation"
[(set (mem:SI (match_operand:SI 5 "s_register_operand" "rk"))
(match_operand:SI 1 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 5) (const_int 12)))
(match_operand:SI 4 "arm_hard_general_register_operand" ""))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 4"
- "stm%(ia%)\t%5, {%1, %2, %3, %4}"
+ "stm%?\t%5, {%1, %2, %3, %4}"
[(set_attr "type" "store4")
(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
(set (mem:SI (plus:SI (match_dup 5) (const_int 12)))
(match_operand:SI 4 "arm_hard_general_register_operand" ""))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 5"
- "stm%(ia%)\t%5!, {%1, %2, %3, %4}"
+ "stmia%?\t%5!, {%1, %2, %3, %4}"
[(set_attr "type" "store4")
(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
(set (mem:SI (plus:SI (match_dup 5) (const_int 12)))
(match_operand:SI 4 "low_register_operand" ""))])]
"TARGET_THUMB1 && XVECLEN (operands[0], 0) == 5"
- "stm%(ia%)\t%5!, {%1, %2, %3, %4}"
+ "stmia\t%5!, {%1, %2, %3, %4}"
[(set_attr "type" "store4")])
(define_insn "*ldm4_ib"
(mem:SI (plus:SI (match_dup 5)
(const_int 16))))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 4"
- "ldm%(ib%)\t%5, {%1, %2, %3, %4}"
+ "ldmib%?\t%5, {%1, %2, %3, %4}"
[(set_attr "type" "load4")
(set_attr "predicable" "yes")])
(mem:SI (plus:SI (match_dup 5)
(const_int 16))))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 5"
- "ldm%(ib%)\t%5!, {%1, %2, %3, %4}"
+ "ldmib%?\t%5!, {%1, %2, %3, %4}"
[(set_attr "type" "load4")
(set_attr "predicable" "yes")])
(set (mem:SI (plus:SI (match_dup 5) (const_int 16)))
(match_operand:SI 4 "arm_hard_general_register_operand" ""))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 4"
- "stm%(ib%)\t%5, {%1, %2, %3, %4}"
+ "stmib%?\t%5, {%1, %2, %3, %4}"
[(set_attr "type" "store4")
(set_attr "predicable" "yes")])
(set (mem:SI (plus:SI (match_dup 5) (const_int 16)))
(match_operand:SI 4 "arm_hard_general_register_operand" ""))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 5"
- "stm%(ib%)\t%5!, {%1, %2, %3, %4}"
+ "stmib%?\t%5!, {%1, %2, %3, %4}"
[(set_attr "type" "store4")
(set_attr "predicable" "yes")])
(set (match_operand:SI 4 "arm_hard_general_register_operand" "")
(mem:SI (match_dup 5)))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 4"
- "ldm%(da%)\t%5, {%1, %2, %3, %4}"
+ "ldmda%?\t%5, {%1, %2, %3, %4}"
[(set_attr "type" "load4")
(set_attr "predicable" "yes")])
(set (match_operand:SI 4 "arm_hard_general_register_operand" "")
(mem:SI (match_dup 5)))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 5"
- "ldm%(da%)\t%5!, {%1, %2, %3, %4}"
+ "ldmda%?\t%5!, {%1, %2, %3, %4}"
[(set_attr "type" "load4")
(set_attr "predicable" "yes")])
(set (mem:SI (match_dup 5))
(match_operand:SI 4 "arm_hard_general_register_operand" ""))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 4"
- "stm%(da%)\t%5, {%1, %2, %3, %4}"
+ "stmda%?\t%5, {%1, %2, %3, %4}"
[(set_attr "type" "store4")
(set_attr "predicable" "yes")])
(set (mem:SI (match_dup 5))
(match_operand:SI 4 "arm_hard_general_register_operand" ""))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 5"
- "stm%(da%)\t%5!, {%1, %2, %3, %4}"
+ "stmda%?\t%5!, {%1, %2, %3, %4}"
[(set_attr "type" "store4")
(set_attr "predicable" "yes")])
(mem:SI (plus:SI (match_dup 5)
(const_int -4))))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 4"
- "ldm%(db%)\t%5, {%1, %2, %3, %4}"
+ "ldmdb%?\t%5, {%1, %2, %3, %4}"
[(set_attr "type" "load4")
(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
(mem:SI (plus:SI (match_dup 5)
(const_int -4))))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 5"
- "ldm%(db%)\t%5!, {%1, %2, %3, %4}"
+ "ldmdb%?\t%5!, {%1, %2, %3, %4}"
[(set_attr "type" "load4")
(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
(set (mem:SI (plus:SI (match_dup 5) (const_int -4)))
(match_operand:SI 4 "arm_hard_general_register_operand" ""))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 4"
- "stm%(db%)\t%5, {%1, %2, %3, %4}"
+ "stmdb%?\t%5, {%1, %2, %3, %4}"
[(set_attr "type" "store4")
(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
(set (mem:SI (plus:SI (match_dup 5) (const_int -4)))
(match_operand:SI 4 "arm_hard_general_register_operand" ""))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 5"
- "stm%(db%)\t%5!, {%1, %2, %3, %4}"
+ "stmdb%?\t%5!, {%1, %2, %3, %4}"
[(set_attr "type" "store4")
(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
FAIL;
})
-(define_insn "*ldm3_ia"
+(define_insn "*ldm3_"
[(match_parallel 0 "load_multiple_operation"
[(set (match_operand:SI 1 "arm_hard_general_register_operand" "")
(mem:SI (match_operand:SI 4 "s_register_operand" "rk")))
(mem:SI (plus:SI (match_dup 4)
(const_int 8))))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 3"
- "ldm%(ia%)\t%4, {%1, %2, %3}"
+ "ldm%?\t%4, {%1, %2, %3}"
[(set_attr "type" "load3")
(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
(mem:SI (plus:SI (match_dup 4)
(const_int 8))))])]
"TARGET_THUMB1 && XVECLEN (operands[0], 0) == 3"
- "ldm%(ia%)\t%4, {%1, %2, %3}"
+ "ldmia\t%4, {%1, %2, %3}"
[(set_attr "type" "load3")])
(define_insn "*ldm3_ia_update"
(mem:SI (plus:SI (match_dup 4)
(const_int 8))))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 4"
- "ldm%(ia%)\t%4!, {%1, %2, %3}"
+ "ldmia%?\t%4!, {%1, %2, %3}"
[(set_attr "type" "load3")
(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
(mem:SI (plus:SI (match_dup 4)
(const_int 8))))])]
"TARGET_THUMB1 && XVECLEN (operands[0], 0) == 4"
- "ldm%(ia%)\t%4!, {%1, %2, %3}"
+ "ldmia\t%4!, {%1, %2, %3}"
[(set_attr "type" "load3")])
-(define_insn "*stm3_ia"
+(define_insn "*stm3_"
[(match_parallel 0 "store_multiple_operation"
[(set (mem:SI (match_operand:SI 4 "s_register_operand" "rk"))
(match_operand:SI 1 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 4) (const_int 8)))
(match_operand:SI 3 "arm_hard_general_register_operand" ""))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 3"
- "stm%(ia%)\t%4, {%1, %2, %3}"
+ "stm%?\t%4, {%1, %2, %3}"
[(set_attr "type" "store3")
(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
(set (mem:SI (plus:SI (match_dup 4) (const_int 8)))
(match_operand:SI 3 "arm_hard_general_register_operand" ""))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 4"
- "stm%(ia%)\t%4!, {%1, %2, %3}"
+ "stmia%?\t%4!, {%1, %2, %3}"
[(set_attr "type" "store3")
(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
(set (mem:SI (plus:SI (match_dup 4) (const_int 8)))
(match_operand:SI 3 "low_register_operand" ""))])]
"TARGET_THUMB1 && XVECLEN (operands[0], 0) == 4"
- "stm%(ia%)\t%4!, {%1, %2, %3}"
+ "stmia\t%4!, {%1, %2, %3}"
[(set_attr "type" "store3")])
(define_insn "*ldm3_ib"
(mem:SI (plus:SI (match_dup 4)
(const_int 12))))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 3"
- "ldm%(ib%)\t%4, {%1, %2, %3}"
+ "ldmib%?\t%4, {%1, %2, %3}"
[(set_attr "type" "load3")
(set_attr "predicable" "yes")])
(mem:SI (plus:SI (match_dup 4)
(const_int 12))))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 4"
- "ldm%(ib%)\t%4!, {%1, %2, %3}"
+ "ldmib%?\t%4!, {%1, %2, %3}"
[(set_attr "type" "load3")
(set_attr "predicable" "yes")])
(set (mem:SI (plus:SI (match_dup 4) (const_int 12)))
(match_operand:SI 3 "arm_hard_general_register_operand" ""))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 3"
- "stm%(ib%)\t%4, {%1, %2, %3}"
+ "stmib%?\t%4, {%1, %2, %3}"
[(set_attr "type" "store3")
(set_attr "predicable" "yes")])
(set (mem:SI (plus:SI (match_dup 4) (const_int 12)))
(match_operand:SI 3 "arm_hard_general_register_operand" ""))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 4"
- "stm%(ib%)\t%4!, {%1, %2, %3}"
+ "stmib%?\t%4!, {%1, %2, %3}"
[(set_attr "type" "store3")
(set_attr "predicable" "yes")])
(set (match_operand:SI 3 "arm_hard_general_register_operand" "")
(mem:SI (match_dup 4)))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 3"
- "ldm%(da%)\t%4, {%1, %2, %3}"
+ "ldmda%?\t%4, {%1, %2, %3}"
[(set_attr "type" "load3")
(set_attr "predicable" "yes")])
(set (match_operand:SI 3 "arm_hard_general_register_operand" "")
(mem:SI (match_dup 4)))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 4"
- "ldm%(da%)\t%4!, {%1, %2, %3}"
+ "ldmda%?\t%4!, {%1, %2, %3}"
[(set_attr "type" "load3")
(set_attr "predicable" "yes")])
(set (mem:SI (match_dup 4))
(match_operand:SI 3 "arm_hard_general_register_operand" ""))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 3"
- "stm%(da%)\t%4, {%1, %2, %3}"
+ "stmda%?\t%4, {%1, %2, %3}"
[(set_attr "type" "store3")
(set_attr "predicable" "yes")])
(set (mem:SI (match_dup 4))
(match_operand:SI 3 "arm_hard_general_register_operand" ""))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 4"
- "stm%(da%)\t%4!, {%1, %2, %3}"
+ "stmda%?\t%4!, {%1, %2, %3}"
[(set_attr "type" "store3")
(set_attr "predicable" "yes")])
(mem:SI (plus:SI (match_dup 4)
(const_int -4))))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 3"
- "ldm%(db%)\t%4, {%1, %2, %3}"
+ "ldmdb%?\t%4, {%1, %2, %3}"
[(set_attr "type" "load3")
(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
(mem:SI (plus:SI (match_dup 4)
(const_int -4))))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 4"
- "ldm%(db%)\t%4!, {%1, %2, %3}"
+ "ldmdb%?\t%4!, {%1, %2, %3}"
[(set_attr "type" "load3")
(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
(set (mem:SI (plus:SI (match_dup 4) (const_int -4)))
(match_operand:SI 3 "arm_hard_general_register_operand" ""))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 3"
- "stm%(db%)\t%4, {%1, %2, %3}"
+ "stmdb%?\t%4, {%1, %2, %3}"
[(set_attr "type" "store3")
(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
(set (mem:SI (plus:SI (match_dup 4) (const_int -4)))
(match_operand:SI 3 "arm_hard_general_register_operand" ""))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 4"
- "stm%(db%)\t%4!, {%1, %2, %3}"
+ "stmdb%?\t%4!, {%1, %2, %3}"
[(set_attr "type" "store3")
(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
FAIL;
})
-(define_insn "*ldm2_ia"
+(define_insn "*ldm2_"
[(match_parallel 0 "load_multiple_operation"
[(set (match_operand:SI 1 "arm_hard_general_register_operand" "")
(mem:SI (match_operand:SI 3 "s_register_operand" "rk")))
(mem:SI (plus:SI (match_dup 3)
(const_int 4))))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 2"
- "ldm%(ia%)\t%3, {%1, %2}"
+ "ldm%?\t%3, {%1, %2}"
[(set_attr "type" "load2")
(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
(mem:SI (plus:SI (match_dup 3)
(const_int 4))))])]
"TARGET_THUMB1 && XVECLEN (operands[0], 0) == 2"
- "ldm%(ia%)\t%3, {%1, %2}"
+ "ldmia\t%3, {%1, %2}"
[(set_attr "type" "load2")])
(define_insn "*ldm2_ia_update"
(mem:SI (plus:SI (match_dup 3)
(const_int 4))))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 3"
- "ldm%(ia%)\t%3!, {%1, %2}"
+ "ldmia%?\t%3!, {%1, %2}"
[(set_attr "type" "load2")
(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
(mem:SI (plus:SI (match_dup 3)
(const_int 4))))])]
"TARGET_THUMB1 && XVECLEN (operands[0], 0) == 3"
- "ldm%(ia%)\t%3!, {%1, %2}"
+ "ldmia\t%3!, {%1, %2}"
[(set_attr "type" "load2")])
-(define_insn "*stm2_ia"
+(define_insn "*stm2_"
[(match_parallel 0 "store_multiple_operation"
[(set (mem:SI (match_operand:SI 3 "s_register_operand" "rk"))
(match_operand:SI 1 "arm_hard_general_register_operand" ""))
(set (mem:SI (plus:SI (match_dup 3) (const_int 4)))
(match_operand:SI 2 "arm_hard_general_register_operand" ""))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 2"
- "stm%(ia%)\t%3, {%1, %2}"
+ "stm%?\t%3, {%1, %2}"
[(set_attr "type" "store2")
(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
(set (mem:SI (plus:SI (match_dup 3) (const_int 4)))
(match_operand:SI 2 "arm_hard_general_register_operand" ""))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 3"
- "stm%(ia%)\t%3!, {%1, %2}"
+ "stmia%?\t%3!, {%1, %2}"
[(set_attr "type" "store2")
(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
(set (mem:SI (plus:SI (match_dup 3) (const_int 4)))
(match_operand:SI 2 "low_register_operand" ""))])]
"TARGET_THUMB1 && XVECLEN (operands[0], 0) == 3"
- "stm%(ia%)\t%3!, {%1, %2}"
+ "stmia\t%3!, {%1, %2}"
[(set_attr "type" "store2")])
(define_insn "*ldm2_ib"
(mem:SI (plus:SI (match_dup 3)
(const_int 8))))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 2"
- "ldm%(ib%)\t%3, {%1, %2}"
+ "ldmib%?\t%3, {%1, %2}"
[(set_attr "type" "load2")
(set_attr "predicable" "yes")])
(mem:SI (plus:SI (match_dup 3)
(const_int 8))))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 3"
- "ldm%(ib%)\t%3!, {%1, %2}"
+ "ldmib%?\t%3!, {%1, %2}"
[(set_attr "type" "load2")
(set_attr "predicable" "yes")])
(set (mem:SI (plus:SI (match_dup 3) (const_int 8)))
(match_operand:SI 2 "arm_hard_general_register_operand" ""))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 2"
- "stm%(ib%)\t%3, {%1, %2}"
+ "stmib%?\t%3, {%1, %2}"
[(set_attr "type" "store2")
(set_attr "predicable" "yes")])
(set (mem:SI (plus:SI (match_dup 3) (const_int 8)))
(match_operand:SI 2 "arm_hard_general_register_operand" ""))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 3"
- "stm%(ib%)\t%3!, {%1, %2}"
+ "stmib%?\t%3!, {%1, %2}"
[(set_attr "type" "store2")
(set_attr "predicable" "yes")])
(set (match_operand:SI 2 "arm_hard_general_register_operand" "")
(mem:SI (match_dup 3)))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 2"
- "ldm%(da%)\t%3, {%1, %2}"
+ "ldmda%?\t%3, {%1, %2}"
[(set_attr "type" "load2")
(set_attr "predicable" "yes")])
(set (match_operand:SI 2 "arm_hard_general_register_operand" "")
(mem:SI (match_dup 3)))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 3"
- "ldm%(da%)\t%3!, {%1, %2}"
+ "ldmda%?\t%3!, {%1, %2}"
[(set_attr "type" "load2")
(set_attr "predicable" "yes")])
(set (mem:SI (match_dup 3))
(match_operand:SI 2 "arm_hard_general_register_operand" ""))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 2"
- "stm%(da%)\t%3, {%1, %2}"
+ "stmda%?\t%3, {%1, %2}"
[(set_attr "type" "store2")
(set_attr "predicable" "yes")])
(set (mem:SI (match_dup 3))
(match_operand:SI 2 "arm_hard_general_register_operand" ""))])]
"TARGET_ARM && XVECLEN (operands[0], 0) == 3"
- "stm%(da%)\t%3!, {%1, %2}"
+ "stmda%?\t%3!, {%1, %2}"
[(set_attr "type" "store2")
(set_attr "predicable" "yes")])
(mem:SI (plus:SI (match_dup 3)
(const_int -4))))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 2"
- "ldm%(db%)\t%3, {%1, %2}"
+ "ldmdb%?\t%3, {%1, %2}"
[(set_attr "type" "load2")
(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
(mem:SI (plus:SI (match_dup 3)
(const_int -4))))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 3"
- "ldm%(db%)\t%3!, {%1, %2}"
+ "ldmdb%?\t%3!, {%1, %2}"
[(set_attr "type" "load2")
(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
(set (mem:SI (plus:SI (match_dup 3) (const_int -4)))
(match_operand:SI 2 "arm_hard_general_register_operand" ""))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 2"
- "stm%(db%)\t%3, {%1, %2}"
+ "stmdb%?\t%3, {%1, %2}"
[(set_attr "type" "store2")
(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
(set (mem:SI (plus:SI (match_dup 3) (const_int -4)))
(match_operand:SI 2 "arm_hard_general_register_operand" ""))])]
"TARGET_32BIT && XVECLEN (operands[0], 0) == 3"
- "stm%(db%)\t%3!, {%1, %2}"
+ "stmdb%?\t%3!, {%1, %2}"
[(set_attr "type" "store2")
(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")])
{
enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_release (model))
- return \"ldr%(<sync_sfx>%)\\t%0, %1\";
+ return \"ldr<sync_sfx>%?\\t%0, %1\";
else
return \"lda<sync_sfx>%?\\t%0, %1\";
}
{
enum memmodel model = memmodel_from_int (INTVAL (operands[2]));
if (is_mm_relaxed (model) || is_mm_consume (model) || is_mm_acquire (model))
- return \"str%(<sync_sfx>%)\t%1, %0\";
+ return \"str<sync_sfx>%?\t%1, %0\";
else
return \"stl<sync_sfx>%?\t%1, %0\";
}
mov%?\\t%0, %1\\t%@ movhi
mov%?\\t%0, %1\\t%@ movhi
movw%?\\t%0, %L1\\t%@ movhi
- str%(h%)\\t%1, %0\\t%@ movhi
- ldr%(h%)\\t%0, %1\\t%@ movhi"
+ strh%?\\t%1, %0\\t%@ movhi
+ ldrh%?\\t%0, %1\\t%@ movhi"
[(set_attr "type" "mov_reg,mov_imm,mov_imm,mov_imm,store1,load1")
(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "yes,no,yes,no,no,no")
"TARGET_THUMB2 && arm_arch6"
"@
sxtb%?\\t%0, %1
- ldr%(sb%)\\t%0, %1"
+ ldrsb%?\\t%0, %1"
[(set_attr "type" "extend,load_byte")
(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")
"TARGET_THUMB2 && arm_arch6"
"@
uxth%?\\t%0, %1
- ldr%(h%)\\t%0, %1"
+ ldrh%?\\t%0, %1"
[(set_attr "type" "extend,load_byte")
(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")
(zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "r,m")))]
"TARGET_THUMB2 && arm_arch6"
"@
- uxtb%(%)\\t%0, %1
- ldr%(b%)\\t%0, %1\\t%@ zero_extendqisi2"
+ uxtb%?\\t%0, %1
+ ldrb%?\\t%0, %1\\t%@ zero_extendqisi2"
[(set_attr "type" "extend,load_byte")
(set_attr "predicable" "yes")
(set_attr "predicable_short_it" "no")
@item -masm-syntax-unified
@opindex masm-syntax-unified
Assume inline assembler is using unified asm syntax. The default is
-currently off which implies divided syntax. Currently this option is
-available only for Thumb1 and has no effect on ARM state and Thumb2.
-However, this may change in future releases of GCC. Divided syntax
-should be considered deprecated.
+currently off which implies divided syntax. This option has no impact
+on Thumb2 code, which always uses unified syntax. However, the default
+may change in future releases of GCC. Divided syntax should be
+considered deprecated.
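As an illustration (a hypothetical fragment, not from the manual):
with this option, Thumb1 inline assembler must use UAL mnemonics, so a
flag-setting immediate move is written "movs":

    int
    one (void)
    {
      int r;
      /* Unified syntax spells the flag-setting move "movs"; under the
         default divided syntax the same instruction is written "mov".  */
      __asm__ ("movs %0, #1" : "=l" (r) : : "cc");
      return r;
    }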
@item -mrestrict-it
@opindex mrestrict-it
+2015-11-06 Ramana Radhakrishnan <ramana.radhakrishnan@arm.com>
+
+ * gcc.target/arm/combine-movs.c: Adjust for unified asm.
+ * gcc.target/arm/interrupt-1.c: Likewise.
+ * gcc.target/arm/interrupt-2.c: Likewise.
+ * gcc.target/arm/unaligned-memcpy-4.c: Likewise.
+
2015-11-06 Thomas Schwinge <thomas@codesourcery.com>
* gfortran.dg/goacc/combined_loop.f90: XFAIL.
r[i] = 0;
}
-/* { dg-final { scan-assembler "lsrs\tr\[0-9\]" { target arm_thumb2 } } } */
-/* { dg-final { scan-assembler "movs\tr\[0-9\]" { target { ! arm_thumb2 } } } } */
+/* { dg-final { scan-assembler "lsrs\tr\[0-9\]" } } */
bar (0);
}
-/* { dg-final { scan-assembler "stmfd\tsp!, {r0, r1, r2, r3, r4, fp, ip, lr}" } } */
-/* { dg-final { scan-assembler "ldmfd\tsp!, {r0, r1, r2, r3, r4, fp, ip, pc}\\^" } } */
+/* { dg-final { scan-assembler "push\t{r0, r1, r2, r3, r4, fp, ip, lr}" } } */
+/* { dg-final { scan-assembler "pop\t{r0, r1, r2, r3, r4, fp, ip, pc}\\^" } } */
foo = 0;
}
-/* { dg-final { scan-assembler "stmfd\tsp!, {r0, r1, r2, r3, r4, r5, ip, lr}" } } */
-/* { dg-final { scan-assembler "ldmfd\tsp!, {r0, r1, r2, r3, r4, r5, ip, pc}\\^" } } */
+/* { dg-final { scan-assembler "push\t{r0, r1, r2, r3, r4, r5, ip, lr}" } } */
+/* { dg-final { scan-assembler "pop\t{r0, r1, r2, r3, r4, r5, ip, pc}\\^" } } */
/* We know both src and dest to be aligned: expect multiword loads/stores. */
-/* { dg-final { scan-assembler-times "ldmia" 1 { target { ! { arm_prefer_ldrd_strd } } } } } */
+/* { dg-final { scan-assembler-times "ldm" 1 { target { ! { arm_prefer_ldrd_strd } } } } } */
/* { dg-final { scan-assembler-times "stmia" 1 { target { ! { arm_prefer_ldrd_strd } } } } } */
/* { dg-final { scan-assembler "ldrd" { target { arm_prefer_ldrd_strd } } } } */
/* { dg-final { scan-assembler-times "ldm" 0 { target { arm_prefer_ldrd_strd } } } } */