2020-01-16 Mihail-Calin Ionescu <mihail.ionescu@arm.com>
2020-01-16 Thomas Preud'homme <thomas.preudhomme@arm.com>
+ * config/arm/arm-protos.h (clear_operation_p): Declare.
+ * config/arm/arm.c (clear_operation_p): New function.
+ (cmse_clear_registers): Generate clear_multiple instruction pattern if
+ targeting Armv8.1-M Mainline or later.
+ (output_return_instruction): Only output APSR register clearing if
+ Armv8.1-M Mainline instructions are not available.
+ (thumb_exit): Likewise.
+ * config/arm/predicates.md (clear_multiple_operation): New predicate.
+ * config/arm/thumb2.md (clear_apsr): New define_insn.
+ (clear_multiple): Likewise.
+ * config/arm/unspecs.md (VUNSPEC_CLRM_APSR): New volatile unspec.
+
+2020-01-16 Mihail-Calin Ionescu <mihail.ionescu@arm.com>
+2020-01-16 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
* config/arm/arm.c (fp_sysreg_names): Declare and define.
(use_return_insn): Also return false for Armv8.1-M Mainline.
(output_return_instruction): Skip FPSCR clearing if Armv8.1-M
extern int thumb1_legitimate_address_p (machine_mode, rtx, int);
extern bool ldm_stm_operation_p (rtx, bool, machine_mode mode,
bool, bool);
+extern bool clear_operation_p (rtx);
extern int arm_const_double_rtx (rtx);
extern int vfp3_const_double_rtx (rtx);
extern int neon_immediate_valid_for_move (rtx, machine_mode, rtx *, int *);
return true;
}
+/* Checks whether OP is a valid parallel pattern for a CLRM insn. To be a
+ valid CLRM pattern, OP must have the following form:
+
+ [(set (reg:SI <N>) (const_int 0))
+ (set (reg:SI <M>) (const_int 0))
+ ...
+ (unspec_volatile [(const_int 0)]
+ VUNSPEC_CLRM_APSR)
+ (clobber (reg:CC CC_REGNUM))
+ ]
+
+ Any number (including 0) of set expressions is valid; the volatile unspec
+ is optional.  All registers but SP and PC are allowed and registers must
+ be in strictly increasing order.  */
+
+bool
+clear_operation_p (rtx op)
+{
+ HOST_WIDE_INT i;
+ unsigned regno, last_regno = INVALID_REGNUM;
+ rtx elt, reg, zero;
+ machine_mode mode;
+ HOST_WIDE_INT count = XVECLEN (op, 0);
+
+ for (i = 0; i < count; i++)
+ {
+ elt = XVECEXP (op, 0, i);
+
+ if (GET_CODE (elt) == UNSPEC_VOLATILE)
+ {
+ if (XINT (elt, 1) != VUNSPEC_CLRM_APSR
+ || XVECLEN (elt, 0) != 1
+ || XVECEXP (elt, 0, 0) != CONST0_RTX (SImode)
+ || i != count - 2)
+ return false;
+
+ continue;
+ }
+
+ if (GET_CODE (elt) == CLOBBER)
+ continue;
+
+ if (GET_CODE (elt) != SET)
+ return false;
+
+ reg = SET_DEST (elt);
+ zero = SET_SRC (elt);
+
+ if (!REG_P (reg))
+ return false;
+
+ regno = REGNO (reg);
+ mode = GET_MODE (reg);
+
+ if (mode != SImode
+ || regno == SP_REGNUM
+ || regno == PC_REGNUM
+ || (i != 0 && regno <= last_regno)
+ || zero != CONST0_RTX (SImode))
+ return false;
+
+ last_regno = regno;
+ }
+
+ return true;
+}
+
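For illustration (the register choice is just an example), a PARALLEL of this
form that clear_operation_p accepts, and that the *clear_multiple pattern
added below prints as "clrm {r1, r2, r3, ip, APSR}", looks roughly like:

    (parallel [
        (set (reg:SI 1 r1) (const_int 0))
        (set (reg:SI 2 r2) (const_int 0))
        (set (reg:SI 3 r3) (const_int 0))
        (set (reg:SI 12 ip) (const_int 0))
        (unspec_volatile [(const_int 0)] VUNSPEC_CLRM_APSR)
        (clobber (reg:CC 100 cc))
      ])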
/* Return true iff it would be profitable to turn a sequence of NOPS loads
or stores (depending on IS_STORE) into a load-multiple or store-multiple
instruction. ADD_OFFSET is nonzero if the base address register needs
/* Clear full registers. */
+ if (TARGET_HAVE_FPCXT_CMSE)
+ {
+ rtvec vunspec_vec;
+ int i, j, k, nb_regs;
+ rtx use_seq, par, reg, set, vunspec;
+ int to_clear_bitmap_size = SBITMAP_SIZE (to_clear_bitmap);
+ auto_sbitmap core_regs_bitmap (to_clear_bitmap_size);
+ auto_sbitmap to_clear_core_bitmap (to_clear_bitmap_size);
+
+ /* Get set of core registers to clear. */
+ bitmap_clear (core_regs_bitmap);
+ bitmap_set_range (core_regs_bitmap, R0_REGNUM,
+ IP_REGNUM - R0_REGNUM + 1);
+ bitmap_and (to_clear_core_bitmap, to_clear_bitmap,
+ core_regs_bitmap);
+ gcc_assert (!bitmap_empty_p (to_clear_core_bitmap));
+
+ if (bitmap_empty_p (to_clear_core_bitmap))
+ return;
+
+ /* Create clrm RTX pattern. */
+ nb_regs = bitmap_count_bits (to_clear_core_bitmap);
+ par = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nb_regs + 2));
+
+ /* Insert core register clearing RTX in the pattern. */
+ start_sequence ();
+ for (j = 0, i = minregno; j < nb_regs; i++)
+ {
+ if (!bitmap_bit_p (to_clear_core_bitmap, i))
+ continue;
+
+ reg = gen_rtx_REG (SImode, i);
+ set = gen_rtx_SET (reg, const0_rtx);
+ XVECEXP (par, 0, j++) = set;
+ emit_use (reg);
+ }
+
+ /* Insert APSR register clearing RTX in the pattern along with
+ clobbering CC.  */
+ vunspec_vec = gen_rtvec (1, gen_int_mode (0, SImode));
+ vunspec = gen_rtx_UNSPEC_VOLATILE (SImode, vunspec_vec,
+ VUNSPEC_CLRM_APSR);
+
+ XVECEXP (par, 0, j++) = vunspec;
+
+ rtx ccreg = gen_rtx_REG (CCmode, CC_REGNUM);
+ rtx clobber = gen_rtx_CLOBBER (VOIDmode, ccreg);
+ XVECEXP (par, 0, j) = clobber;
+
+ use_seq = get_insns ();
+ end_sequence ();
+
+ emit_insn_after (use_seq, emit_insn (par));
+ minregno = FIRST_VFP_REGNUM;
+ }
+
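As a sketch of the intended effect (the exact register list depends on which
caller-saved registers may contain secrets), the return sequence of a
cmse_nonsecure_entry function on Armv8.1-M Mainline is then expected to
contain something like:

    clrm    {r1, r2, r3, ip, APSR}
    vldr    FPCXTNS, [sp], #4
    bxns    lr

which is what the testsuite changes below scan for.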
/* If not marked for clearing, clearing_reg already does not contain
any secret. */
if (clearing_regno <= maxregno
default:
if (IS_CMSE_ENTRY (func_type))
{
- /* Check if we have to clear the 'GE bits' which is only used if
- parallel add and subtraction instructions are available. */
- if (TARGET_INT_SIMD)
- snprintf (instr, sizeof (instr),
- "msr%s\tAPSR_nzcvqg, %%|lr", conditional);
- else
- snprintf (instr, sizeof (instr),
- "msr%s\tAPSR_nzcvq, %%|lr", conditional);
-
- output_asm_insn (instr, & operand);
- /* Do not clear FPSCR if targeting Armv8.1-M Mainline, VLDR takes
- care of it. */
- if (TARGET_HARD_FLOAT && ! TARGET_HAVE_FPCXT_CMSE)
+ /* For Armv8.1-M, the APSR is cleared by the CLRM instruction emitted by
+ cmse_nonsecure_entry_clear_before_return () and the FPSCR by the
+ VSTR/VLDR instructions in the prologue and epilogue.  */
+ if (!TARGET_HAVE_FPCXT_CMSE)
{
- /* Clear the cumulative exception-status bits (0-4,7) and the
- condition code bits (28-31) of the FPSCR. We need to
- remember to clear the first scratch register used (IP) and
- save and restore the second (r4). */
- snprintf (instr, sizeof (instr), "push\t{%%|r4}");
- output_asm_insn (instr, & operand);
- snprintf (instr, sizeof (instr), "vmrs\t%%|ip, fpscr");
- output_asm_insn (instr, & operand);
- snprintf (instr, sizeof (instr), "movw\t%%|r4, #65376");
- output_asm_insn (instr, & operand);
- snprintf (instr, sizeof (instr), "movt\t%%|r4, #4095");
- output_asm_insn (instr, & operand);
- snprintf (instr, sizeof (instr), "and\t%%|ip, %%|r4");
- output_asm_insn (instr, & operand);
- snprintf (instr, sizeof (instr), "vmsr\tfpscr, %%|ip");
- output_asm_insn (instr, & operand);
- snprintf (instr, sizeof (instr), "pop\t{%%|r4}");
- output_asm_insn (instr, & operand);
- snprintf (instr, sizeof (instr), "mov\t%%|ip, %%|lr");
+ /* Check if we have to clear the 'GE bits', which are only used if
+ parallel add and subtract instructions are available.  */
+ if (TARGET_INT_SIMD)
+ snprintf (instr, sizeof (instr),
+ "msr%s\tAPSR_nzcvqg, %%|lr", conditional);
+ else
+ snprintf (instr, sizeof (instr),
+ "msr%s\tAPSR_nzcvq, %%|lr", conditional);
+
output_asm_insn (instr, & operand);
+ /* Do not clear FPSCR if targeting Armv8.1-M Mainline; the VLDR of
+ FPCXTNS takes care of it.  */
+ if (TARGET_HARD_FLOAT)
+ {
+ /* Clear the cumulative exception-status bits (0-4,7) and
+ the condition code bits (28-31) of the FPSCR. We need
+ to remember to clear the first scratch register used
+ (IP) and save and restore the second (r4).
+
+ Important note: the length of the
+ thumb2_cmse_entry_return insn pattern must account for
+ the size of the below instructions. */
+ output_asm_insn ("push\t{%|r4}", & operand);
+ output_asm_insn ("vmrs\t%|ip, fpscr", & operand);
+ output_asm_insn ("movw\t%|r4, #65376", & operand);
+ output_asm_insn ("movt\t%|r4, #4095", & operand);
+ output_asm_insn ("and\t%|ip, %|r4", & operand);
+ output_asm_insn ("vmsr\tfpscr, %|ip", & operand);
+ output_asm_insn ("pop\t{%|r4}", & operand);
+ output_asm_insn ("mov\t%|ip, %|lr", & operand);
+ }
}
snprintf (instr, sizeof (instr), "bxns\t%%|lr");
}
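For reference, on targets without the Armv8.1-M FPCXT instructions the
fallback path above still emits the existing clearing sequence for a
cmse_nonsecure_entry return with hard float, roughly:

    msr     APSR_nzcvq, lr    @ APSR_nzcvqg when TARGET_INT_SIMD
    push    {r4}
    vmrs    ip, fpscr
    movw    r4, #65376
    movt    r4, #4095
    and     ip, r4
    vmsr    fpscr, ip
    pop     {r4}
    mov     ip, lr
    bxns    lr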
saved_regs += 4;
insn = emit_insn (gen_push_fpsysreg_insn (stack_pointer_rtx,
GEN_INT (FPCXTNS_ENUM)));
+ rtx dwarf = gen_rtx_SET (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx, -4));
+ add_reg_note (insn, REG_FRAME_RELATED_EXPR, dwarf);
RTX_FRAME_RELATED_P (insn) = 1;
}
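The added REG_FRAME_RELATED_EXPR note records the implicit 4-byte stack
pointer decrement performed by the FPCXT save, so the unwind information
stays in sync with the instruction the tests match:

    vstr    FPCXTNS, [sp, #-4]!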
if (IS_CMSE_ENTRY (arm_current_func_type ()))
{
- asm_fprintf (f, "\tmsr\tAPSR_nzcvq, %r\n",
- reg_containing_return_addr);
+ /* For Armv8.1-M, the APSR is cleared by the CLRM instruction
+ emitted by cmse_nonsecure_entry_clear_before_return ().  */
+ if (!TARGET_HAVE_FPCXT_CMSE)
+ asm_fprintf (f, "\tmsr\tAPSR_nzcvq, %r\n",
+ reg_containing_return_addr);
asm_fprintf (f, "\tbxns\t%r\n", reg_containing_return_addr);
}
else
address. It may therefore contain information that we might not want
to leak, hence it must be cleared. The value in R0 will never be a
secret at this point, so it is safe to use it, see the clearing code
- in 'cmse_nonsecure_entry_clear_before_return'. */
+ in cmse_nonsecure_entry_clear_before_return (). */
if (reg_containing_return_addr != LR_REGNUM)
asm_fprintf (f, "\tmov\tlr, r0\n");
- asm_fprintf (f, "\tmsr\tAPSR_nzcvq, %r\n", reg_containing_return_addr);
+ /* For Armv8.1-M, the APSR is cleared by the CLRM instruction emitted
+ by cmse_nonsecure_entry_clear_before_return ().  */
+ if (!TARGET_HAVE_FPCXT_CMSE)
+ asm_fprintf (f, "\tmsr\tAPSR_nzcvq, %r\n", reg_containing_return_addr);
asm_fprintf (f, "\tbxns\t%r\n", reg_containing_return_addr);
}
else
insn = emit_insn (gen_pop_fpsysreg_insn (stack_pointer_rtx,
GEN_INT (FPCXTNS_ENUM)));
+ rtx dwarf = gen_rtx_SET (stack_pointer_rtx,
+ plus_constant (Pmode, stack_pointer_rtx, 4));
+ add_reg_note (insn, REG_FRAME_RELATED_EXPR, dwarf);
RTX_FRAME_RELATED_P (insn) = 1;
}
}
(match_test "satisfies_constraint_Dy (op)")
(match_test "satisfies_constraint_G (op)"))))
+(define_special_predicate "clear_multiple_operation"
+ (match_code "parallel")
+{
+ return clear_operation_p (op);
+})
+
(define_special_predicate "load_multiple_operation"
(match_code "parallel")
{
FAIL;
}")
+(define_insn "*clear_apsr"
+ [(unspec_volatile:SI [(const_int 0)] VUNSPEC_CLRM_APSR)
+ (clobber (reg:CC CC_REGNUM))]
+ "TARGET_THUMB2 && TARGET_HAVE_FPCXT_CMSE && use_cmse"
+ "clrm%?\\t{APSR}"
+ [(set_attr "predicable" "yes")]
+)
+
+;; The operands are validated through the clear_multiple_operation
+;; match_parallel predicate rather than through constraints, so the insn is
+;; only enabled after reload.
+(define_insn "*clear_multiple"
+ [(match_parallel 0 "clear_multiple_operation"
+ [(set (match_operand:SI 1 "register_operand" "")
+ (const_int 0))])]
+ "TARGET_THUMB2 && TARGET_HAVE_FPCXT_CMSE && use_cmse && reload_completed"
+ {
+ char pattern[100];
+ int i, num_saves = XVECLEN (operands[0], 0);
+
+ strcpy (pattern, \"clrm%?\\t{\");
+ for (i = 0; i < num_saves; i++)
+ {
+ if (GET_CODE (XVECEXP (operands[0], 0, i)) == UNSPEC_VOLATILE)
+ {
+ strcat (pattern, \"APSR\");
+ ++i;
+ }
+ else
+ strcat (pattern,
+ reg_names[REGNO (XEXP (XVECEXP (operands[0], 0, i), 0))]);
+ if (i < num_saves - 1)
+ strcat (pattern, \", %|\");
+ }
+ strcat (pattern, \"}\");
+ output_asm_insn (pattern, operands);
+ return \"\";
+ }
+ [(set_attr "predicable" "yes")]
+)
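As a sketch of the resulting output (register lists taken from the testsuite
expectations updated below), these two patterns print, for example:

    clrm    {APSR}              @ *clear_apsr
    clrm    {r2, r3, APSR}      @ *clear_multiple clearing r2, r3 and the APSR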
VUNSPEC_SPECULATION_BARRIER ; Represents an unconditional speculation barrier.
VUNSPEC_APSR_WRITE ; Represent writing the APSR.
VUNSPEC_VSTR_VLDR ; Represent the vstr/vldr instruction.
+ VUNSPEC_CLRM_APSR ; Represent clearing the APSR with the CLRM instruction.
])
;; Enumerators for NEON unspecs.
2020-01-16 Mihail-Calin Ionescu <mihail.ionescu@arm.com>
2020-01-16 Thomas Preud'homme <thomas.preudhomme@arm.com>
+ * gcc.target/arm/cmse/bitfield-1.c: Add check for CLRM.
+ * gcc.target/arm/cmse/bitfield-2.c: Likewise.
+ * gcc.target/arm/cmse/bitfield-3.c: Likewise.
+ * gcc.target/arm/cmse/struct-1.c: Likewise.
+ * gcc.target/arm/cmse/cmse-14.c: Likewise.
+ * gcc.target/arm/cmse/cmse-1.c: Likewise.  Restrict Armv8-M GPR clearing
+ checks to the case where CLRM is not available.
+ * gcc.target/arm/cmse/mainline/8_1m/bitfield-4.c: Likewise.
+ * gcc.target/arm/cmse/mainline/8_1m/bitfield-5.c: Likewise.
+ * gcc.target/arm/cmse/mainline/8_1m/bitfield-6.c: Likewise.
+ * gcc.target/arm/cmse/mainline/8_1m/bitfield-7.c: Likewise.
+ * gcc.target/arm/cmse/mainline/8_1m/bitfield-8.c: Likewise.
+ * gcc.target/arm/cmse/mainline/8_1m/bitfield-9.c: Likewise.
+ * gcc.target/arm/cmse/mainline/8_1m/hard-sp/cmse-13.c: Likewise.
+ * gcc.target/arm/cmse/mainline/8_1m/hard-sp/cmse-5.c: Likewise.
+ * gcc.target/arm/cmse/mainline/8_1m/hard-sp/cmse-7.c: Likewise.
+ * gcc.target/arm/cmse/mainline/8_1m/hard-sp/cmse-8.c: Likewise.
+ * gcc.target/arm/cmse/mainline/8_1m/hard/cmse-13.c: Likewise.
+ * gcc.target/arm/cmse/mainline/8_1m/hard/cmse-5.c: Likewise.
+ * gcc.target/arm/cmse/mainline/8_1m/hard/cmse-7.c: Likewise.
+ * gcc.target/arm/cmse/mainline/8_1m/hard/cmse-8.c: Likewise.
+ * gcc.target/arm/cmse/mainline/8_1m/soft/cmse-13.c: Likewise.
+ * gcc.target/arm/cmse/mainline/8_1m/soft/cmse-5.c: Likewise.
+ * gcc.target/arm/cmse/mainline/8_1m/soft/cmse-7.c: Likewise.
+ * gcc.target/arm/cmse/mainline/8_1m/soft/cmse-8.c: Likewise.
+ * gcc.target/arm/cmse/mainline/8_1m/softfp-sp/cmse-5.c: Likewise.
+ * gcc.target/arm/cmse/mainline/8_1m/softfp-sp/cmse-7.c: Likewise.
+ * gcc.target/arm/cmse/mainline/8_1m/softfp-sp/cmse-8.c: Likewise.
+ * gcc.target/arm/cmse/mainline/8_1m/softfp/cmse-13.c: Likewise.
+ * gcc.target/arm/cmse/mainline/8_1m/softfp/cmse-5.c: Likewise.
+ * gcc.target/arm/cmse/mainline/8_1m/softfp/cmse-7.c: Likewise.
+ * gcc.target/arm/cmse/mainline/8_1m/softfp/cmse-8.c: Likewise.
+ * gcc.target/arm/cmse/mainline/8_1m/union-1.c: Likewise.
+ * gcc.target/arm/cmse/mainline/8_1m/union-2.c: Likewise.
+
+2020-01-16 Mihail-Calin Ionescu <mihail.ionescu@arm.com>
+2020-01-16 Thomas Preud'homme <thomas.preudhomme@arm.com>
+
* gcc.target/arm/cmse/bitfield-1.c: Add checks for VSTR and VLDR.
* gcc.target/arm/cmse/bitfield-2.c: Likewise.
* gcc.target/arm/cmse/bitfield-3.c: Likewise.
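The test sources themselves are not part of this patch; as a hypothetical
minimal example, a secure entry function of the kind these tests build with
-mcmse looks like:

    /* Hypothetical example, not taken from the testsuite.  */
    int __attribute__ ((cmse_nonsecure_entry))
    secure_get_bit (unsigned x)
    {
      return x & 1;
    }

On Armv8.1-M Mainline its return sequence is expected to clear the
caller-saved registers that may hold secrets and the APSR with CLRM and to
restore FPCXTNS with VLDR, which is what the new scan-assembler directives
below check for.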
/* { dg-final { scan-assembler "movw\tr1, #1855" } } */
/* { dg-final { scan-assembler "movt\tr1, 65535" } } */
/* { dg-final { scan-assembler "ands\tr0(, r0)?, r1" } } */
+/* { dg-final { scan-assembler "clrm\t\{r1, r2, r3, ip, APSR\}" { target arm_cmse_clear_ok } } } */
/* { dg-final { scan-assembler "vldr\tFPCXTNS, \\\[sp\\\], #4" { target arm_cmse_clear_ok } } } */
/* { dg-final { scan-assembler "bxns" } } */
/* { dg-final { scan-assembler "movw\tr1, #1919" } } */
/* { dg-final { scan-assembler "movt\tr1, 2047" } } */
/* { dg-final { scan-assembler "ands\tr0(, r0)?, r1" } } */
+/* { dg-final { scan-assembler "clrm\t\{r1, r2, r3, ip, APSR\}" { target arm_cmse_clear_ok } } } */
/* { dg-final { scan-assembler "vldr\tFPCXTNS, \\\[sp\\\], #4" { target arm_cmse_clear_ok } } } */
/* { dg-final { scan-assembler "bxns" } } */
/* { dg-final { scan-assembler "movw\tr1, #65535" } } */
/* { dg-final { scan-assembler "movt\tr1, 63" } } */
/* { dg-final { scan-assembler "ands\tr0(, r0)?, r1" } } */
+/* { dg-final { scan-assembler "clrm\t\{r1, r2, r3, ip, APSR\}" { target arm_cmse_clear_ok } } } */
/* { dg-final { scan-assembler "vldr\tFPCXTNS, \\\[sp\\\], #4" { target arm_cmse_clear_ok } } } */
/* { dg-final { scan-assembler "bxns" } } */
/* { dg-final { scan-assembler "bic" } } */
/* { dg-final { scan-assembler "push\t\{r4, r5, r6" } } */
/* { dg-final { scan-assembler "vstr\tFPCXTNS, \\\[sp, #-4\\\]!" { target arm_cmse_clear_ok } } } */
+/* { dg-final { scan-assembler "clrm\t\{r1, r2, r3, ip, APSR\}" { target arm_cmse_clear_ok } } } */
/* { dg-final { scan-assembler "vldr\tFPCXTNS, \\\[sp\\\], #4" { target arm_cmse_clear_ok } } } */
-/* { dg-final { scan-assembler "msr\tAPSR_nzcvq" } } */
+/* { dg-final { scan-assembler "msr\tAPSR_nzcvq" { target { ! arm_cmse_clear_ok } } } } */
+/* { dg-final { scan-assembler "clrm\t\{r0, r1, r2, r3, APSR\}" { target arm_cmse_clear_ok } } } */
int call_callback (void)
{
return bar ();
}
+/* { dg-final { scan-assembler "clrm\t\{r0, r1, r2, r3, APSR\}" { target arm_cmse_clear_ok } } } */
/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
/* { dg-final { scan-assembler-not "^(.*\\s)?bl?\[^\\s]*\\s+bar" } } */
/* { dg-final { scan-assembler "and\tr2, r2, ip" } } */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
-/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "clrm\t\{r3, APSR\}" } } */
/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
/* { dg-final { scan-assembler "and\tr1, r1, ip" } } */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
-/* { dg-final { scan-assembler "mov\tr2, r4" } } */
-/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "clrm\t\{r2, r3, APSR\}" } } */
/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
/* { dg-final { scan-assembler "and\tr2, r2, ip" } } */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
-/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "clrm\t\{r3, APSR\}" } } */
/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
/* { dg-final { scan-assembler "and\tr1, r1, ip" } } */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
-/* { dg-final { scan-assembler "mov\tr2, r4" } } */
-/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "clrm\t\{r2, r3, APSR\}" } } */
/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
/* { dg-final { scan-assembler "and\tr2, r2, ip" } } */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
-/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "clrm\t\{r3, APSR\}" } } */
/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
/* { dg-final { scan-assembler "and\tr0, r0, ip" } } */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
-/* { dg-final { scan-assembler "mov\tr1, r4" } } */
-/* { dg-final { scan-assembler "mov\tr2, r4" } } */
-/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "clrm\t\{r1, r2, r3, APSR\}" } } */
/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
/* Checks for saving and clearing prior to function call. */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
-/* { dg-final { scan-assembler "mov\tr0, r4" } } */
-/* { dg-final { scan-assembler "mov\tr1, r4" } } */
-/* { dg-final { scan-assembler "mov\tr2, r4" } } */
-/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "clrm\t\{r0, r1, r2, r3, APSR\}" } } */
/* { dg-final { scan-assembler-not "vmov\.f32\ts0, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts1, #1\.0" } } */
/* { dg-final { scan-assembler-not "vmov\.f32\ts2, #1\.0" } } */
#include "../../../cmse-5.x"
/* { dg-final { scan-assembler "vstr\tFPCXTNS, \\\[sp, #-4\\\]!" } } */
-/* { dg-final { scan-assembler "mov\tr0, lr" } } */
-/* { dg-final { scan-assembler "mov\tr1, lr" } } */
-/* { dg-final { scan-assembler "mov\tr2, lr" } } */
-/* { dg-final { scan-assembler "mov\tr3, lr" } } */
/* { dg-final { scan-assembler-not "vmov\.f32\ts0, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts1, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts2, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts13, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts14, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts15, #1\.0" } } */
-/* { dg-final { scan-assembler "msr\tAPSR_nzcvq, lr" { target { ! arm_dsp } } } } */
-/* { dg-final { scan-assembler "msr\tAPSR_nzcvqg, lr" { target arm_dsp } } } */
+/* { dg-final { scan-assembler "clrm\t\{r0, r1, r2, r3, ip, APSR\}" } } */
/* { dg-final { scan-assembler "vldr\tFPCXTNS, \\\[sp\\\], #4" } } */
/* { dg-final { scan-assembler "bxns" } } */
/* Checks for saving and clearing prior to function call. */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
-/* { dg-final { scan-assembler "mov\tr0, r4" } } */
-/* { dg-final { scan-assembler "mov\tr1, r4" } } */
-/* { dg-final { scan-assembler "mov\tr2, r4" } } */
-/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "clrm\t\{r0, r1, r2, r3, APSR\}" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts0, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts1, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts2, #1\.0" } } */
/* Checks for saving and clearing prior to function call. */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
-/* { dg-final { scan-assembler "mov\tr0, r4" } } */
-/* { dg-final { scan-assembler "mov\tr1, r4" } } */
-/* { dg-final { scan-assembler "mov\tr2, r4" } } */
-/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "clrm\t\{r0, r1, r2, r3, APSR\}" } } */
/* { dg-final { scan-assembler-not "vmov\.f32\ts0, #1\.0" } } */
/* { dg-final { scan-assembler-not "vmov\.f32\ts1, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts2, #1\.0" } } */
/* Checks for saving and clearing prior to function call. */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
-/* { dg-final { scan-assembler "mov\tr0, r4" } } */
-/* { dg-final { scan-assembler "mov\tr1, r4" } } */
-/* { dg-final { scan-assembler "mov\tr2, r4" } } */
-/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "clrm\t\{r0, r1, r2, r3, APSR\}" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts1, #1\.0" } } */
/* { dg-final { scan-assembler-not "vmov\.f32\ts0, #1\.0" } } */
/* { dg-final { scan-assembler-not "vmov\.f64\td0, #1\.0" } } */
#include "../../../cmse-5.x"
/* { dg-final { scan-assembler "vstr\tFPCXTNS, \\\[sp, #-4\\\]!" } } */
-/* { dg-final { scan-assembler "mov\tr0, lr" } } */
-/* { dg-final { scan-assembler "mov\tr1, lr" } } */
-/* { dg-final { scan-assembler "mov\tr2, lr" } } */
-/* { dg-final { scan-assembler "mov\tr3, lr" } } */
/* { dg-final { scan-assembler-not "vmov\.f32\ts0, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts1, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td1, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td5, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td6, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td7, #1\.0" } } */
-/* { dg-final { scan-assembler "msr\tAPSR_nzcvq, lr" { target { ! arm_dsp } } } } */
-/* { dg-final { scan-assembler "msr\tAPSR_nzcvqg, lr" { target arm_dsp } } } */
+/* { dg-final { scan-assembler "clrm\t\{r0, r1, r2, r3, ip, APSR\}" } } */
/* { dg-final { scan-assembler "vldr\tFPCXTNS, \\\[sp\\\], #4" } } */
/* { dg-final { scan-assembler "bxns" } } */
/* Checks for saving and clearing prior to function call. */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
-/* { dg-final { scan-assembler "mov\tr0, r4" } } */
-/* { dg-final { scan-assembler "mov\tr1, r4" } } */
-/* { dg-final { scan-assembler "mov\tr2, r4" } } */
-/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "clrm\t\{r0, r1, r2, r3, APSR\}" } } */
/* { dg-final { scan-assembler "vmov\.f64\td0, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td1, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td2, #1\.0" } } */
/* Checks for saving and clearing prior to function call. */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
-/* { dg-final { scan-assembler "mov\tr0, r4" } } */
-/* { dg-final { scan-assembler "mov\tr1, r4" } } */
-/* { dg-final { scan-assembler "mov\tr2, r4" } } */
-/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "clrm\t\{r0, r1, r2, r3, APSR\}" } } */
/* { dg-final { scan-assembler-not "vmov\.f64\td0, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td1, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td2, #1\.0" } } */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
/* { dg-final { scan-assembler-not "mov\tr0, r4" } } */
-/* { dg-final { scan-assembler "mov\tr1, r4" } } */
/* { dg-final { scan-assembler-not "mov\tr2, r4" } } */
/* { dg-final { scan-assembler-not "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "clrm\t\{r1, APSR\}" } } */
/* { dg-final { scan-assembler-not "vmov" } } */
/* { dg-final { scan-assembler-not "vmsr" } } */
#include "../../../cmse-5.x"
/* { dg-final { scan-assembler "vstr\tFPCXTNS, \\\[sp, #-4\\\]!" } } */
-/* { dg-final { scan-assembler "mov\tr1, lr" } } */
-/* { dg-final { scan-assembler "mov\tr2, lr" } } */
-/* { dg-final { scan-assembler "mov\tr3, lr" } } */
-/* { dg-final { scan-assembler "mov\tip, lr" } } */
/* { dg-final { scan-assembler-not "vmov" } } */
/* { dg-final { scan-assembler-not "vmsr" } } */
-/* { dg-final { scan-assembler "msr\tAPSR_nzcvq, lr" { target { ! arm_dsp } } } } */
-/* { dg-final { scan-assembler "msr\tAPSR_nzcvqg, lr" { target arm_dsp } } } */
+/* { dg-final { scan-assembler "clrm\t\{r1, r2, r3, ip, APSR\}" } } */
/* { dg-final { scan-assembler "vldr\tFPCXTNS, \\\[sp\\\], #4" } } */
/* { dg-final { scan-assembler "bxns" } } */
/* Checks for saving and clearing prior to function call. */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
-/* { dg-final { scan-assembler "mov\tr0, r4" } } */
-/* { dg-final { scan-assembler "mov\tr1, r4" } } */
-/* { dg-final { scan-assembler "mov\tr2, r4" } } */
-/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "clrm\t\{r0, r1, r2, r3, APSR\}" } } */
/* { dg-final { scan-assembler-not "vmov" } } */
/* { dg-final { scan-assembler-not "vmsr" } } */
/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
/* { dg-final { scan-assembler-not "mov\tr0, r4" } } */
/* { dg-final { scan-assembler-not "mov\tr1, r4" } } */
-/* { dg-final { scan-assembler "mov\tr2, r4" } } */
-/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "clrm\t\{r2, r3, APSR\}" } } */
/* { dg-final { scan-assembler-not "vmov" } } */
/* { dg-final { scan-assembler-not "vmsr" } } */
/* { dg-final { scan-assembler "__acle_se_foo:" } } */
/* { dg-final { scan-assembler "vstr\tFPCXTNS, \\\[sp, #-4\\\]!" } } */
/* { dg-final { scan-assembler-not "mov\tr0, lr" } } */
-/* { dg-final { scan-assembler "mov\tr1, lr" } } */
-/* { dg-final { scan-assembler "mov\tr2, lr" } } */
-/* { dg-final { scan-assembler "mov\tr3, lr" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts0, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts1, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts2, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts13, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts14, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f32\ts15, #1\.0" } } */
-/* { dg-final { scan-assembler "msr\tAPSR_nzcvq, lr" { target { ! arm_dsp } } } } */
-/* { dg-final { scan-assembler "msr\tAPSR_nzcvqg, lr" { target arm_dsp } } } */
+/* { dg-final { scan-assembler "clrm\t\{r1, r2, r3, ip, APSR\}" } } */
/* { dg-final { scan-assembler "vldr\tFPCXTNS, \\\[sp\\\], #4" } } */
/* { dg-final { scan-assembler "bxns" } } */
/* Checks for saving and clearing prior to function call. */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
-/* { dg-final { scan-assembler "mov\tr0, r4" } } */
-/* { dg-final { scan-assembler "mov\tr1, r4" } } */
-/* { dg-final { scan-assembler "mov\tr2, r4" } } */
-/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "clrm\t\{r0, r1, r2, r3, APSR\}" } } */
/* Now we check that we use the correct intrinsic to call. */
/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
/* { dg-final { scan-assembler-not "mov\tr0, r4" } } */
/* { dg-final { scan-assembler-not "mov\tr1, r4" } } */
-/* { dg-final { scan-assembler "mov\tr2, r4" } } */
-/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "clrm\t\{r2, r3, APSR\}" } } */
/* Now we check that we use the correct intrinsic to call. */
/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
/* { dg-final { scan-assembler-not "mov\tr0, r4" } } */
-/* { dg-final { scan-assembler "\n\tmov\tr1, r4" } } */
-/* { dg-final { scan-assembler-not "\n\tmov\tr2, r4\n\tmov\tr3, r4" } } */
+/* { dg-final { scan-assembler-not "mov\tr2, r4" } } */
+/* { dg-final { scan-assembler-not "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "clrm\t\{r1, APSR\}" } } */
/* Now we check that we use the correct intrinsic to call. */
/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
/* { dg-final { scan-assembler "__acle_se_foo:" } } */
/* { dg-final { scan-assembler "vstr\tFPCXTNS, \\\[sp, #-4\\\]!" } } */
-/* { dg-final { scan-assembler-not "mov\tr0, lr" } } */
-/* { dg-final { scan-assembler "mov\tr1, lr" } } */
-/* { dg-final { scan-assembler "mov\tr2, lr" } } */
-/* { dg-final { scan-assembler "mov\tr3, lr" } } */
/* { dg-final { scan-assembler "vmov\.f64\td0, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td1, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td2, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td5, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td6, #1\.0" } } */
/* { dg-final { scan-assembler "vmov\.f64\td7, #1\.0" } } */
-/* { dg-final { scan-assembler "msr\tAPSR_nzcvq, lr" { target { ! arm_dsp } } } } */
-/* { dg-final { scan-assembler "msr\tAPSR_nzcvqg, lr" { target arm_dsp } } } */
+/* { dg-final { scan-assembler "clrm\t\{r1, r2, r3, ip, APSR\}" } } */
/* { dg-final { scan-assembler "vldr\tFPCXTNS, \\\[sp\\\], #4" } } */
/* { dg-final { scan-assembler "bxns" } } */
/* Checks for saving and clearing prior to function call. */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
-/* { dg-final { scan-assembler "mov\tr0, r4" } } */
-/* { dg-final { scan-assembler "mov\tr1, r4" } } */
-/* { dg-final { scan-assembler "mov\tr2, r4" } } */
-/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "clrm\t\{r0, r1, r2, r3, APSR\}" } } */
/* Now we check that we use the correct intrinsic to call. */
/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
/* { dg-final { scan-assembler-not "mov\tr0, r4" } } */
/* { dg-final { scan-assembler-not "mov\tr1, r4" } } */
-/* { dg-final { scan-assembler "mov\tr2, r4" } } */
-/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "clrm\t\{r2, r3, APSR\}" } } */
/* Now we check that we use the correct intrinsic to call. */
/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
/* { dg-final { scan-assembler "and\tr1, r1, ip" } } */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
-/* { dg-final { scan-assembler "mov\tr2, r4" } } */
-/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "clrm\t\{r2, r3, APSR\}" } } */
/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
/* { dg-final { scan-assembler "and\tr2, r2, ip" } } */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
-/* { dg-final { scan-assembler "mov\tr3, r4" } } */
+/* { dg-final { scan-assembler "clrm\t\{r3, APSR\}" } } */
/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
/* { dg-final { scan-assembler "lsrs\tr4, r4, #1" } } */
/* { dg-final { scan-assembler "lsls\tr4, r4, #1" } } */
/* { dg-final { scan-assembler-not "mov\tr0, r4" } } */
-/* { dg-final { scan-assembler "\n\tmov\tr1, r4" } } */
-/* { dg-final { scan-assembler-not "\n\tmov\tr2, r4\n\tmov\tr3, r4" } } */
+/* { dg-final { scan-assembler "mov\tr1, r4" } } */
+/* { dg-final { scan-assembler-not "mov\tr2, r4" } } */
+/* { dg-final { scan-assembler-not "mov\tr3, r4" } } */
/* Now we check that we use the correct intrinsic to call. */
/* { dg-final { scan-assembler "bl\t__gnu_cmse_nonsecure_call" } } */
/* { dg-final { scan-assembler "movs\tr1, #255" } } */
/* { dg-final { scan-assembler "movt\tr1, 65535" } } */
/* { dg-final { scan-assembler "ands\tr0(, r0)?, r1" } } */
+/* { dg-final { scan-assembler "clrm\t\{r1, r2, r3, ip, APSR\}" { target arm_cmse_clear_ok } } } */
/* { dg-final { scan-assembler "vldr\tFPCXTNS, \\\[sp\\\], #4" { target arm_cmse_clear_ok } } } */
/* { dg-final { scan-assembler "bxns" } } */