enum reg_class
aarch64_regno_regclass (unsigned regno)
{
+  if (STUB_REGNUM_P (regno))
+    return STUB_REGS;
+
  if (GP_REGNUM_P (regno))
    return GENERAL_REGS;
  unsigned int nregs, vec_flags;
  switch (regclass)
    {
+    case STUB_REGS:
    case TAILCALL_ADDR_REGS:
    case POINTER_REGS:
    case GENERAL_REGS:
    = aarch64_tune_params.regmove_cost;
  /* Caller save and pointer regs are equivalent to GENERAL_REGS.  */
-  if (to == TAILCALL_ADDR_REGS || to == POINTER_REGS)
+  if (to == TAILCALL_ADDR_REGS || to == POINTER_REGS
+      || to == STUB_REGS)
    to = GENERAL_REGS;
-  if (from == TAILCALL_ADDR_REGS || from == POINTER_REGS)
+  if (from == TAILCALL_ADDR_REGS || from == POINTER_REGS
+      || from == STUB_REGS)
    from = GENERAL_REGS;
  /* Make RDFFR very expensive.  In particular, if we know that the FFR
    : "";
}
+static GTY (()) tree aarch64_sls_shared_thunks[30];
+static GTY (()) bool aarch64_sls_shared_thunks_needed = false;
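+
+/* One entry per candidate register x0..x29.  The x16 and x17 slots below are
+   deliberately empty: the stub sequence itself clobbers x16, so the STUB_REGS
+   class introduced above is used to keep the addresses of hardened indirect
+   calls out of x16 and x17.  */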
+const char *indirect_symbol_names[30] = {
+    "__call_indirect_x0",
+    "__call_indirect_x1",
+    "__call_indirect_x2",
+    "__call_indirect_x3",
+    "__call_indirect_x4",
+    "__call_indirect_x5",
+    "__call_indirect_x6",
+    "__call_indirect_x7",
+    "__call_indirect_x8",
+    "__call_indirect_x9",
+    "__call_indirect_x10",
+    "__call_indirect_x11",
+    "__call_indirect_x12",
+    "__call_indirect_x13",
+    "__call_indirect_x14",
+    "__call_indirect_x15",
+    "", /* "__call_indirect_x16",  */
+    "", /* "__call_indirect_x17",  */
+    "__call_indirect_x18",
+    "__call_indirect_x19",
+    "__call_indirect_x20",
+    "__call_indirect_x21",
+    "__call_indirect_x22",
+    "__call_indirect_x23",
+    "__call_indirect_x24",
+    "__call_indirect_x25",
+    "__call_indirect_x26",
+    "__call_indirect_x27",
+    "__call_indirect_x28",
+    "__call_indirect_x29",
+};
+
+/* Function to create a BLR thunk.  This thunk is used to mitigate straight
+   line speculation.  Instead of a simple BLR that can be speculated past,
+   we emit a BL to this thunk, and this thunk contains a BR to the relevant
+   register.  These thunks have the relevant speculation barriers put after
+   their indirect branch so that speculation is blocked.
+
+   We use such a thunk so the speculation barriers are kept off the
+   architecturally executed path in order to reduce the performance overhead.
+
+   When optimizing for size we use stubs shared across the whole linked
+   object: each compilation unit that needs one emits it as a one-only
+   (comdat) symbol, and the linker merges the duplicates.
+   When optimizing for performance we emit a stub for each function, in the
+   hope that the branch predictor can better train on jumps specific to a
+   given function.  */
+rtx
+aarch64_sls_create_blr_label (int regnum)
+{
+  gcc_assert (STUB_REGNUM_P (regnum));
+  if (optimize_function_for_size_p (cfun))
+    {
+      /* For the thunks shared between different functions in this compilation
+         unit we use a named symbol -- this is just for users to more easily
+         understand the generated assembly.  */
+      aarch64_sls_shared_thunks_needed = true;
+      const char *thunk_name = indirect_symbol_names[regnum];
+      if (aarch64_sls_shared_thunks[regnum] == NULL)
+        {
+          /* Build a decl representing this function stub and record it for
+             later.  We build a decl here so we can use the GCC machinery for
+             handling sections automatically (through `get_named_section` and
+             `make_decl_one_only`).  That saves us a lot of trouble handling
+             the specifics of different output file formats.  */
+          tree decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
+                                  get_identifier (thunk_name),
+                                  build_function_type_list (void_type_node,
+                                                            NULL_TREE));
+          DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
+                                           NULL_TREE, void_type_node);
+          TREE_PUBLIC (decl) = 1;
+          TREE_STATIC (decl) = 1;
+          DECL_IGNORED_P (decl) = 1;
+          DECL_ARTIFICIAL (decl) = 1;
+          make_decl_one_only (decl, DECL_ASSEMBLER_NAME (decl));
+          resolve_unique_section (decl, 0, false);
+          aarch64_sls_shared_thunks[regnum] = decl;
+        }
+
+      return gen_rtx_SYMBOL_REF (Pmode, thunk_name);
+    }
+
+  if (cfun->machine->call_via[regnum] == NULL)
+    cfun->machine->call_via[regnum]
+      = gen_rtx_LABEL_REF (Pmode, gen_label_rtx ());
+  return cfun->machine->call_via[regnum];
+}
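+
+/* To illustrate the overall transformation (this sketch is not emitted by the
+   function above): with -mharden-sls=blr, an indirect call that would have
+   been
+	blr	x1
+   is instead emitted as a BL to the stub whose symbol or label is returned
+   above, e.g.
+	bl	__call_indirect_x1
+   (or a BL to a function-local label when optimizing for speed), where the
+   stub performs
+	mov	x16, x1
+	br	x16
+   followed by a speculation barrier, keeping that barrier off the
+   architecturally executed path.  */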
+
+/* Helper function for aarch64_sls_emit_blr_function_thunks and
+   aarch64_sls_emit_shared_blr_thunks below.  */
+static void
+aarch64_sls_emit_function_stub (FILE *out_file, int regnum)
+{
+  /* Copy the target into x16 and branch through that: a BR via x16 (or x17)
+     is a valid way to reach a `BTI c` landing pad, so this transformation
+     does not prevent jumping to `BTI c` instructions.  */
+  asm_fprintf (out_file, "\tmov\tx16, x%d\n", regnum);
+  asm_fprintf (out_file, "\tbr\tx16\n");
+}
+
+/* Emit all BLR stubs needed for the current function.
+   Since we emit these stubs in one consecutive block, we know there will be
+   no speculation gadgets between the stubs, and hence we only need to emit a
+   single speculation barrier at the end of the stub sequence.
+
+   This is called in the TARGET_ASM_FUNCTION_EPILOGUE hook.  */
+void
+aarch64_sls_emit_blr_function_thunks (FILE *out_file)
+{
+  if (!aarch64_harden_sls_blr_p ())
+    return;
+
+  bool any_functions_emitted = false;
+  /* We must save and restore the current function section since this assembly
+     is emitted at the end of the function.  This means it can be emitted *just
+     after* the cold section of a function, and that cold part is emitted in a
+     different section.  Such a section switch triggers a `.cfi_endproc`
+     directive in the original section and a `.cfi_startproc` directive in the
+     new section.  If we switched sections here without restoring afterwards,
+     the `.cfi_endproc` emitted when the function ends would land in a
+     different section from its `.cfi_startproc` -- leaving an unmatched
+     `.cfi_startproc` in the cold text section and an unmatched `.cfi_endproc`
+     in the standard text section.  */
+  section *save_text_section = in_section;
+  switch_to_section (function_section (current_function_decl));
+  for (int regnum = 0; regnum < 30; ++regnum)
+    {
+      rtx specu_label = cfun->machine->call_via[regnum];
+      if (specu_label == NULL)
+        continue;
+
+      targetm.asm_out.print_operand (out_file, specu_label, 0);
+      asm_fprintf (out_file, ":\n");
+      aarch64_sls_emit_function_stub (out_file, regnum);
+      any_functions_emitted = true;
+    }
+  if (any_functions_emitted)
+    /* Can use the SB instruction if need be here, since these stubs will only
+       be used by the current function, and hence only for the current
+       target.  */
+    asm_fprintf (out_file, "\t%s\n", aarch64_sls_barrier (true));
+  switch_to_section (save_text_section);
+}
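+
+/* As a sketch (the label names here are illustrative, not the compiler's), a
+   function that made hardened indirect calls through x1 and x2 would get,
+   right after its body:
+   .LBLR1:
+	mov	x16, x1
+	br	x16
+   .LBLR2:
+	mov	x16, x2
+	br	x16
+	sb		// or `dsb sy; isb` when the SB extension is absent
+   with the single trailing barrier covering both stubs, since no speculation
+   gadget can sit between them.  */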
+
+/* Emit shared BLR stubs for the current compilation unit.
+   Over the course of compiling this unit we may have converted some BLR
+   instructions into a BL to a shared stub function; this is where we emit
+   those stub functions.
+   This function handles the stubs shared between different functions in this
+   compilation unit.  We share stubs when optimizing for size instead of
+   speed.
+
+   This function is called through the TARGET_ASM_FILE_END hook.  */
+void
+aarch64_sls_emit_shared_blr_thunks (FILE *out_file)
+{
+  if (!aarch64_sls_shared_thunks_needed)
+    return;
+
+  for (int regnum = 0; regnum < 30; ++regnum)
+    {
+      tree decl = aarch64_sls_shared_thunks[regnum];
+      if (!decl)
+        continue;
+
+      const char *name = indirect_symbol_names[regnum];
+      switch_to_section (get_named_section (decl, NULL, 0));
+      ASM_OUTPUT_ALIGN (out_file, 2);
+      targetm.asm_out.globalize_label (out_file, name);
+      /* This only emits a directive if the compiler is configured for an
+         assembler that can handle visibility directives.  */
+      targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
+      ASM_OUTPUT_TYPE_DIRECTIVE (out_file, name, "function");
+      ASM_OUTPUT_LABEL (out_file, name);
+      aarch64_sls_emit_function_stub (out_file, regnum);
+      /* Use the most conservative barrier (DSB SY followed by ISB): this stub
+         may be used by any function in the linked object, including ones
+         compiled for a target without the SB instruction.  */
+      asm_fprintf (out_file, "\tdsb\tsy\n\tisb\n");
+      ASM_DECLARE_FUNCTION_SIZE (out_file, name, decl);
+    }
+}
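+
+/* A sketch of what the loop above produces for x1 on an ELF target (the exact
+   section name and directives depend on the object format and assembler):
+	.section	.text.__call_indirect_x1,"axG",@progbits,__call_indirect_x1,comdat
+	.p2align	2
+	.global	__call_indirect_x1
+	.hidden	__call_indirect_x1
+	.type	__call_indirect_x1, %function
+   __call_indirect_x1:
+	mov	x16, x1
+	br	x16
+	dsb	sy
+	isb
+	.size	__call_indirect_x1, .-__call_indirect_x1  */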
+
+/* Implement TARGET_ASM_FILE_END.  */
+void
+aarch64_asm_file_end ()
+{
+  aarch64_sls_emit_shared_blr_thunks (asm_out_file);
+  /* Since this function will be called for the ASM_FILE_END hook, we ensure
+     that whatever would have been done otherwise (e.g. calling
+     `file_end_indicate_exec_stack` for FreeBSD) still happens.  At this point
+     TARGET_ASM_FILE_END still has the definition, if any, provided by the OS
+     headers; it is only redefined to this function further down, just before
+     TARGET_INITIALIZER.  */
+#ifdef TARGET_ASM_FILE_END
+  TARGET_ASM_FILE_END ();
+#endif
+}
+
+const char *
+aarch64_indirect_call_asm (rtx addr)
+{
+  gcc_assert (REG_P (addr));
+  if (aarch64_harden_sls_blr_p ())
+    {
+      rtx stub_label = aarch64_sls_create_blr_label (REGNO (addr));
+      output_asm_insn ("bl\t%0", &stub_label);
+    }
+  else
+    output_asm_insn ("blr\t%0", &addr);
+  return "";
+}
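+
+/* Note that the BL emitted above writes the return address to x30 exactly as
+   the BLR it replaces would have, and the stub's BR does not touch x30, so
+   the callee still returns directly to the original call site.  */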
+
/* Target-specific selftests.  */
#if CHECKING_P
#undef TARGET_MD_ASM_ADJUST
#define TARGET_MD_ASM_ADJUST arm_md_asm_adjust
+#undef TARGET_ASM_FILE_END
+#define TARGET_ASM_FILE_END aarch64_asm_file_end
+
+#undef TARGET_ASM_FUNCTION_EPILOGUE
+#define TARGET_ASM_FUNCTION_EPILOGUE aarch64_sls_emit_blr_function_thunks
+
struct gcc_target targetm = TARGET_INITIALIZER;
#include "gt-aarch64.h"