return new pass_stv (ctxt);
}
-/* Inserting ENDBRANCH instructions. */
+/* Inserting ENDBR and pseudo patchable-area instructions. */
-static unsigned int
-rest_of_insert_endbranch (void)
+static void
+rest_of_insert_endbr_and_patchable_area (bool need_endbr,
+ unsigned int patchable_area_size)
{
- timevar_push (TV_MACH_DEP);
-
- rtx cet_eb;
+ rtx endbr;
rtx_insn *insn;
+ rtx_insn *endbr_insn = NULL;
basic_block bb;
- /* Currently emit EB if it's a tracking function, i.e. 'nocf_check' is
- absent among function attributes. Later an optimization will be
- introduced to make analysis if an address of a static function is
- taken. A static function whose address is not taken will get a
- nocf_check attribute. This will allow to reduce the number of EB. */
-
- if (!lookup_attribute ("nocf_check",
- TYPE_ATTRIBUTES (TREE_TYPE (cfun->decl)))
- && (!flag_manual_endbr
- || lookup_attribute ("cf_check",
- DECL_ATTRIBUTES (cfun->decl)))
- && (!cgraph_node::get (cfun->decl)->only_called_directly_p ()
- || ix86_cmodel == CM_LARGE
- || ix86_cmodel == CM_LARGE_PIC
- || flag_force_indirect_call
- || (TARGET_DLLIMPORT_DECL_ATTRIBUTES
- && DECL_DLLIMPORT_P (cfun->decl))))
- {
- /* Queue ENDBR insertion to x86_function_profiler. */
+ if (need_endbr)
+ {
+      /* Currently emit an ENDBR if this is a tracking function, i.e. the
+	 'nocf_check' attribute is absent from the function attributes.
+	 Later an optimization will be introduced to analyze whether the
+	 address of a static function is taken; a static function whose
+	 address is not taken will get a 'nocf_check' attribute.  This
+	 will allow the number of ENDBR instructions to be reduced.  */
+ if (!lookup_attribute ("nocf_check",
+ TYPE_ATTRIBUTES (TREE_TYPE (cfun->decl)))
+ && (!flag_manual_endbr
+ || lookup_attribute ("cf_check",
+ DECL_ATTRIBUTES (cfun->decl)))
+ && (!cgraph_node::get (cfun->decl)->only_called_directly_p ()
+ || ix86_cmodel == CM_LARGE
+ || ix86_cmodel == CM_LARGE_PIC
+ || flag_force_indirect_call
+ || (TARGET_DLLIMPORT_DECL_ATTRIBUTES
+ && DECL_DLLIMPORT_P (cfun->decl))))
+ {
+ if (crtl->profile && flag_fentry)
+ {
+ /* Queue ENDBR insertion to x86_function_profiler.
+ NB: Any patchable-area insn will be inserted after
+ ENDBR. */
+ cfun->machine->insn_queued_at_entrance = TYPE_ENDBR;
+ }
+ else
+ {
+ endbr = gen_nop_endbr ();
+ bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
+ rtx_insn *insn = BB_HEAD (bb);
+ endbr_insn = emit_insn_before (endbr, insn);
+ }
+ }
+ }
+
+ if (patchable_area_size)
+ {
if (crtl->profile && flag_fentry)
- cfun->machine->endbr_queued_at_entrance = true;
+ {
+ /* Queue patchable-area insertion to x86_function_profiler.
+ NB: If there is a queued ENDBR, x86_function_profiler
+ will also handle patchable-area. */
+ if (!cfun->machine->insn_queued_at_entrance)
+ cfun->machine->insn_queued_at_entrance = TYPE_PATCHABLE_AREA;
+ }
else
{
- cet_eb = gen_nop_endbr ();
-
- bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
- insn = BB_HEAD (bb);
- emit_insn_before (cet_eb, insn);
+ rtx patchable_area
+ = gen_patchable_area (GEN_INT (patchable_area_size),
+ GEN_INT (crtl->patch_area_entry == 0));
+ if (endbr_insn)
+ emit_insn_after (patchable_area, endbr_insn);
+ else
+ {
+ bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
+ insn = BB_HEAD (bb);
+ emit_insn_before (patchable_area, insn);
+ }
}
}
+ if (!need_endbr)
+ return;
+
bb = 0;
FOR_EACH_BB_FN (bb, cfun)
{
{
if (CALL_P (insn))
{
- bool need_endbr;
need_endbr = find_reg_note (insn, REG_SETJMP, NULL) != NULL;
if (!need_endbr && !SIBLING_CALL_P (insn))
{
/* Generate ENDBRANCH after CALL, which can return more than
twice, setjmp-like functions. */
- cet_eb = gen_nop_endbr ();
- emit_insn_after_setloc (cet_eb, insn, INSN_LOCATION (insn));
+ endbr = gen_nop_endbr ();
+ emit_insn_after_setloc (endbr, insn, INSN_LOCATION (insn));
continue;
}
dest_blk = e->dest;
insn = BB_HEAD (dest_blk);
gcc_assert (LABEL_P (insn));
- cet_eb = gen_nop_endbr ();
- emit_insn_after (cet_eb, insn);
+ endbr = gen_nop_endbr ();
+ emit_insn_after (endbr, insn);
}
continue;
}
if (LABEL_P (insn) && LABEL_PRESERVE_P (insn))
{
- cet_eb = gen_nop_endbr ();
- emit_insn_after (cet_eb, insn);
+ endbr = gen_nop_endbr ();
+ emit_insn_after (endbr, insn);
continue;
}
}
}
- timevar_pop (TV_MACH_DEP);
- return 0;
+ return;
}
namespace {
-const pass_data pass_data_insert_endbranch =
+const pass_data pass_data_insert_endbr_and_patchable_area =
{
RTL_PASS, /* type. */
- "cet", /* name. */
+ "endbr_and_patchable_area", /* name. */
OPTGROUP_NONE, /* optinfo_flags. */
TV_MACH_DEP, /* tv_id. */
0, /* properties_required. */
0, /* todo_flags_finish. */
};
-class pass_insert_endbranch : public rtl_opt_pass
+class pass_insert_endbr_and_patchable_area : public rtl_opt_pass
{
public:
- pass_insert_endbranch (gcc::context *ctxt)
- : rtl_opt_pass (pass_data_insert_endbranch, ctxt)
+ pass_insert_endbr_and_patchable_area (gcc::context *ctxt)
+ : rtl_opt_pass (pass_data_insert_endbr_and_patchable_area, ctxt)
{}
/* opt_pass methods: */
virtual bool gate (function *)
{
- return ((flag_cf_protection & CF_BRANCH));
+ need_endbr = (flag_cf_protection & CF_BRANCH) != 0;
+ patchable_area_size = crtl->patch_area_size - crtl->patch_area_entry;
+ return need_endbr || patchable_area_size;
}
virtual unsigned int execute (function *)
{
- return rest_of_insert_endbranch ();
+ timevar_push (TV_MACH_DEP);
+ rest_of_insert_endbr_and_patchable_area (need_endbr,
+ patchable_area_size);
+ timevar_pop (TV_MACH_DEP);
+ return 0;
}
-}; // class pass_insert_endbranch
+private:
+ bool need_endbr;
+ unsigned int patchable_area_size;
+}; // class pass_insert_endbr_and_patchable_area
} // anon namespace
rtl_opt_pass *
-make_pass_insert_endbranch (gcc::context *ctxt)
+make_pass_insert_endbr_and_patchable_area (gcc::context *ctxt)
{
- return new pass_insert_endbranch (ctxt);
+ return new pass_insert_endbr_and_patchable_area (ctxt);
}
/* At entry of the nearest common dominator for basic blocks with
{
bool is_ms_hook = ix86_function_ms_hook_prologue (decl);
+ if (cfun)
+ cfun->machine->function_label_emitted = true;
+
if (is_ms_hook)
{
int i, filler_count = (TARGET_64BIT ? 32 : 16);
}
}
+/* Implement TARGET_ASM_PRINT_PATCHABLE_FUNCTION_ENTRY. */
+
+void
+ix86_print_patchable_function_entry (FILE *file,
+ unsigned HOST_WIDE_INT patch_area_size,
+ bool record_p)
+{
+ if (cfun->machine->function_label_emitted)
+ {
+      /* NB: When ix86_print_patchable_function_entry is called after
+	 the function label has been emitted, we have inserted or queued
+	 a pseudo UNSPECV_PATCHABLE_AREA instruction at the proper
+	 place.  There is nothing to do here.  */
+ return;
+ }
+
+ default_print_patchable_function_entry (file, patch_area_size,
+ record_p);
+}
+
+/* Output patchable area. NB: default_print_patchable_function_entry
+ isn't available in i386.md. */
+
+void
+ix86_output_patchable_area (unsigned int patch_area_size,
+ bool record_p)
+{
+ default_print_patchable_function_entry (asm_out_file,
+ patch_area_size,
+ record_p);
+}
+
/* Return a scratch register to use in the split stack prologue. The
split stack prologue is used for -fsplit-stack. It is the first
instructions in the function, even before the regular prologue.
void
x86_function_profiler (FILE *file, int labelno ATTRIBUTE_UNUSED)
{
- if (cfun->machine->endbr_queued_at_entrance)
- fprintf (file, "\t%s\n", TARGET_64BIT ? "endbr64" : "endbr32");
+ if (cfun->machine->insn_queued_at_entrance)
+ {
+ if (cfun->machine->insn_queued_at_entrance == TYPE_ENDBR)
+ fprintf (file, "\t%s\n", TARGET_64BIT ? "endbr64" : "endbr32");
+ unsigned int patch_area_size
+ = crtl->patch_area_size - crtl->patch_area_entry;
+ if (patch_area_size)
+ ix86_output_patchable_area (patch_area_size,
+ crtl->patch_area_entry == 0);
+ }
const char *mcount_name = MCOUNT_NAME;
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE ix86_output_function_epilogue
+#undef TARGET_ASM_PRINT_PATCHABLE_FUNCTION_ENTRY
+#define TARGET_ASM_PRINT_PATCHABLE_FUNCTION_ENTRY \
+ ix86_print_patchable_function_entry
+
#undef TARGET_ENCODE_SECTION_INFO
#ifndef SUBTARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO ix86_encode_section_info