static tree handle_optimize_attribute (tree *, tree, tree, int, bool *);
static tree ignore_attribute (tree *, tree, tree, int, bool *);
static tree handle_no_split_stack_attribute (tree *, tree, tree, int, bool *);
+static tree handle_zero_call_used_regs_attribute (tree *, tree, tree, int,
+ bool *);
static tree handle_argspec_attribute (tree *, tree, tree, int, bool *);
static tree handle_fnspec_attribute (tree *, tree, tree, int, bool *);
static tree handle_warn_unused_attribute (tree *, tree, tree, int, bool *);
ignore_attribute, NULL },
{ "no_split_stack", 0, 0, true, false, false, false,
handle_no_split_stack_attribute, NULL },
+ { "zero_call_used_regs", 1, 1, true, false, false, false,
+ handle_zero_call_used_regs_attribute, NULL },
/* For internal use only (marking of function arguments).
The name contains a space to prevent its usage in source code. */
{ "arg spec", 1, -1, true, false, false, false,
return NULL_TREE;
}
+/* Handle a "zero_call_used_regs" attribute; arguments as in
+ struct attribute_spec.handler. */
+
+static tree
+handle_zero_call_used_regs_attribute (tree *node, tree name, tree args,
+ int ARG_UNUSED (flags),
+ bool *no_add_attrs)
+{
+ tree decl = *node;
+ tree id = TREE_VALUE (args);
+
+ if (TREE_CODE (decl) != FUNCTION_DECL)
+ {
+ error_at (DECL_SOURCE_LOCATION (decl),
+ "%qE attribute applies only to functions", name);
+ *no_add_attrs = true;
+ return NULL_TREE;
+ }
+
+ if (TREE_CODE (id) != STRING_CST)
+ {
+ error_at (DECL_SOURCE_LOCATION (decl),
+ "%qE argument not a string", name);
+ *no_add_attrs = true;
+ return NULL_TREE;
+ }
+
+ bool found = false;
+ for (unsigned int i = 0; zero_call_used_regs_opts[i].name != NULL; ++i)
+ if (strcmp (TREE_STRING_POINTER (id),
+ zero_call_used_regs_opts[i].name) == 0)
+ {
+ found = true;
+ break;
+ }
+
+ if (!found)
+ {
+ error_at (DECL_SOURCE_LOCATION (decl),
+ "unrecognized %qE attribute argument %qs",
+ name, TREE_STRING_POINTER (id));
+ *no_add_attrs = true;
+ }
+
+ return NULL_TREE;
+}
+
/* Handle a "returns_nonnull" attribute; arguments as in
struct attribute_spec.handler. */
Variable
bool dump_base_name_prefixed = false
+; What subset of registers should be zeroed on function return.
+Variable
+unsigned int flag_zero_call_used_regs
+
###
Driver
Common Report Var(flag_zero_initialized_in_bss) Init(1)
Put zero initialized data in the bss section.
+fzero-call-used-regs=
+Common Report RejectNegative Joined
+Clear call-used registers upon function return.
+
g
Common Driver RejectNegative JoinedOrMissing
Generate debug information in default format.
return false;
}
+/* Check whether the register REGNO should be zeroed on x86.
+ When ALL_SSE_ZEROED is true, all SSE registers have already been
+ zeroed together, so there is no need to zero them individually.
+ When NEED_ZERO_MMX is true, MMX registers should be cleared. */
+
+static bool
+zero_call_used_regno_p (const unsigned int regno,
+ bool all_sse_zeroed,
+ bool need_zero_mmx)
+{
+ return GENERAL_REGNO_P (regno)
+ || (!all_sse_zeroed && SSE_REGNO_P (regno))
+ || MASK_REGNO_P (regno)
+ || (need_zero_mmx && MMX_REGNO_P (regno));
+}
+
+/* Return the machine_mode that is used to zero register REGNO. */
+
+static machine_mode
+zero_call_used_regno_mode (const unsigned int regno)
+{
+ /* NB: We only need to zero the lower 32 bits for integer registers
+ and the lower 128 bits for vector registers since the destinations
+ are implicitly zero-extended to the full register width. */
+ if (GENERAL_REGNO_P (regno))
+ return SImode;
+ else if (SSE_REGNO_P (regno))
+ return V4SFmode;
+ else if (MASK_REGNO_P (regno))
+ return HImode;
+ else if (MMX_REGNO_P (regno))
+ return V4HImode;
+ else
+ gcc_unreachable ();
+}
+
+/* Generate an rtx to zero all vector registers together if possible,
+ otherwise return NULL. */
+
+static rtx
+zero_all_vector_registers (HARD_REG_SET need_zeroed_hardregs)
+{
+ if (!TARGET_AVX)
+ return NULL;
+
+ for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if ((IN_RANGE (regno, FIRST_SSE_REG, LAST_SSE_REG)
+ || (TARGET_64BIT
+ && (REX_SSE_REGNO_P (regno)
+ || (TARGET_AVX512F && EXT_REX_SSE_REGNO_P (regno)))))
+ && !TEST_HARD_REG_BIT (need_zeroed_hardregs, regno))
+ return NULL;
+
+ return gen_avx_vzeroall ();
+}
+
+/* Generate insns to zero all st registers together.
+ Return true when zeroing instructions are generated.
+ Assuming the number of st registers to be zeroed is num_of_st,
+ we emit the following sequence to zero them together:
+ fldz;
+ fldz;
+ ...
+ fldz;
+ fstp %%st(0);
+ fstp %%st(0);
+ ...
+ fstp %%st(0);
+ i.e., num_of_st fldz instructions followed by num_of_st fstp
+ instructions to clear the stack and mark the stack slots empty.
+
+ How num_of_st is computed:
+ There is no direct mapping from stack registers to hard register
+ numbers. If one stack register needs to be cleared, we don't know
+ where in the stack the value remains. So, if any stack register
+ needs to be cleared, the whole stack should be cleared. However,
+ the x87 stack registers that hold the return value should be
+ excluded. x87 returns in the top register (the top two for
+ complex values), so num_of_st should be 7 (or 6 for a complex
+ return) when the function returns in x87 mode, and 8 otherwise. */
+
+static bool
+zero_all_st_registers (HARD_REG_SET need_zeroed_hardregs)
+{
+ unsigned int num_of_st = 0;
+ for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if ((STACK_REGNO_P (regno) || MMX_REGNO_P (regno))
+ && TEST_HARD_REG_BIT (need_zeroed_hardregs, regno))
+ {
+ num_of_st++;
+ break;
+ }
+
+ if (num_of_st == 0)
+ return false;
+
+ bool return_with_x87
+ = (crtl->return_rtx && STACK_REG_P (crtl->return_rtx));
+
+ bool complex_return
+ = (crtl->return_rtx
+ && COMPLEX_MODE_P (GET_MODE (crtl->return_rtx)));
+
+ if (return_with_x87)
+ {
+ if (complex_return)
+ num_of_st = 6;
+ else
+ num_of_st = 7;
+ }
+ else
+ num_of_st = 8;
+
+ rtx st_reg = gen_rtx_REG (XFmode, FIRST_STACK_REG);
+ for (unsigned int i = 0; i < num_of_st; i++)
+ emit_insn (gen_rtx_SET (st_reg, CONST0_RTX (XFmode)));
+
+ for (unsigned int i = 0; i < num_of_st; i++)
+ {
+ rtx insn;
+ insn = emit_insn (gen_rtx_SET (st_reg, st_reg));
+ add_reg_note (insn, REG_DEAD, st_reg);
+ }
+ return true;
+}
+
+/* When the routine exits in MMX mode, if any ST register needs
+ to be zeroed, clear all MMX registers except RET_MMX_REGNO,
+ which holds the return value. */
+
+static bool
+zero_all_mm_registers (HARD_REG_SET need_zeroed_hardregs,
+ unsigned int ret_mmx_regno)
+{
+ bool need_zero_all_mm = false;
+ for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if (STACK_REGNO_P (regno)
+ && TEST_HARD_REG_BIT (need_zeroed_hardregs, regno))
+ {
+ need_zero_all_mm = true;
+ break;
+ }
+
+ if (!need_zero_all_mm)
+ return false;
+
+ rtx zero_mmx = NULL_RTX;
+ machine_mode mode = V4HImode;
+ for (unsigned int regno = FIRST_MMX_REG; regno <= LAST_MMX_REG; regno++)
+ if (regno != ret_mmx_regno)
+ {
+ rtx reg = gen_rtx_REG (mode, regno);
+ if (zero_mmx == NULL_RTX)
+ {
+ zero_mmx = reg;
+ emit_insn (gen_rtx_SET (reg, CONST0_RTX (mode)));
+ }
+ else
+ emit_move_insn (reg, zero_mmx);
+ }
+ return true;
+}
+
+/* TARGET_ZERO_CALL_USED_REGS. */
+/* Generate a sequence of instructions that zero registers specified by
+ NEED_ZEROED_HARDREGS. Return the ZEROED_HARDREGS that are actually
+ zeroed. */
+static HARD_REG_SET
+ix86_zero_call_used_regs (HARD_REG_SET need_zeroed_hardregs)
+{
+ HARD_REG_SET zeroed_hardregs;
+ bool all_sse_zeroed = false;
+ bool all_st_zeroed = false;
+ bool all_mm_zeroed = false;
+
+ CLEAR_HARD_REG_SET (zeroed_hardregs);
+
+ /* First, see whether we can zero all vector registers together. */
+ rtx zero_all_vec_insn = zero_all_vector_registers (need_zeroed_hardregs);
+ if (zero_all_vec_insn)
+ {
+ emit_insn (zero_all_vec_insn);
+ all_sse_zeroed = true;
+ }
+
+ /* The MM and ST registers share one register file, so we follow
+ these rules to clear them:
+ MMX exit mode x87 exit mode
+ -------------|----------------------|---------------
+ uses x87 reg | clear all MMX | clear all x87
+ uses MMX reg | clear individual MMX | clear all x87
+ x87 + MMX | clear all MMX | clear all x87
+
+ First, decide which mode (MMX mode or x87 mode) the function
+ exits with. */
+
+ bool exit_with_mmx_mode = (crtl->return_rtx
+ && (MMX_REG_P (crtl->return_rtx)));
+
+ if (!exit_with_mmx_mode)
+ /* In x87 exit mode, zero all st registers together. */
+ {
+ all_st_zeroed = zero_all_st_registers (need_zeroed_hardregs);
+ if (all_st_zeroed)
+ SET_HARD_REG_BIT (zeroed_hardregs, FIRST_STACK_REG);
+ }
+ else
+ /* In MMX exit mode, check whether we can zero all mm registers. */
+ {
+ unsigned int exit_mmx_regno = REGNO (crtl->return_rtx);
+ all_mm_zeroed = zero_all_mm_registers (need_zeroed_hardregs,
+ exit_mmx_regno);
+ if (all_mm_zeroed)
+ for (unsigned int regno = FIRST_MMX_REG; regno <= LAST_MMX_REG; regno++)
+ if (regno != exit_mmx_regno)
+ SET_HARD_REG_BIT (zeroed_hardregs, regno);
+ }
+
+ /* Now, generate instructions to zero all the other registers. */
+
+ rtx zero_gpr = NULL_RTX;
+ rtx zero_vector = NULL_RTX;
+ rtx zero_mask = NULL_RTX;
+ rtx zero_mmx = NULL_RTX;
+
+ for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ {
+ if (!TEST_HARD_REG_BIT (need_zeroed_hardregs, regno))
+ continue;
+ if (!zero_call_used_regno_p (regno, all_sse_zeroed,
+ exit_with_mmx_mode && !all_mm_zeroed))
+ continue;
+
+ SET_HARD_REG_BIT (zeroed_hardregs, regno);
+
+ rtx reg, tmp, zero_rtx;
+ machine_mode mode = zero_call_used_regno_mode (regno);
+
+ reg = gen_rtx_REG (mode, regno);
+ zero_rtx = CONST0_RTX (mode);
+
+ if (mode == SImode)
+ {
+ if (zero_gpr == NULL_RTX)
+ {
+ zero_gpr = reg;
+ tmp = gen_rtx_SET (reg, zero_rtx);
+ if (!TARGET_USE_MOV0 || optimize_insn_for_size_p ())
+ {
+ rtx clob = gen_rtx_CLOBBER (VOIDmode,
+ gen_rtx_REG (CCmode,
+ FLAGS_REG));
+ tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2,
+ tmp,
+ clob));
+ }
+ emit_insn (tmp);
+ }
+ else
+ emit_move_insn (reg, zero_gpr);
+ }
+ else if (mode == V4SFmode)
+ {
+ if (zero_vector == NULL_RTX)
+ {
+ zero_vector = reg;
+ tmp = gen_rtx_SET (reg, zero_rtx);
+ emit_insn (tmp);
+ }
+ else
+ emit_move_insn (reg, zero_vector);
+ }
+ else if (mode == HImode)
+ {
+ if (zero_mask == NULL_RTX)
+ {
+ zero_mask = reg;
+ tmp = gen_rtx_SET (reg, zero_rtx);
+ emit_insn (tmp);
+ }
+ else
+ emit_move_insn (reg, zero_mask);
+ }
+ else if (mode == V4HImode)
+ {
+ if (zero_mmx == NULL_RTX)
+ {
+ zero_mmx = reg;
+ tmp = gen_rtx_SET (reg, zero_rtx);
+ emit_insn (tmp);
+ }
+ else
+ emit_move_insn (reg, zero_mmx);
+ }
+ else
+ gcc_unreachable ();
+ }
+ return zeroed_hardregs;
+}
+
/* Define how to find the value returned by a function.
VALTYPE is the data type of the value (as a tree).
If the precise function being called is known, FUNC is its FUNCTION_DECL;
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P ix86_function_value_regno_p
+#undef TARGET_ZERO_CALL_USED_REGS
+#define TARGET_ZERO_CALL_USED_REGS ix86_zero_call_used_regs
+
#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE ix86_promote_function_mode
}
+/* Return true if REGNO is used by the epilogue. */
+bool
+df_epilogue_uses_p (unsigned int regno)
+{
+ return (EPILOGUE_USES (regno)
+ || TEST_HARD_REG_BIT (crtl->must_be_zero_on_return, regno));
+}
+
/* Set the bit for regs that are considered being used at the exit. */
static void
epilogue as being live at the end of the function since they
may be referenced by our caller. */
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (global_regs[i] || EPILOGUE_USES (i))
+ if (global_regs[i] || df_epilogue_uses_p (i))
bitmap_set_bit (exit_block_uses, i);
if (targetm.have_epilogue () && epilogue_completed)
initialized = true;
}
-
/* Recompute the parts of scanning that are based on regs_ever_live
because something changed in that array. */
return regs_ever_live[regno];
}
-
/* Set regs_ever_live[REGNO] to VALUE. If this cause regs_ever_live
to change, schedule that change for the next update. */
extern bool df_hard_reg_used_p (unsigned int);
extern unsigned int df_hard_reg_used_count (unsigned int);
extern bool df_regs_ever_live_p (unsigned int);
+extern bool df_epilogue_uses_p (unsigned int);
extern void df_set_regs_ever_live (unsigned int, bool);
extern void df_compute_regs_ever_live (bool);
extern void df_scan_verify (void);
A declaration to which @code{weakref} is attached and that is associated
with a named @code{target} must be @code{static}.
+@item zero_call_used_regs ("@var{choice}")
+@cindex @code{zero_call_used_regs} function attribute
+
+The @code{zero_call_used_regs} attribute causes the compiler to zero
+a subset of all call-used registers@footnote{A ``call-used'' register
+is a register whose contents can be changed by a function call;
+therefore, a caller cannot assume that the register has the same contents
+on return from the function as it had before calling the function. Such
+registers are also called ``call-clobbered'', ``caller-saved'', or
+``volatile''.} at function return.
+This is used to increase program security by either mitigating
+Return-Oriented Programming (ROP) attacks or preventing information leakage
+through registers.
+
+In order to satisfy users with different security needs and control the
+run-time overhead at the same time, the @var{choice} parameter provides a
+flexible way to choose the subset of the call-used registers to be zeroed.
+The three basic values of @var{choice} are:
+
+@itemize @bullet
+@item
+@samp{skip} doesn't zero any call-used registers.
+
+@item
+@samp{used} only zeros call-used registers that are used in the function.
+A ``used'' register is one whose content has been set or referenced in
+the function.
+
+@item
+@samp{all} zeros all call-used registers.
+@end itemize
+
+In addition to these three basic choices, it is possible to modify
+@samp{used} or @samp{all} as follows:
+
+@itemize @bullet
+@item
+Adding @samp{-gpr} restricts the zeroing to general-purpose registers.
+
+@item
+Adding @samp{-arg} restricts the zeroing to registers that can sometimes
+be used to pass function arguments. This includes all argument registers
+defined by the platform's calling convention, regardless of whether the
+function uses those registers for function arguments or not.
+@end itemize
+
+The modifiers can be used individually or together. If they are used
+together, they must appear in the order above.
+
+The full list of @var{choice}s is therefore:
+
+@table @code
+@item skip
+doesn't zero any call-used register.
+
+@item used
+only zeros call-used registers that are used in the function.
+
+@item used-gpr
+only zeros call-used general purpose registers that are used in the function.
+
+@item used-arg
+only zeros call-used registers that are used in the function and pass arguments.
+
+@item used-gpr-arg
+only zeros call-used general purpose registers that are used in the function
+and pass arguments.
+
+@item all
+zeros all call-used registers.
+
+@item all-gpr
+zeros all call-used general purpose registers.
+
+@item all-arg
+zeros all call-used registers that pass arguments.
+
+@item all-gpr-arg
+zeros all call-used general purpose registers that pass
+arguments.
+@end table
+
+Of this list, @samp{used-arg}, @samp{used-gpr-arg}, @samp{all-arg},
+and @samp{all-gpr-arg} are mainly used for ROP mitigation.
+
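+As an example, a declaration like the following (the function name
+@code{foo} is only a placeholder) requests that the call-clobbered
+general-purpose registers used by @code{foo} be cleared when it returns:
+
+@smallexample
+extern void foo (void) __attribute__ ((zero_call_used_regs ("used-gpr")));
+@end smallexample
+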
+The default for the attribute is controlled by @option{-fzero-call-used-regs}.
@end table
@c This is the end of the target-independent attribute table
-funit-at-a-time -funroll-all-loops -funroll-loops @gol
-funsafe-math-optimizations -funswitch-loops @gol
-fipa-ra -fvariable-expansion-in-unroller -fvect-cost-model -fvpt @gol
--fweb -fwhole-program -fwpa -fuse-linker-plugin @gol
+-fweb -fwhole-program -fwpa -fuse-linker-plugin -fzero-call-used-regs @gol
--param @var{name}=@var{value}
-O -O0 -O1 -O2 -O3 -Os -Ofast -Og}
Not all targets support this option.
+@item -fzero-call-used-regs=@var{choice}
+@opindex fzero-call-used-regs
+Zero call-used registers at function return to increase program
+security by either mitigating Return-Oriented Programming (ROP)
+attacks or preventing information leakage through registers.
+
+The possible values of @var{choice} are the same as for the
+@code{zero_call_used_regs} attribute (@pxref{Function Attributes}).
+The default is @samp{skip}.
+
+You can control this behavior for a specific function by using the function
+attribute @code{zero_call_used_regs} (@pxref{Function Attributes}).
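+
+For example, the following command line (where @file{foo.c} stands for
+any source file) makes each function clear the call-used registers it
+actually used before returning:
+
+@smallexample
+gcc -O2 -fzero-call-used-regs=used foo.c
+@end smallexample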
+
@item --param @var{name}=@var{value}
@opindex param
In some places, GCC uses various constants to control the amount of
is needed.
@end deftypefn
+@deftypefn {Target Hook} HARD_REG_SET TARGET_ZERO_CALL_USED_REGS (HARD_REG_SET @var{selected_regs})
+This target hook emits instructions to zero the subset of @var{selected_regs}
+that could conceivably contain values that are useful to an attacker.
+Return the set of registers that were actually cleared.
+
+The default implementation uses normal move instructions to zero
+all the registers in @var{selected_regs}. Define this hook if the
+target has more efficient ways of zeroing certain registers,
+or if you believe that certain registers would never contain
+values that are useful to an attacker.
+@end deftypefn
+
@deftypefn {Target Hook} bool TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS (void)
When optimization is disabled, this hook indicates whether or not
arguments should be allocated to stack slots. Normally, GCC allocates
@hook TARGET_GET_DRAP_RTX
+@hook TARGET_ZERO_CALL_USED_REGS
+
@hook TARGET_ALLOCATE_STACK_SLOTS_FOR_ARGS
@hook TARGET_CONST_ANCHOR
sets them. */
HARD_REG_SET asm_clobbers;
+ /* All hard registers that need to be zeroed when the routine returns. */
+ HARD_REG_SET must_be_zero_on_return;
+
/* The highest address seen during shorten_branches. */
int max_insn_address;
};
#ifndef GCC_FLAG_TYPES_H
#define GCC_FLAG_TYPES_H
+#if !defined(IN_LIBGCC2) && !defined(IN_TARGET_LIBS) && !defined(IN_RTS)
+
enum debug_info_type
{
NO_DEBUG, /* Write no debug info. */
| SANITIZE_BOUNDS_STRICT
};
+/* Different settings for zeroing subset of registers. */
+namespace zero_regs_flags {
+ const unsigned int UNSET = 0;
+ const unsigned int SKIP = 1UL << 0;
+ const unsigned int ONLY_USED = 1UL << 1;
+ const unsigned int ONLY_GPR = 1UL << 2;
+ const unsigned int ONLY_ARG = 1UL << 3;
+ const unsigned int ENABLED = 1UL << 4;
+ const unsigned int USED_GPR_ARG = ENABLED | ONLY_USED | ONLY_GPR | ONLY_ARG;
+ const unsigned int USED_GPR = ENABLED | ONLY_USED | ONLY_GPR;
+ const unsigned int USED_ARG = ENABLED | ONLY_USED | ONLY_ARG;
+ const unsigned int USED = ENABLED | ONLY_USED;
+ const unsigned int ALL_GPR_ARG = ENABLED | ONLY_GPR | ONLY_ARG;
+ const unsigned int ALL_GPR = ENABLED | ONLY_GPR;
+ const unsigned int ALL_ARG = ENABLED | ONLY_ARG;
+ const unsigned int ALL = ENABLED;
+}
+
/* Settings of flag_incremental_link. */
enum incremental_link {
INCREMENTAL_LINK_NONE,
EVRP_MODE_RVRP_DEBUG = EVRP_MODE_RVRP_ONLY | EVRP_MODE_DEBUG
};
+#endif
+
#endif /* ! GCC_FLAG_TYPES_H */
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
+#include "opts.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "rtl-error.h"
+#include "hard-reg-set.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
return seq;
}
+/* Emit a sequence of insns to zero the call-used registers before RET
+ according to ZERO_REGS_TYPE. */
+
+static void
+gen_call_used_regs_seq (rtx_insn *ret, unsigned int zero_regs_type)
+{
+ bool only_gpr = true;
+ bool only_used = true;
+ bool only_arg = true;
+
+ /* No need to zero call-used-regs in main (). */
+ if (MAIN_NAME_P (DECL_NAME (current_function_decl)))
+ return;
+
+ /* No need to zero call-used-regs if __builtin_eh_return is called
+ since it isn't a normal function return. */
+ if (crtl->calls_eh_return)
+ return;
+
+ /* If only_gpr is true, only zero call-used registers that are
+ general-purpose registers; if only_used is true, only zero
+ call-used registers that are used in the current function;
+ if only_arg is true, only zero call-used registers that pass
+ parameters as defined by the platform's calling convention. */
+
+ using namespace zero_regs_flags;
+
+ only_gpr = zero_regs_type & ONLY_GPR;
+ only_used = zero_regs_type & ONLY_USED;
+ only_arg = zero_regs_type & ONLY_ARG;
+
+ /* For each of the hard registers, we should zero it if:
+ 1. it is a call-used register;
+ and 2. it is not a fixed register;
+ and 3. it is not live at the return of the routine;
+ and 4. it is a general register if only_gpr is true;
+ and 5. it is used in the routine if only_used is true;
+ and 6. it is a register that passes parameters if only_arg is true. */
+
+ /* First, prepare the data flow information. */
+ basic_block bb = BLOCK_FOR_INSN (ret);
+ auto_bitmap live_out;
+ bitmap_copy (live_out, df_get_live_out (bb));
+ df_simulate_initialize_backwards (bb, live_out);
+ df_simulate_one_insn_backwards (bb, ret, live_out);
+
+ HARD_REG_SET selected_hardregs;
+ CLEAR_HARD_REG_SET (selected_hardregs);
+ for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ {
+ if (!crtl->abi->clobbers_full_reg_p (regno))
+ continue;
+ if (fixed_regs[regno])
+ continue;
+ if (REGNO_REG_SET_P (live_out, regno))
+ continue;
+ if (only_gpr
+ && !TEST_HARD_REG_BIT (reg_class_contents[GENERAL_REGS], regno))
+ continue;
+ if (only_used && !df_regs_ever_live_p (regno))
+ continue;
+ if (only_arg && !FUNCTION_ARG_REGNO_P (regno))
+ continue;
+
+ /* Now this is a register that we might want to zero. */
+ SET_HARD_REG_BIT (selected_hardregs, regno);
+ }
+
+ if (hard_reg_set_empty_p (selected_hardregs))
+ return;
+
+ /* Now that we have a hard register set that needs to be zeroed, pass it
+ to the target to generate the zeroing sequence. */
+ HARD_REG_SET zeroed_hardregs;
+ start_sequence ();
+ zeroed_hardregs = targetm.calls.zero_call_used_regs (selected_hardregs);
+ rtx_insn *seq = get_insns ();
+ end_sequence ();
+ if (seq)
+ {
+ /* Emit the memory blockage and register clobber asm volatile before
+ the whole sequence. */
+ start_sequence ();
+ expand_asm_reg_clobber_mem_blockage (zeroed_hardregs);
+ rtx_insn *seq_barrier = get_insns ();
+ end_sequence ();
+
+ emit_insn_before (seq_barrier, ret);
+ emit_insn_before (seq, ret);
+
+ /* Update the data flow information. */
+ crtl->must_be_zero_on_return |= zeroed_hardregs;
+ df_set_bb_dirty (EXIT_BLOCK_PTR_FOR_FN (cfun));
+ }
+}
+
+
/* Return a sequence to be used as the epilogue for the current function,
or NULL. */
{
return new pass_thread_prologue_and_epilogue (ctxt);
}
-\f
+
+namespace {
+
+const pass_data pass_data_zero_call_used_regs =
+{
+ RTL_PASS, /* type */
+ "zero_call_used_regs", /* name */
+ OPTGROUP_NONE, /* optinfo_flags */
+ TV_NONE, /* tv_id */
+ 0, /* properties_required */
+ 0, /* properties_provided */
+ 0, /* properties_destroyed */
+ 0, /* todo_flags_start */
+ 0, /* todo_flags_finish */
+};
+
+class pass_zero_call_used_regs: public rtl_opt_pass
+{
+public:
+ pass_zero_call_used_regs (gcc::context *ctxt)
+ : rtl_opt_pass (pass_data_zero_call_used_regs, ctxt)
+ {}
+
+ /* opt_pass methods: */
+ virtual unsigned int execute (function *);
+
+}; // class pass_zero_call_used_regs
+
+unsigned int
+pass_zero_call_used_regs::execute (function *fun)
+{
+ using namespace zero_regs_flags;
+ unsigned int zero_regs_type = UNSET;
+
+ tree attr_zero_regs = lookup_attribute ("zero_call_used_regs",
+ DECL_ATTRIBUTES (fun->decl));
+
+ /* Get the type of zero_call_used_regs from function attribute.
+ We have filtered out invalid attribute values already at this point. */
+ if (attr_zero_regs)
+ {
+ /* The TREE_VALUE of an attribute is a TREE_LIST whose TREE_VALUE
+ is the attribute argument's value. */
+ attr_zero_regs = TREE_VALUE (attr_zero_regs);
+ gcc_assert (TREE_CODE (attr_zero_regs) == TREE_LIST);
+ attr_zero_regs = TREE_VALUE (attr_zero_regs);
+ gcc_assert (TREE_CODE (attr_zero_regs) == STRING_CST);
+
+ for (unsigned int i = 0; zero_call_used_regs_opts[i].name != NULL; ++i)
+ if (strcmp (TREE_STRING_POINTER (attr_zero_regs),
+ zero_call_used_regs_opts[i].name) == 0)
+ {
+ zero_regs_type = zero_call_used_regs_opts[i].flag;
+ break;
+ }
+ }
+
+ if (!zero_regs_type)
+ zero_regs_type = flag_zero_call_used_regs;
+
+ /* No need to zero call-used-regs when no user request is present. */
+ if (!(zero_regs_type & ENABLED))
+ return 0;
+
+ edge_iterator ei;
+ edge e;
+
+ /* This pass needs data flow information. */
+ df_analyze ();
+
+ /* Iterate over the function's return instructions and insert any
+ register zeroing required by the -fzero-call-used-regs command-line
+ option or the "zero_call_used_regs" function attribute. */
+ FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
+ {
+ rtx_insn *insn = BB_END (e->src);
+ if (JUMP_P (insn) && ANY_RETURN_P (JUMP_LABEL (insn)))
+ gen_call_used_regs_seq (insn, zero_regs_type);
+ }
+
+ return 0;
+}
+
+} // anon namespace
+
+rtl_opt_pass *
+make_pass_zero_call_used_regs (gcc::context *ctxt)
+{
+ return new pass_zero_call_used_regs (ctxt);
+}
/* If CONSTRAINT is a matching constraint, then return its number.
Otherwise, return -1. */
expand_asm_memory_blockage ();
}
+/* Generate asm volatile("" : : : "memory") as a memory blockage that
+ at the same time clobbers the register set specified by REGS. */
+
+void
+expand_asm_reg_clobber_mem_blockage (HARD_REG_SET regs)
+{
+ rtx asm_op, clob_mem;
+
+ unsigned int num_of_regs = 0;
+ for (unsigned int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (TEST_HARD_REG_BIT (regs, i))
+ num_of_regs++;
+
+ asm_op = gen_rtx_ASM_OPERANDS (VOIDmode, "", "", 0,
+ rtvec_alloc (0), rtvec_alloc (0),
+ rtvec_alloc (0), UNKNOWN_LOCATION);
+ MEM_VOLATILE_P (asm_op) = 1;
+
+ rtvec v = rtvec_alloc (num_of_regs + 2);
+
+ clob_mem = gen_rtx_SCRATCH (VOIDmode);
+ clob_mem = gen_rtx_MEM (BLKmode, clob_mem);
+ clob_mem = gen_rtx_CLOBBER (VOIDmode, clob_mem);
+
+ RTVEC_ELT (v, 0) = asm_op;
+ RTVEC_ELT (v, 1) = clob_mem;
+
+ if (num_of_regs > 0)
+ {
+ unsigned int j = 2;
+ for (unsigned int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+ if (TEST_HARD_REG_BIT (regs, i))
+ {
+ RTVEC_ELT (v, j) = gen_rtx_CLOBBER (VOIDmode, regno_reg_rtx[i]);
+ j++;
+ }
+ gcc_assert (j == (num_of_regs + 2));
+ }
+
+ emit_insn (gen_rtx_PARALLEL (VOIDmode, v));
+}
+
/* This routine will either emit the mem_thread_fence pattern or issue a
sync_synchronize to generate a fence for memory model MEMMODEL. */
rtx expand_atomic_fetch_op (rtx, rtx, rtx, enum rtx_code, enum memmodel,
bool);
+extern void expand_asm_reg_clobber_mem_blockage (HARD_REG_SET);
+
extern bool insn_operand_matches (enum insn_code icode, unsigned int opno,
rtx operand);
extern bool valid_multiword_target_p (rtx);
{ NULL, 0U, 0UL, false }
};
+/* -fzero-call-used-regs= suboptions. */
+const struct zero_call_used_regs_opts_s zero_call_used_regs_opts[] =
+{
+#define ZERO_CALL_USED_REGS_OPT(name, flags) \
+ { #name, flags }
+ ZERO_CALL_USED_REGS_OPT (skip, zero_regs_flags::SKIP),
+ ZERO_CALL_USED_REGS_OPT (used-gpr-arg, zero_regs_flags::USED_GPR_ARG),
+ ZERO_CALL_USED_REGS_OPT (used-gpr, zero_regs_flags::USED_GPR),
+ ZERO_CALL_USED_REGS_OPT (used-arg, zero_regs_flags::USED_ARG),
+ ZERO_CALL_USED_REGS_OPT (used, zero_regs_flags::USED),
+ ZERO_CALL_USED_REGS_OPT (all-gpr-arg, zero_regs_flags::ALL_GPR_ARG),
+ ZERO_CALL_USED_REGS_OPT (all-gpr, zero_regs_flags::ALL_GPR),
+ ZERO_CALL_USED_REGS_OPT (all-arg, zero_regs_flags::ALL_ARG),
+ ZERO_CALL_USED_REGS_OPT (all, zero_regs_flags::ALL),
+#undef ZERO_CALL_USED_REGS_OPT
+ {NULL, 0U}
+};
+
/* A struct for describing a run of chars within a string. */
class string_fragment
return flags;
}
+/* Parse -fzero-call-used-regs suboptions from ARG, return the FLAGS. */
+
+unsigned int
+parse_zero_call_used_regs_options (const char *arg)
+{
+ unsigned int flags = 0;
+
+ /* Check to see if the string matches a sub-option name. */
+ for (unsigned int i = 0; zero_call_used_regs_opts[i].name != NULL; ++i)
+ if (strcmp (arg, zero_call_used_regs_opts[i].name) == 0)
+ {
+ flags = zero_call_used_regs_opts[i].flag;
+ break;
+ }
+
+ if (!flags)
+ error ("unrecognized argument to %<-fzero-call-used-regs=%>: %qs", arg);
+
+ return flags;
+}
+
/* Parse -falign-NAME format for a FLAG value. Return individual
parsed integer values into RESULT_VALUES array. If REPORT_ERROR is
set, print error message at LOC location. */
/* Automatically sets -ftree-loop-vectorize and
-ftree-slp-vectorize. Nothing more to do here. */
break;
+ case OPT_fzero_call_used_regs_:
+ opts->x_flag_zero_call_used_regs
+ = parse_zero_call_used_regs_options (arg);
+ break;
+
case OPT_fshow_column:
dc->show_column = value;
break;
bool can_recover;
} sanitizer_opts[];
+extern const struct zero_call_used_regs_opts_s
+{
+ const char *const name;
+ unsigned int flag;
+} zero_call_used_regs_opts[];
+
extern vec<const char *> help_option_arguments;
extern void add_misspelling_candidates (auto_vec<char *> *candidates,
POP_INSERT_PASSES ()
NEXT_PASS (pass_late_compilation);
PUSH_INSERT_PASSES_WITHIN (pass_late_compilation)
+ NEXT_PASS (pass_zero_call_used_regs);
NEXT_PASS (pass_compute_alignments);
NEXT_PASS (pass_variable_tracking);
NEXT_PASS (pass_free_cfg);
}
return ((num_changes_pending () > 0) && (apply_change_group () > 0));
}
-\f
+
+/* Check whether INSN matches a specific alternative of an .md pattern. */
+
+bool
+valid_insn_p (rtx_insn *insn)
+{
+ recog_memoized (insn);
+ if (INSN_CODE (insn) < 0)
+ return false;
+ extract_insn (insn);
+ /* We don't know whether the insn will be in code that is optimized
+ for size or speed, so consider all enabled alternatives. */
+ if (!constrain_operands (1, get_enabled_alternatives (insn)))
+ return false;
+ return true;
+}
+
/* Return 1 if OP is a valid general operand for machine mode MODE.
This is either a register reference, a memory reference,
or a constant. In the case of a memory reference, the address
extern bool validate_simplify_insn (rtx_insn *insn);
extern int num_changes_pending (void);
extern bool reg_fits_class_p (const_rtx, reg_class_t, int, machine_mode);
+extern bool valid_insn_p (rtx_insn *);
extern int offsettable_memref_p (rtx);
extern int offsettable_nonstrict_memref_p (rtx);
&end_of_function_needs, true);
for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
- if (global_regs[i] || EPILOGUE_USES (i))
+ if (global_regs[i] || df_epilogue_uses_p (i))
SET_HARD_REG_BIT (end_of_function_needs.regs, i);
/* The registers required to be live at the end of the function are
is needed.",
rtx, (void), NULL)
+/* Generate an instruction sequence to zero call-used registers. */
+DEFHOOK
+(zero_call_used_regs,
+ "This target hook emits instructions to zero the subset of @var{selected_regs}\n\
+that could conceivably contain values that are useful to an attacker.\n\
+Return the set of registers that were actually cleared.\n\
+\n\
+The default implementation uses normal move instructions to zero\n\
+all the registers in @var{selected_regs}. Define this hook if the\n\
+target has more efficient ways of zeroing certain registers,\n\
+or if you believe that certain registers would never contain\n\
+values that are useful to an attacker.",
+ HARD_REG_SET, (HARD_REG_SET selected_regs),
+default_zero_call_used_regs)
+
/* Return true if all function parameters should be spilled to the
stack. */
DEFHOOK
#include "tree-ssa-alias.h"
#include "gimple-expr.h"
#include "memmodel.h"
+#include "backend.h"
+#include "emit-rtl.h"
+#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "tree-vrp.h"
#endif
}
+/* The default hook for TARGET_ZERO_CALL_USED_REGS. */
+
+HARD_REG_SET
+default_zero_call_used_regs (HARD_REG_SET need_zeroed_hardregs)
+{
+ gcc_assert (!hard_reg_set_empty_p (need_zeroed_hardregs));
+
+ for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if (TEST_HARD_REG_BIT (need_zeroed_hardregs, regno))
+ {
+ rtx_insn *last_insn = get_last_insn ();
+ machine_mode mode = GET_MODE (regno_reg_rtx[regno]);
+ rtx zero = CONST0_RTX (mode);
+ rtx_insn *insn = emit_move_insn (regno_reg_rtx[regno], zero);
+ if (!valid_insn_p (insn))
+ {
+ static bool issued_error;
+ if (!issued_error)
+ {
+ issued_error = true;
+ sorry ("%qs not supported on this target",
+ "-fzero-call-used_regs");
+ }
+ delete_insns_since (last_insn);
+ }
+ }
+ return need_zeroed_hardregs;
+}
+
rtx
default_internal_arg_pointer (void)
{
const_tree);
extern bool hook_bool_const_rtx_commutative_p (const_rtx, int);
extern rtx default_function_value (const_tree, const_tree, bool);
+extern HARD_REG_SET default_zero_call_used_regs (HARD_REG_SET);
extern rtx default_libcall_value (machine_mode, const_rtx);
extern bool default_function_value_regno_p (const unsigned int);
extern rtx default_internal_arg_pointer (void);
--- /dev/null
+/* { dg-do run } */
+/* { dg-options "-O2 -fzero-call-used-regs=skip" } */
+
+volatile int result = 0;
+int
+__attribute__((noipa))
+foo (int x)
+{
+ return x;
+}
+int main()
+{
+ result = foo (2);
+ return 0;
+}
--- /dev/null
+/* { dg-do run } */
+/* { dg-options "-O2" } */
+
+#include <assert.h>
+int result = 0;
+
+int
+__attribute__((noipa))
+__attribute__ ((zero_call_used_regs("skip")))
+foo1 (int x)
+{
+ return (x + 1);
+}
+
+int
+__attribute__((noipa))
+__attribute__ ((zero_call_used_regs("used-gpr-arg")))
+foo2 (int x)
+{
+ return (x + 2);
+}
+
+int
+__attribute__((noipa))
+__attribute__ ((zero_call_used_regs("used-gpr")))
+foo3 (int x)
+{
+ return (x + 3);
+}
+
+int
+__attribute__((noipa))
+__attribute__ ((zero_call_used_regs("used-arg")))
+foo4 (int x)
+{
+ return (x + 4);
+}
+
+int
+__attribute__((noipa))
+__attribute__ ((zero_call_used_regs("used")))
+foo5 (int x)
+{
+ return (x + 5);
+}
+
+int
+__attribute__((noipa))
+__attribute__ ((zero_call_used_regs("all-gpr-arg")))
+foo6 (int x)
+{
+ return (x + 6);
+}
+
+int
+__attribute__((noipa))
+__attribute__ ((zero_call_used_regs("all-gpr")))
+foo7 (int x)
+{
+ return (x + 7);
+}
+
+int
+__attribute__((noipa))
+__attribute__ ((zero_call_used_regs("all-arg")))
+foo8 (int x)
+{
+ return (x + 8);
+}
+
+int
+__attribute__((noipa))
+__attribute__ ((zero_call_used_regs("all")))
+foo9 (int x)
+{
+ return (x + 9);
+}
+
+int main()
+{
+ result = foo1 (1);
+ result += foo2 (1);
+ result += foo3 (1);
+ result += foo4 (1);
+ result += foo5 (1);
+ result += foo6 (1);
+ result += foo7 (1);
+ result += foo8 (1);
+ result += foo9 (1);
+ assert (result == 54);
+ return 0;
+}
--- /dev/null
+/* { dg-do run } */
+/* { dg-options "-O2 -fzero-call-used-regs=all" } */
+
+#include "zero-scratch-regs-10.c"
--- /dev/null
+/* { dg-do run } */
+/* { dg-options "-O2 -fzero-call-used-regs=used-gpr-arg" } */
+
+#include "zero-scratch-regs-1.c"
--- /dev/null
+/* { dg-do run } */
+/* { dg-options "-O2 -fzero-call-used-regs=used-gpr" } */
+
+#include "zero-scratch-regs-1.c"
--- /dev/null
+/* { dg-do run } */
+/* { dg-options "-O2 -fzero-call-used-regs=used-arg" } */
+
+#include "zero-scratch-regs-1.c"
--- /dev/null
+/* { dg-do run } */
+/* { dg-options "-O2 -fzero-call-used-regs=used" } */
+
+#include "zero-scratch-regs-1.c"
--- /dev/null
+/* { dg-do run } */
+/* { dg-options "-O2 -fzero-call-used-regs=all-gpr-arg" } */
+
+#include "zero-scratch-regs-1.c"
--- /dev/null
+/* { dg-do run } */
+/* { dg-options "-O2 -fzero-call-used-regs=all-gpr" } */
+
+#include "zero-scratch-regs-1.c"
--- /dev/null
+/* { dg-do run } */
+/* { dg-options "-O2 -fzero-call-used-regs=all-arg" } */
+
+#include "zero-scratch-regs-1.c"
--- /dev/null
+/* { dg-do run } */
+/* { dg-options "-O2 -fzero-call-used-regs=all" } */
+
+#include "zero-scratch-regs-1.c"
--- /dev/null
+/* { dg-do compile } */
+/* { dg-options "-O2" } */
+
+int result __attribute__ ((zero_call_used_regs("all"))); /* { dg-error "attribute applies only to functions" } */
+int
+__attribute__ ((zero_call_used_regs("gpr-arg-all")))
+foo1 (int x) /* { dg-error "unrecognized 'zero_call_used_regs' attribute argument" } */
+{
+ return (x + 1);
+}
+int
+__attribute__ ((zero_call_used_regs(1)))
+foo2 (int x) /* { dg-error "argument not a string" } */
+{
+ return (x + 2);
+}
--- /dev/null
+/* { dg-do compile { target *-*-linux* } } */
+/* { dg-options "-O2 -fzero-call-used-regs=used" } */
+
+void
+foo (void)
+{
+}
+
+/* { dg-final { scan-assembler-not "vzeroall" } } */
+/* { dg-final { scan-assembler-not "%xmm" } } */
+/* { dg-final { scan-assembler-not "xorl\[ \t\]*%" } } */
+/* { dg-final { scan-assembler-not "movl\[ \t\]*%" } } */
--- /dev/null
+/* { dg-do compile { target *-*-linux* } } */
+/* { dg-options "-O2 -fzero-call-used-regs=skip" } */
+
+extern int foo (int) __attribute__ ((zero_call_used_regs("all-gpr")));
+
+int
+foo (int x)
+{
+ return x;
+}
+
+/* { dg-final { scan-assembler-not "vzeroall" } } */
+/* { dg-final { scan-assembler-not "%xmm" } } */
+/* { dg-final { scan-assembler "xorl\[ \t\]*%edx, %edx" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%edx, %ecx" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%edx, %esi" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%edx, %edi" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%edx, %r8d" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%edx, %r9d" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%edx, %r10d" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%edx, %r11d" { target { ! ia32 } } } } */
--- /dev/null
+/* { dg-do run { target *-*-linux* } } */
+/* { dg-options "-O2 -fzero-call-used-regs=used-gpr" } */
+
+struct S { int i; };
+__attribute__((const, noinline, noclone))
+struct S foo (int x)
+{
+ struct S s;
+ s.i = x;
+ return s;
+}
+
+int a[2048], b[2048], c[2048], d[2048];
+struct S e[2048];
+
+__attribute__((noinline, noclone)) void
+bar (void)
+{
+ int i;
+ for (i = 0; i < 1024; i++)
+ {
+ e[i] = foo (i);
+ a[i+2] = a[i] + a[i+1];
+ b[10] = b[10] + i;
+ c[i] = c[2047 - i];
+ d[i] = d[i + 1];
+ }
+}
+
+int
+main ()
+{
+ int i;
+ bar ();
+ for (i = 0; i < 1024; i++)
+ if (e[i].i != i)
+ __builtin_abort ();
+ return 0;
+}
--- /dev/null
+/* { dg-do run { target *-*-linux* } } */
+/* { dg-options "-O2 -fzero-call-used-regs=all-gpr" } */
+
+struct S { int i; };
+__attribute__((const, noinline, noclone))
+struct S foo (int x)
+{
+ struct S s;
+ s.i = x;
+ return s;
+}
+
+int a[2048], b[2048], c[2048], d[2048];
+struct S e[2048];
+
+__attribute__((noinline, noclone)) void
+bar (void)
+{
+ int i;
+ for (i = 0; i < 1024; i++)
+ {
+ e[i] = foo (i);
+ a[i+2] = a[i] + a[i+1];
+ b[10] = b[10] + i;
+ c[i] = c[2047 - i];
+ d[i] = d[i + 1];
+ }
+}
+
+int
+main ()
+{
+ int i;
+ bar ();
+ for (i = 0; i < 1024; i++)
+ if (e[i].i != i)
+ __builtin_abort ();
+ return 0;
+}
--- /dev/null
+/* { dg-do compile { target *-*-linux* } } */
+/* { dg-options "-O2 -fzero-call-used-regs=all -march=corei7" } */
+
+void
+foo (void)
+{
+}
+
+/* { dg-final { scan-assembler-not "vzeroall" } } */
+/* { dg-final { scan-assembler "pxor\[ \t\]*%xmm0, %xmm0" } } */
+/* { dg-final { scan-assembler-times "movaps\[ \t\]*%xmm0, %xmm\[0-9\]+" 7 { target { ia32 } } } } */
+/* { dg-final { scan-assembler-times "movaps\[ \t\]*%xmm0, %xmm\[0-9\]+" 15 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "xorl\[ \t\]*%eax, %eax" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %edx" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %ecx" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %esi" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %edi" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %r8d" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %r9d" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %r10d" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %r11d" { target { ! ia32 } } } } */
--- /dev/null
+/* { dg-do compile { target *-*-linux* } } */
+/* { dg-options "-O2 -fzero-call-used-regs=all -march=corei7 -mavx" } */
+
+void
+foo (void)
+{
+}
+
+/* { dg-final { scan-assembler-times "vzeroall" 1 } } */
+/* { dg-final { scan-assembler-not "%xmm" } } */
+/* { dg-final { scan-assembler "xorl\[ \t\]*%eax, %eax" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %edx" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %ecx" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %esi" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %edi" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %r8d" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %r9d" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %r10d" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %r11d" { target { ! ia32 } } } } */
--- /dev/null
+/* { dg-do compile { target *-*-linux* } } */
+/* { dg-options "-O2 -fzero-call-used-regs=skip" } */
+
+extern void foo (void) __attribute__ ((zero_call_used_regs("used")));
+
+void
+foo (void)
+{
+}
+
+/* { dg-final { scan-assembler-not "vzeroall" } } */
+/* { dg-final { scan-assembler-not "%xmm" } } */
+/* { dg-final { scan-assembler-not "xorl\[ \t\]*%" } } */
+/* { dg-final { scan-assembler-not "movl\[ \t\]*%" } } */
--- /dev/null
+/* { dg-do compile { target *-*-linux* } } */
+/* { dg-options "-O2 -fzero-call-used-regs=all" } */
+
+extern void foo (void) __attribute__ ((zero_call_used_regs("skip")));
+
+void
+foo (void)
+{
+}
+
+/* { dg-final { scan-assembler-not "vzeroall" } } */
+/* { dg-final { scan-assembler-not "%xmm" } } */
+/* { dg-final { scan-assembler-not "xorl\[ \t\]*%" } } */
+/* { dg-final { scan-assembler-not "movl\[ \t\]*%" } } */
--- /dev/null
+/* { dg-do compile { target *-*-linux* } } */
+/* { dg-options "-O2 -fzero-call-used-regs=used" } */
+
+int
+foo (int x)
+{
+ return x;
+}
+
+/* { dg-final { scan-assembler-not "vzeroall" } } */
+/* { dg-final { scan-assembler-not "%xmm" } } */
+/* { dg-final { scan-assembler-not "xorl\[ \t\]*%" { target ia32 } } } */
+/* { dg-final { scan-assembler "xorl\[ \t\]*%edi, %edi" { target { ! ia32 } } } } */
--- /dev/null
+/* { dg-do compile { target *-*-linux* } } */
+/* { dg-options "-O2 -fzero-call-used-regs=used -march=corei7" } */
+
+float
+foo (float z, float y, float x)
+{
+ return x + y;
+}
+
+/* { dg-final { scan-assembler-not "vzeroall" } } */
+/* { dg-final { scan-assembler "pxor\[ \t\]*%xmm1, %xmm1" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movaps\[ \t\]*%xmm1, %xmm2" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-not "xorl\[ \t\]*%" } } */
--- /dev/null
+/* { dg-do compile { target *-*-linux* } } */
+/* { dg-options "-O2 -fzero-call-used-regs=used -march=corei7" } */
+
+float
+foo (float z, float y, float x)
+{
+ return x;
+}
+
+/* { dg-final { scan-assembler-not "vzeroall" } } */
+/* { dg-final { scan-assembler "pxor\[ \t\]*%xmm2, %xmm2" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-not "xorl\[ \t\]*%" } } */
--- /dev/null
+/* { dg-do compile { target *-*-linux* } } */
+/* { dg-options "-O2 -fzero-call-used-regs=all-gpr" } */
+
+void
+foo (void)
+{
+}
+
+/* { dg-final { scan-assembler-not "vzeroall" } } */
+/* { dg-final { scan-assembler-not "%xmm" } } */
+/* { dg-final { scan-assembler "xorl\[ \t\]*%eax, %eax" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %edx" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %ecx" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %esi" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %edi" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %r8d" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %r9d" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %r10d" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %r11d" { target { ! ia32 } } } } */
--- /dev/null
+/* { dg-do compile { target *-*-linux* } } */
+/* { dg-options "-O2 -fzero-call-used-regs=all -march=corei7" } */
+
+float
+foo (float z, float y, float x)
+{
+ return x + y;
+}
+
+/* { dg-final { scan-assembler-not "vzeroall" } } */
+/* { dg-final { scan-assembler "pxor\[ \t\]*%xmm0, %xmm0" { target { ia32 } } } } */
+/* { dg-final { scan-assembler "pxor\[ \t\]*%xmm1, %xmm1" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "movaps\[ \t\]*%xmm0, %xmm\[0-9\]+" 7 { target { ia32 } } } } */
+/* { dg-final { scan-assembler-times "movaps\[ \t\]*%xmm1, %xmm\[0-9\]+" 14 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "xorl\[ \t\]*%eax, %eax" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %edx" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %ecx" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %esi" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %edi" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %r8d" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %r9d" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %r10d" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %r11d" { target { ! ia32 } } } } */
--- /dev/null
+/* { dg-do compile { target *-*-linux* } } */
+/* { dg-options "-O2 -fzero-call-used-regs=skip -march=corei7" } */
+
+__attribute__ ((zero_call_used_regs("used")))
+float
+foo (float z, float y, float x)
+{
+ return x + y;
+}
+
+/* { dg-final { scan-assembler-not "vzeroall" } } */
+/* { dg-final { scan-assembler "pxor\[ \t\]*%xmm1, %xmm1" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movaps\[ \t\]*%xmm1, %xmm2" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-not "xorl\[ \t\]*%" } } */
--- /dev/null
+/* { dg-do compile { target *-*-linux* } } */
+/* { dg-options "-O2 -fzero-call-used-regs=all -march=corei7 -mavx" } */
+
+void
+foo (void)
+{
+}
+
+/* { dg-final { scan-assembler "vzeroall" } } */
+/* { dg-final { scan-assembler-times "fldz" 8 } } */
+/* { dg-final { scan-assembler-times "fstp" 8 } } */
+/* { dg-final { scan-assembler-not "%xmm" } } */
+/* { dg-final { scan-assembler "xorl\[ \t\]*%eax, %eax" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %edx" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %ecx" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %esi" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %edi" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %r8d" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %r9d" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %r10d" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %r11d" { target { ! ia32 } } } } */
--- /dev/null
+/* { dg-do compile { target *-*-linux* } } */
+/* { dg-options "-O2 -fzero-call-used-regs=all -march=corei7 -mavx512f" } */
+
+void
+foo (void)
+{
+}
+
+/* { dg-final { scan-assembler "vzeroall" } } */
+/* { dg-final { scan-assembler-times "fldz" 8 } } */
+/* { dg-final { scan-assembler-times "fstp" 8 } } */
+/* { dg-final { scan-assembler-not "%xmm" } } */
+/* { dg-final { scan-assembler "xorl\[ \t\]*%eax, %eax" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %edx" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %ecx" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %esi" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %edi" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %r8d" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %r9d" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %r10d" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %r11d" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "kxorw\[ \t\]*%k0, %k0, %k0" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "kmovw\[ \t\]*%k0, %k1" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "kmovw\[ \t\]*%k0, %k2" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "kmovw\[ \t\]*%k0, %k3" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "kmovw\[ \t\]*%k0, %k4" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "kmovw\[ \t\]*%k0, %k5" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "kmovw\[ \t\]*%k0, %k6" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "kmovw\[ \t\]*%k0, %k7" { target { ! ia32 } } } } */
--- /dev/null
+/* { dg-do compile { target *-*-linux* } } */
+/* { dg-options "-O2 -fzero-call-used-regs=used-gpr-arg" } */
+
+int
+foo (int x)
+{
+ return x;
+}
+
+/* { dg-final { scan-assembler "xorl\[ \t\]*%edi, %edi" } } */
--- /dev/null
+/* { dg-do compile { target *-*-linux* } } */
+/* { dg-options "-O2 -fzero-call-used-regs=used-arg" } */
+
+int
+foo (int x)
+{
+ return x;
+}
+
+/* { dg-final { scan-assembler "xorl\[ \t\]*%edi, %edi" } } */
--- /dev/null
+/* { dg-do compile { target *-*-linux* } } */
+/* { dg-options "-O2 -fzero-call-used-regs=all-arg" } */
+
+int
+foo (int x)
+{
+ return x;
+}
+
+/* { dg-final { scan-assembler "xorl\[ \t\]*%edx, %edx" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%edx, %ecx" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%edx, %esi" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%edx, %edi" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%edx, %r8d" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%edx, %r9d" } } */
+/* { dg-final { scan-assembler "pxor\[ \t\]*%xmm0, %xmm0" } } */
+/* { dg-final { scan-assembler "movaps\[ \t\]*%xmm0, %xmm1" } } */
+/* { dg-final { scan-assembler "movaps\[ \t\]*%xmm0, %xmm2" } } */
+/* { dg-final { scan-assembler "movaps\[ \t\]*%xmm0, %xmm3" } } */
+/* { dg-final { scan-assembler "movaps\[ \t\]*%xmm0, %xmm4" } } */
+/* { dg-final { scan-assembler "movaps\[ \t\]*%xmm0, %xmm5" } } */
+/* { dg-final { scan-assembler "movaps\[ \t\]*%xmm0, %xmm6" } } */
+/* { dg-final { scan-assembler "movaps\[ \t\]*%xmm0, %xmm7" } } */
--- /dev/null
+/* { dg-do compile { target *-*-linux* } } */
+/* { dg-options "-O2 -fzero-call-used-regs=all-gpr-arg" } */
+
+int
+foo (int x)
+{
+ return x;
+}
+
+/* { dg-final { scan-assembler "xorl\[ \t\]*%edx, %edx" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%edx, %ecx" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%edx, %esi" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%edx, %edi" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%edx, %r8d" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%edx, %r9d" } } */
--- /dev/null
+/* { dg-do compile { target *-*-linux* } } */
+/* { dg-options "-O2 -mmmx -fzero-call-used-regs=all" } */
+/* { dg-require-effective-target ia32 } */
+
+/* __v2si is not a built-in type, so define it locally. */
+typedef int __v2si __attribute__ ((vector_size (8)));
+
+__v2si ret_mmx (void)
+{
+ return (__v2si) { 123, 345 };
+}
+
+/* { dg-final { scan-assembler "pxor\[ \t\]*%mm1, %mm1" } } */
+/* { dg-final { scan-assembler "movq\[ \t\]*%mm1, %mm2" } } */
+/* { dg-final { scan-assembler "movq\[ \t\]*%mm1, %mm3" } } */
+/* { dg-final { scan-assembler "movq\[ \t\]*%mm1, %mm4" } } */
+/* { dg-final { scan-assembler "movq\[ \t\]*%mm1, %mm5" } } */
+/* { dg-final { scan-assembler "movq\[ \t\]*%mm1, %mm6" } } */
+/* { dg-final { scan-assembler "movq\[ \t\]*%mm1, %mm7" } } */
--- /dev/null
+/* { dg-do compile { target *-*-linux* } } */
+/* { dg-options "-O2 -fzero-call-used-regs=all" } */
+
+long double ret_x87 (void)
+{
+ return 1.1L;
+}
+
+/* { dg-final { scan-assembler-times "fldz" 7 } } */
+/* { dg-final { scan-assembler-times "fstp" 7 } } */
--- /dev/null
+/* { dg-do compile { target *-*-linux* } } */
+/* { dg-options "-O2 -fzero-call-used-regs=skip" } */
+
+void
+foo (void)
+{
+}
+
+/* { dg-final { scan-assembler-not "vzeroall" } } */
+/* { dg-final { scan-assembler-not "%xmm" } } */
+/* { dg-final { scan-assembler-not "xorl\[ \t\]*%" } } */
+/* { dg-final { scan-assembler-not "movl\[ \t\]*%" } } */
--- /dev/null
+/* { dg-do compile { target *-*-linux* } } */
+/* { dg-options "-O2 -fzero-call-used-regs=all" } */
+
+_Complex long double ret_x87_cplx (void)
+{
+ return 1.1L + 1.2iL;
+}
+
+/* { dg-final { scan-assembler-times "fldz" 8 { target ia32 } } } */
+/* { dg-final { scan-assembler-times "fstp" 8 { target ia32 } } } */
+/* { dg-final { scan-assembler-times "fldz" 6 { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler-times "fstp" 6 { target { ! ia32 } } } } */
--- /dev/null
+/* { dg-do compile { target *-*-linux* } } */
+/* { dg-options "-O2 -mmmx -fzero-call-used-regs=all-arg" } */
+/* { dg-require-effective-target ia32 } */
+
+/* __v2si is not a built-in type, so define it locally. */
+typedef int __v2si __attribute__ ((vector_size (8)));
+
+__v2si ret_mmx (void)
+{
+ return (__v2si) { 123, 345 };
+}
+
+/* { dg-final { scan-assembler "pxor\[ \t\]*%mm1, %mm1" } } */
+/* { dg-final { scan-assembler "movq\[ \t\]*%mm1, %mm2" } } */
+/* { dg-final { scan-assembler-not "movq\[ \t\]*%mm1, %mm\[34567\]" } } */
--- /dev/null
+/* { dg-do compile { target *-*-linux* } } */
+/* { dg-options "-O2 -fzero-call-used-regs=skip" } */
+
+extern void foo (void) __attribute__ ((zero_call_used_regs("used-gpr")));
+
+void
+foo (void)
+{
+}
+
+/* { dg-final { scan-assembler-not "vzeroall" } } */
+/* { dg-final { scan-assembler-not "%xmm" } } */
+/* { dg-final { scan-assembler-not "xorl\[ \t\]*%" } } */
+/* { dg-final { scan-assembler-not "movl\[ \t\]*%" } } */
--- /dev/null
+/* { dg-do compile { target *-*-linux* } } */
+/* { dg-options "-O2 -fzero-call-used-regs=skip" } */
+
+__attribute__ ((zero_call_used_regs("all-gpr")))
+void
+foo (void)
+{
+}
+
+/* { dg-final { scan-assembler-not "vzeroall" } } */
+/* { dg-final { scan-assembler-not "%xmm" } } */
+/* { dg-final { scan-assembler "xorl\[ \t\]*%eax, %eax" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %edx" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %ecx" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %esi" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %edi" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %r8d" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %r9d" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %r10d" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%eax, %r11d" { target { ! ia32 } } } } */
--- /dev/null
+/* { dg-do compile { target *-*-linux* } } */
+/* { dg-options "-O2 -fzero-call-used-regs=all-gpr" } */
+
+extern void foo (void) __attribute__ ((zero_call_used_regs("skip")));
+
+void
+foo (void)
+{
+}
+
+/* { dg-final { scan-assembler-not "vzeroall" } } */
+/* { dg-final { scan-assembler-not "%xmm" } } */
+/* { dg-final { scan-assembler-not "xorl\[ \t\]*%" } } */
+/* { dg-final { scan-assembler-not "movl\[ \t\]*%" } } */
--- /dev/null
+/* { dg-do compile { target *-*-linux* } } */
+/* { dg-options "-O2 -fzero-call-used-regs=used-gpr" } */
+
+int
+foo (int x)
+{
+ return x;
+}
+
+/* { dg-final { scan-assembler-not "vzeroall" } } */
+/* { dg-final { scan-assembler-not "%xmm" } } */
+/* { dg-final { scan-assembler-not "xorl\[ \t\]*%" { target ia32 } } } */
+/* { dg-final { scan-assembler "xorl\[ \t\]*%edi, %edi" { target { ! ia32 } } } } */
--- /dev/null
+/* { dg-do compile { target *-*-linux* } } */
+/* { dg-options "-O2 -fzero-call-used-regs=all-gpr" } */
+
+int
+foo (int x)
+{
+ return x;
+}
+
+/* { dg-final { scan-assembler-not "vzeroall" } } */
+/* { dg-final { scan-assembler-not "%xmm" } } */
+/* { dg-final { scan-assembler "xorl\[ \t\]*%edx, %edx" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%edx, %ecx" } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%edx, %esi" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%edx, %edi" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%edx, %r8d" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%edx, %r9d" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%edx, %r10d" { target { ! ia32 } } } } */
+/* { dg-final { scan-assembler "movl\[ \t\]*%edx, %r11d" { target { ! ia32 } } } } */
--- /dev/null
+/* { dg-do compile { target *-*-linux* } } */
+/* { dg-options "-O2 -fzero-call-used-regs=skip" } */
+
+extern int foo (int) __attribute__ ((zero_call_used_regs("used-gpr")));
+
+int
+foo (int x)
+{
+ return x;
+}
+
+/* { dg-final { scan-assembler-not "vzeroall" } } */
+/* { dg-final { scan-assembler-not "%xmm" } } */
+/* { dg-final { scan-assembler-not "xorl\[ \t\]*%" { target ia32 } } } */
+/* { dg-final { scan-assembler "xorl\[ \t\]*%edi, %edi" { target { ! ia32 } } } } */
extern rtl_opt_pass *make_pass_split_after_reload (gcc::context *ctxt);
extern rtl_opt_pass *make_pass_thread_prologue_and_epilogue (gcc::context
*ctxt);
+extern rtl_opt_pass *make_pass_zero_call_used_regs (gcc::context *ctxt);
extern rtl_opt_pass *make_pass_stack_adjustments (gcc::context *ctxt);
extern rtl_opt_pass *make_pass_sched_fusion (gcc::context *ctxt);
extern rtl_opt_pass *make_pass_peephole2 (gcc::context *ctxt);