riscv_align_data_type_natural
};
+/* Where to get the canary for the stack protector. */
+enum stack_protector_guard {
+ SSP_TLS, /* per-thread canary in TLS block */
+ SSP_GLOBAL /* global canary */
+};
+
#endif /* ! GCC_RISCV_OPTS_H */
" [%<-mriscv-attribute%>]");
#endif
+ if (riscv_stack_protector_guard == SSP_GLOBAL
+ && global_options_set.x_riscv_stack_protector_guard_offset_str)
+ {
+ error ("incompatible options %<-mstack-protector-guard=global%> and "
+ "%<-mstack-protector-guard-offset=%s%>",
+ riscv_stack_protector_guard_offset_str);
+ }
+
+ if (riscv_stack_protector_guard == SSP_TLS
+ && !(global_options_set.x_riscv_stack_protector_guard_offset_str
+ && global_options_set.x_riscv_stack_protector_guard_reg_str))
+ {
+ error ("both %<-mstack-protector-guard-offset%> and "
+ "%<-mstack-protector-guard-reg%> must be used "
+ "with %<-mstack-protector-guard=sysreg%>");
+ }
+
+ if (global_options_set.x_riscv_stack_protector_guard_reg_str)
+ {
+ const char *str = riscv_stack_protector_guard_reg_str;
+ int reg = decode_reg_name (str);
+
+ if (!IN_RANGE (reg, GP_REG_FIRST + 1, GP_REG_LAST))
+ error ("%qs is not a valid base register in %qs", str,
+ "-mstack-protector-guard-reg=");
+
+ riscv_stack_protector_guard_reg = reg;
+ }
+
+ if (global_options_set.x_riscv_stack_protector_guard_offset_str)
+ {
+ char *end;
+ const char *str = riscv_stack_protector_guard_offset_str;
+ errno = 0;
+ long offs = strtol (riscv_stack_protector_guard_offset_str, &end, 0);
+
+ if (!*str || *end || errno)
+ error ("%qs is not a valid number in %qs", str,
+ "-mstack-protector-guard-offset=");
+
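+ /* Only offsets that fit in a signed 12-bit immediate are accepted, so
+ that the canary can be reached with a single load from the base
+ register. */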
+ if (!SMALL_OPERAND (offs))
+ error ("%qs is not a valid offset in %qs", str,
+ "-mstack-protector-guard-offset=");
+
+ riscv_stack_protector_guard_offset = offs;
+ }
}
/* Implement TARGET_CONDITIONAL_REGISTER_USAGE. */
UNSPECV_BLOCKAGE
UNSPECV_FENCE
UNSPECV_FENCE_I
+
+ ;; Stack Smash Protector
+ UNSPEC_SSP_SET
+ UNSPEC_SSP_TEST
])
(define_constants
""
{})
+;; Named patterns for stack smashing protection.
+
+(define_expand "stack_protect_set"
+ [(match_operand 0 "memory_operand")
+ (match_operand 1 "memory_operand")]
+ ""
+{
+ machine_mode mode = GET_MODE (operands[0]);
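+ /* For the TLS guard, replace the guard reference supplied by the
+ middle end with a memory reference built from the user-specified
+ base register and offset. */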
+ if (riscv_stack_protector_guard == SSP_TLS)
+ {
+ rtx reg = gen_rtx_REG (Pmode, riscv_stack_protector_guard_reg);
+ rtx offset = GEN_INT (riscv_stack_protector_guard_offset);
+ rtx addr = gen_rtx_PLUS (Pmode, reg, offset);
+ operands[1] = gen_rtx_MEM (Pmode, addr);
+ }
+
+ emit_insn ((mode == DImode
+ ? gen_stack_protect_set_di
+ : gen_stack_protect_set_si) (operands[0], operands[1]));
+ DONE;
+})
+
+;; DO NOT SPLIT THIS PATTERN. It is important for security reasons that the
+;; canary value does not live beyond the life of this sequence.
+(define_insn "stack_protect_set_<mode>"
+ [(set (match_operand:GPR 0 "memory_operand" "=m")
+ (unspec:GPR [(match_operand:GPR 1 "memory_operand" "m")]
+ UNSPEC_SSP_SET))
+ (set (match_scratch:GPR 2 "=&r") (const_int 0))]
+ ""
+ "<load>\\t%2, %1\;<store>\\t%2, %0\;li\t%2, 0"
+ [(set_attr "length" "12")])
+
+(define_expand "stack_protect_test"
+ [(match_operand 0 "memory_operand")
+ (match_operand 1 "memory_operand")
+ (match_operand 2)]
+ ""
+{
+ rtx result;
+ machine_mode mode = GET_MODE (operands[0]);
+
+ result = gen_reg_rtx (mode);
+ if (riscv_stack_protector_guard == SSP_TLS)
+ {
+ rtx reg = gen_rtx_REG (Pmode, riscv_stack_protector_guard_reg);
+ rtx offset = GEN_INT (riscv_stack_protector_guard_offset);
+ rtx addr = gen_rtx_PLUS (Pmode, reg, offset);
+ operands[1] = gen_rtx_MEM (Pmode, addr);
+ }
+ emit_insn ((mode == DImode
+ ? gen_stack_protect_test_di
+ : gen_stack_protect_test_si) (result,
+ operands[0],
+ operands[1]));
+
+ if (mode == DImode)
+ emit_jump_insn (gen_cbranchdi4 (gen_rtx_EQ (VOIDmode, result, const0_rtx),
+ result, const0_rtx, operands[2]));
+ else
+ emit_jump_insn (gen_cbranchsi4 (gen_rtx_EQ (VOIDmode, result, const0_rtx),
+ result, const0_rtx, operands[2]));
+
+ DONE;
+})
+
+(define_insn "stack_protect_test_<mode>"
+ [(set (match_operand:GPR 0 "register_operand" "=r")
+ (unspec:GPR [(match_operand:GPR 1 "memory_operand" "m")
+ (match_operand:GPR 2 "memory_operand" "m")]
+ UNSPEC_SSP_TEST))
+ (clobber (match_scratch:GPR 3 "=&r"))]
+ ""
+ "<load>\t%3, %1\;<load>\t%0, %2\;xor\t%0, %3, %0\;li\t%3, 0"
+ [(set_attr "length" "12")])
+
(include "sync.md")
(include "peephole.md")
(include "pic.md")
EnumValue
Enum(riscv_align_data) String(natural) Value(riscv_align_data_type_natural)
+
+mstack-protector-guard=
+Target RejectNegative Joined Enum(stack_protector_guard) Var(riscv_stack_protector_guard) Init(SSP_GLOBAL)
+Use given stack-protector guard.
+
+Enum
+Name(stack_protector_guard) Type(enum stack_protector_guard)
+Valid arguments to -mstack-protector-guard=:
+
+EnumValue
+Enum(stack_protector_guard) String(tls) Value(SSP_TLS)
+
+EnumValue
+Enum(stack_protector_guard) String(global) Value(SSP_GLOBAL)
+
+mstack-protector-guard-reg=
+Target RejectNegative Joined Var(riscv_stack_protector_guard_reg_str)
+Use the given base register for addressing the stack-protector guard.
+
+TargetVariable
+int riscv_stack_protector_guard_reg = 0
+
+mstack-protector-guard-offset=
+Target RejectNegative Joined Integer Var(riscv_stack_protector_guard_offset_str)
+Use the given offset for addressing the stack-protector guard.
+
+TargetVariable
+long riscv_stack_protector_guard_offset = 0
-mexplicit-relocs -mno-explicit-relocs @gol
-mrelax -mno-relax @gol
-mriscv-attribute -mno-riscv-attribute @gol
--malign-data=@var{type}}
+-malign-data=@var{type} @gol
+-mstack-protector-guard=@var{guard} -mstack-protector-guard-reg=@var{reg} @gol
+-mstack-protector-guard-offset=@var{offset}}
@emph{RL78 Options}
@gccoptlist{-msim -mmul=none -mmul=g13 -mmul=g14 -mallregs @gol
types. Supported values for @var{type} are @samp{xlen} which uses x register
width as the alignment value, and @samp{natural} which uses natural alignment.
@samp{xlen} is the default.
+
+@item -mstack-protector-guard=@var{guard}
+@itemx -mstack-protector-guard-reg=@var{reg}
+@itemx -mstack-protector-guard-offset=@var{offset}
+@opindex mstack-protector-guard
+@opindex mstack-protector-guard-reg
+@opindex mstack-protector-guard-offset
+Generate stack protection code using a canary at @var{guard}.  Supported
+locations are @samp{global} for a global canary or @samp{tls} for a
+per-thread canary in the TLS block.
+
+With the latter choice the options
+@option{-mstack-protector-guard-reg=@var{reg}} and
+@option{-mstack-protector-guard-offset=@var{offset}} furthermore specify
+which register to use as the base register for reading the canary,
+and the offset from that base register.  There is no default
+register or offset, as this is entirely for use within the Linux
+kernel.
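+
+As an illustrative example only (the register and offset shown are not
+defaults and must match the layout used by the kernel being built), a
+TLS-based guard could be selected with:
+
+@smallexample
+gcc -fstack-protector-strong -mstack-protector-guard=tls \
+    -mstack-protector-guard-reg=tp -mstack-protector-guard-offset=0x28 foo.c
+@end smallexample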
@end table
@node RL78 Options