+2017-05-04 Andrew Waterman <andrew@sifive.com>
+
+ * config/riscv/riscv.opt (mstrict-align): New option.
+ * config/riscv/riscv.h (STRICT_ALIGNMENT): Use it. Update comment.
+ (SLOW_UNALIGNED_ACCESS): Define.
+ (riscv_slow_unaligned_access): Declare.
+ * config/riscv/riscv.c (riscv_tune_info): Add slow_unaligned_access
+ field.
+ (riscv_slow_unaligned_access): New variable.
+ (rocket_tune_info): Set slow_unaligned_access to true.
+ (optimize_size_tune_info): Set slow_unaligned_access to false.
+ (riscv_cpu_info_table): Add entry for optimize_size_tune_info.
+ (riscv_valid_lo_sum_p): Use TARGET_STRICT_ALIGN.
+ (riscv_option_override): Set riscv_slow_unaligned_access.
+ * doc/invoke.texi: Document -mstrict-align for RISC-V.
+
2017-05-04 Kito Cheng <kito.cheng@gmail.com>

 * config/riscv/riscv.md: Unify indentation.
unsigned short issue_rate;
unsigned short branch_cost;
unsigned short memory_cost;
+ bool slow_unaligned_access;
};
/* Information about one CPU we know about. */
/* Global variables for machine-dependent things. */
+/* Whether unaligned accesses execute very slowly. */
+bool riscv_slow_unaligned_access;
+
/* Which tuning parameters to use. */
static const struct riscv_tune_info *tune_info;
{COSTS_N_INSNS (6), COSTS_N_INSNS (6)}, /* int_div */
1, /* issue_rate */
3, /* branch_cost */
- 5 /* memory_cost */
+ 5, /* memory_cost */
+ true, /* slow_unaligned_access */
};
/* Costs to use when optimizing for size. */
{COSTS_N_INSNS (1), COSTS_N_INSNS (1)}, /* int_div */
1, /* issue_rate */
1, /* branch_cost */
- 2 /* memory_cost */
+ 2, /* memory_cost */
+ false, /* slow_unaligned_access */
};
/* A table describing all the processors GCC knows about. */
static const struct riscv_cpu_info riscv_cpu_info_table[] = {
{ "rocket", &rocket_tune_info },
+ { "size", &optimize_size_tune_info },
};
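With this entry the size-oriented tuning also becomes selectable by name,
independent of -Os. An invocation sketch (the target triple here is my
assumption, not part of the patch):

    riscv64-unknown-elf-gcc -O2 -mtune=size -c foo.c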
/* Return the riscv_cpu_info entry for the given name string. */
/* We may need to split multiword moves, so make sure that each word
can be accessed without inducing a carry. */
if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
- && GET_MODE_BITSIZE (mode) > GET_MODE_ALIGNMENT (mode))
+ && (!TARGET_STRICT_ALIGN
+ || GET_MODE_BITSIZE (mode) > GET_MODE_ALIGNMENT (mode)))
return false;
return true;
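A worked example of the carry hazard (my numbers, not from the patch): on
RV32, a DImode move through a lo_sum address touches both words of the
object:

    lw  t0, %lo(sym)(t1)      # low word
    lw  t2, %lo(sym)+4(t1)    # high word

If sym is only 4-byte aligned, %lo(sym) can be 2044 (0x7fc), and the high
word's offset 2044 + 4 = 2048 overflows the signed 12-bit %lo field, so the
%hi(sym) half of the address would need a carry. Alignment to the full mode
size (8 bytes) caps %lo(sym) at 2040, so the +4 cannot overflow. That is the
guarantee the GET_MODE_ALIGNMENT test provides, but only when strict
alignment is enforced, hence the !TARGET_STRICT_ALIGN early out.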
RISCV_TUNE_STRING_DEFAULT);
tune_info = optimize_size ? &optimize_size_tune_info : cpu->tune_info;
+ /* Use -mtune's setting for slow_unaligned_access, even when optimizing
+ for size. For architectures that trap and emulate unaligned accesses,
+ the performance cost is too great, even for -Os. */
+ riscv_slow_unaligned_access = (cpu->tune_info->slow_unaligned_access
+ || TARGET_STRICT_ALIGN);
+
/* If the user hasn't specified a branch cost, use the processor's
default. */
if (riscv_branch_cost == 0)
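The comment above captures the one subtle interaction: -Os swaps in the
size-oriented cost tables, yet slow_unaligned_access is still read from the
-mtune'd CPU. A minimal standalone sketch of that selection logic (my
illustration; the two structures stand in for the real tune tables):

    #include <stdbool.h>
    #include <stdio.h>

    struct tune { bool slow_unaligned_access; };

    static const struct tune rocket_tune_info = { true };
    static const struct tune optimize_size_tune_info = { false };

    int
    main (void)
    {
      bool optimize_size = true;         /* -Os */
      bool target_strict_align = false;  /* default: -mno-strict-align */
      const struct tune *cpu_tune = &rocket_tune_info;  /* -mtune=rocket */

      /* Cost tables follow -Os ...  */
      const struct tune *tune_info
        = optimize_size ? &optimize_size_tune_info : cpu_tune;

      /* ... but slow_unaligned_access deliberately does not: it is read
         from the CPU's own tuning, so rocket's "true" survives -Os.  */
      bool slow_unaligned_access
        = cpu_tune->slow_unaligned_access || target_strict_align;

      printf ("%d %d\n", tune_info->slow_unaligned_access,
              slow_unaligned_access);  /* prints "0 1" */
      return 0;
    }

So with -Os -mtune=rocket, size costs are used for code generation, but the
compiler still steers away from unaligned accesses.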
/* There is no point aligning anything to a rounder boundary than this. */
#define BIGGEST_ALIGNMENT 128
-/* The user-level ISA permits misaligned accesses, but they may execute
- extremely slowly and non-atomically. Some privileged architectures
- do not permit them at all. It is best to enforce strict alignment. */
-#define STRICT_ALIGNMENT 1
+/* The user-level ISA permits unaligned accesses, but they are not required
+ of the privileged architecture. */
+#define STRICT_ALIGNMENT TARGET_STRICT_ALIGN
+
+#define SLOW_UNALIGNED_ACCESS(MODE, ALIGN) riscv_slow_unaligned_access
/* Define this if you wish to imitate the way many other C compilers
handle alignment of bitfields and the structures that contain
#ifndef USED_FOR_TARGET
extern const enum reg_class riscv_regno_to_class[];
extern bool riscv_hard_regno_mode_ok[][FIRST_PSEUDO_REGISTER];
+extern bool riscv_slow_unaligned_access;
#endif
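The two macros divide the work: STRICT_ALIGNMENT tells the middle end whether
unaligned accesses may be emitted at all, while SLOW_UNALIGNED_ACCESS steers
it away from accesses that are legal but expensive. A small example of where
the difference shows up in code generation (my illustration, not from the
patch):

    /* p->x is misaligned by construction.  With -mstrict-align the
       access must be assembled from byte loads and shifts; with
       -mno-strict-align GCC may emit a single (possibly slow) lw,
       though SLOW_UNALIGNED_ACCESS can still discourage it.  */
    struct __attribute__ ((packed)) s { char c; int x; };

    int
    get_x (struct s *p)
    {
      return p->x;
    }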
#define ASM_PREFERRED_EH_DATA_FORMAT(CODE,GLOBAL) \
Target Report RejectNegative Joined Enum(code_model) Var(riscv_cmodel) Init(TARGET_DEFAULT_CMODEL)
Specify the code model.
+mstrict-align
+Target Report Mask(STRICT_ALIGN) Save
+Do not generate unaligned memory accesses.
+
Enum
Name(code_model) Type(enum riscv_code_model)
Known code models (for use with the -mcmodel= option):
-mtune=@var{processor-string} @gol
-msmall-data-limit=@var{N-bytes} @gol
-msave-restore -mno-save-restore @gol
+-mstrict-align -mno-strict-align @gol
-mcmodel=@var{code-model} @gol
-mexplicit-relocs -mno-explicit-relocs @gol}
@opindex msave-restore
Use smaller but slower prologue and epilogue code.
+@item -mstrict-align
+@itemx -mno-strict-align
+@opindex mstrict-align
+Do not generate unaligned memory accesses (@option{-mno-strict-align},
+the default, permits them).
+
@item -mcmodel=@var{code-model}
@opindex mcmodel
Specify the code model.