From 35e58efb7a86a66c6e93dac3d197eef95963582d Mon Sep 17 00:00:00 2001
From: Wilco Dijkstra
Date: Mon, 20 Jan 2020 13:09:15 +0000
Subject: [PATCH] [AArch64] Set SLOW_BYTE_ACCESS

Contrary to all documentation, SLOW_BYTE_ACCESS simply means accessing
bitfields by their declared type, which results in better code generation.

gcc/
	* config/aarch64/aarch64.h (SLOW_BYTE_ACCESS): Set to 1.
---
 gcc/ChangeLog                | 4 ++++
 gcc/config/aarch64/aarch64.h | 10 ++--------
 2 files changed, 6 insertions(+), 8 deletions(-)

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index f955514fe0c..ce26404812a 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,3 +1,7 @@
+2020-01-20  Wilco Dijkstra
+
+	* config/aarch64/aarch64.h (SLOW_BYTE_ACCESS): Set to 1.
+
 2020-01-20  Richard Sandiford
 
 	* config/aarch64/aarch64-sve-builtins-base.cc
diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
index eac2d62d63c..342fe29bd91 100644
--- a/gcc/config/aarch64/aarch64.h
+++ b/gcc/config/aarch64/aarch64.h
@@ -1006,14 +1006,8 @@ typedef struct
    if given data not on the nominal alignment.  */
 #define STRICT_ALIGNMENT TARGET_STRICT_ALIGN
 
-/* Define this macro to be non-zero if accessing less than a word of
-   memory is no faster than accessing a word of memory, i.e., if such
-   accesses require more than one instruction or if there is no
-   difference in cost.
-   Although there's no difference in instruction count or cycles,
-   in AArch64 we don't want to expand to a sub-word to a 64-bit access
-   if we don't have to, for power-saving reasons.  */
-#define SLOW_BYTE_ACCESS 0
+/* Enable wide bitfield accesses for more efficient bitfield code.  */
+#define SLOW_BYTE_ACCESS 1
 
 #define NO_FUNCTION_CSE 1
 
-- 
2.30.2
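
A minimal illustration of the behaviour the macro controls (the struct and
function below are made up for this note and are not part of the patch):

    /* Bitfield members declared with type unsigned int.  */
    struct rec
    {
      unsigned int mode  : 3;
      unsigned int count : 13;
    };

    unsigned int
    get_mode (struct rec *r)
    {
      /* With SLOW_BYTE_ACCESS set to 1, the bitfield is accessed using its
         declared type, i.e. a single 32-bit load here, rather than the
         narrowest byte or halfword access covering the field.  */
      return r->mode;
    }

The expected effect is that accesses to adjacent bitfields in the same
container use the same wide access and so can be combined, which is the
better code generation the patch description refers to.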