2015-05-05  Matthew Wahab  <matthew.wahab@arm.com>

* gcc/config/aarch64/aarch64-protos.h (struct cpu_branch_cost): New.
(tune_params): Add field branch_costs.
(aarch64_branch_cost): Declare.
* gcc/config/aarch64/aarch64.c (generic_branch_cost): New.
(generic_tunings): Set field branch_costs to generic_branch_cost.
(cortexa53_tunings): Likewise.
(cortexa57_tunings): Likewise.
(thunderx_tunings): Likewise.
(xgene1_tunings): Likewise.
(aarch64_branch_cost): Define.
* gcc/config/aarch64/aarch64.h (BRANCH_COST): Redefine.

From-SVN: r222805
+2015-05-05  Matthew Wahab  <matthew.wahab@arm.com>
+
+ * gcc/config/aarch64/aarch64-protos.h (struct cpu_branch_cost): New.
+ (tune_params): Add field branch_costs.
+ (aarch64_branch_cost): Declare.
+ * gcc/config/aarch64/aarch64.c (generic_branch_cost): New.
+ (generic_tunings): Set field branch_costs to generic_branch_cost.
+ (cortexa53_tunings): Likewise.
+ (cortexa57_tunings): Likewise.
+ (thunderx_tunings): Likewise.
+ (xgene1_tunings): Likewise.
+ (aarch64_branch_cost): Define.
+ * gcc/config/aarch64/aarch64.h (BRANCH_COST): Redefine.
+
2015-05-05  Uros Bizjak  <ubizjak@gmail.com>

* config/i386/i386.c: Use HOST_WIDE_INT_1 instead of (HOST_WIDE_INT) 1
const int cond_not_taken_branch_cost; /* Cost of not taken branch. */
};
+/* Branch costs. */
+struct cpu_branch_cost
+{
+  const int predictable;    /* Predictable branch or optimizing for size.  */
+  const int unpredictable;  /* Unpredictable branch when optimizing for speed.  */
+};
+
struct tune_params
{
const struct cpu_cost_table *const insn_extra_cost;
const struct cpu_addrcost_table *const addr_cost;
const struct cpu_regmove_cost *const regmove_cost;
const struct cpu_vector_cost *const vec_costs;
+ const struct cpu_branch_cost *const branch_costs;
const int memmov_cost;
const int issue_rate;
const unsigned int fuseable_ops;
HOST_WIDE_INT aarch64_initial_elimination_offset (unsigned, unsigned);
int aarch64_get_condition_code (rtx);
bool aarch64_bitmask_imm (HOST_WIDE_INT val, machine_mode);
+int aarch64_branch_cost (bool, bool);
enum aarch64_symbol_type
aarch64_classify_symbolic_expression (rtx, enum aarch64_symbol_context);
bool aarch64_const_vec_all_same_int_p (rtx, HOST_WIDE_INT);
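
The two hunks above give each tuning target a way to supply its own branch
costs: the new struct holds the per-CPU numbers, and tune_params carries a
pointer to it. As a sketch of how a future CPU-specific table could look
(the name and values below are hypothetical; this patch only adds
generic_branch_cost):

/* Hypothetical table for a core whose mispredicted branches are
   comparatively expensive.  Not part of this patch.  */
static const struct cpu_branch_cost example_branch_cost =
{
  1,  /* Predictable.  */
  3   /* Unpredictable.  */
};

A tuning entry would then set its branch_costs field to
&example_branch_cost, exactly as the hunks below set it to
&generic_branch_cost.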
#define AARCH64_FUSE_ADRP_LDR (1 << 3)
#define AARCH64_FUSE_CMP_BRANCH (1 << 4)
+/* Generic costs for branch instructions. */
+static const struct cpu_branch_cost generic_branch_cost =
+{
+  2,  /* Predictable.  */
+  2   /* Unpredictable.  */
+};
+
static const struct tune_params generic_tunings =
{
&cortexa57_extra_costs,
&generic_addrcost_table,
&generic_regmove_cost,
&generic_vector_cost,
+ &generic_branch_cost,
4, /* memmov_cost */
2, /* issue_rate */
AARCH64_FUSE_NOTHING, /* fuseable_ops */
&generic_addrcost_table,
&cortexa53_regmove_cost,
&generic_vector_cost,
+ &generic_branch_cost,
4, /* memmov_cost */
2, /* issue_rate */
(AARCH64_FUSE_MOV_MOVK | AARCH64_FUSE_ADRP_ADD
&cortexa57_addrcost_table,
&cortexa57_regmove_cost,
&cortexa57_vector_cost,
+ &generic_branch_cost,
4, /* memmov_cost */
3, /* issue_rate */
(AARCH64_FUSE_MOV_MOVK | AARCH64_FUSE_ADRP_ADD
&generic_addrcost_table,
&thunderx_regmove_cost,
&generic_vector_cost,
+ &generic_branch_cost,
6, /* memmov_cost */
2, /* issue_rate */
AARCH64_FUSE_CMP_BRANCH, /* fuseable_ops */
&xgene1_addrcost_table,
&xgene1_regmove_cost,
&xgene1_vector_cost,
+ &generic_branch_cost,
6, /* memmov_cost */
4, /* issue_rate */
AARCH64_FUSE_NOTHING, /* fuseable_ops */
return cost;
}
+/* Return the cost of a branch.  If SPEED_P is true then the compiler is
+   optimizing for speed.  If PREDICTABLE_P is true then the branch is
+   expected to be well predicted.  */
+
+int
+aarch64_branch_cost (bool speed_p, bool predictable_p)
+{
+  const struct cpu_branch_cost *branch_costs =
+    aarch64_tune_params->branch_costs;
+
+  /* When optimizing for size, or when the branch is expected to be well
+     predicted, use the cheaper "predictable" cost; only an unpredictable
+     branch under speed optimization pays the "unpredictable" cost.  */
+  if (!speed_p || predictable_p)
+    return branch_costs->predictable;
+  else
+    return branch_costs->unpredictable;
+}
+
/* Return true if the RTX X in mode MODE is a zero or sign extract
usable in an ADD or SUB (extended register) instruction. */
static bool
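
To make the selection rule concrete, here is a standalone sketch
(simplified, outside GCC; the 1/3 costs are made up, while the generic
table above uses 2 for both) that prints the cost chosen for each input
combination:

#include <stdbool.h>
#include <stdio.h>

/* Simplified copy of the cost table and selection logic above.  */
struct cpu_branch_cost { int predictable; int unpredictable; };
static const struct cpu_branch_cost costs = { 1, 3 };  /* Hypothetical.  */

static int
branch_cost (bool speed_p, bool predictable_p)
{
  /* Only a speed-optimized, unpredictable branch pays the higher cost.  */
  return (!speed_p || predictable_p) ? costs.predictable
				     : costs.unpredictable;
}

int
main (void)
{
  for (int s = 0; s < 2; s++)
    for (int p = 0; p < 2; p++)
      printf ("speed_p=%d predictable_p=%d -> cost %d\n",
	      s, p, branch_cost (s, p));
  return 0;
}

Only speed_p=1, predictable_p=0 selects the unpredictable cost. Since
generic_branch_cost sets both values to 2, this patch preserves the old
behaviour of BRANCH_COST == 2 until a tuning diverges the two numbers.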
#define TRAMPOLINE_SECTION text_section
/* To start with. */
-#define BRANCH_COST(SPEED_P, PREDICTABLE_P) 2
+#define BRANCH_COST(SPEED_P, PREDICTABLE_P) \
+ (aarch64_branch_cost (SPEED_P, PREDICTABLE_P))
\f
/* Assembly output. */
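
For context, BRANCH_COST is queried by the target-independent parts of the
compiler. A typical call site looks roughly like the following sketch,
modelled on existing middle-end usage such as the
LOGICAL_OP_NON_SHORT_CIRCUIT default (use_branching_sequence is a
placeholder; this snippet is not part of the patch):

/* Keep a branching sequence only if the target says branches are
   cheap enough in the current optimization mode.  */
if (BRANCH_COST (optimize_function_for_speed_p (cfun), false) >= 2)
  use_branching_sequence = true;

With the macro now forwarding to aarch64_branch_cost, these decisions are
driven by the per-CPU tuning tables instead of the old hard-coded 2.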