aarch64_emit_move (dest, base);
return;
}
+
mem = force_const_mem (ptr_mode, imm);
gcc_assert (mem);
+
+ /* If we aren't generating PC relative literals, then
+ we need to expand the literal pool access carefully.
+ This is something that needs to be done in a number
+ of places, so it could well live in a separate function.  */
+ if (nopcrelative_literal_loads)
+ {
+ gcc_assert (can_create_pseudo_p ());
+ base = gen_reg_rtx (ptr_mode);
+ aarch64_expand_mov_immediate (base, XEXP (mem, 0));
+ mem = gen_rtx_MEM (ptr_mode, base);
+ }
+
if (mode != ptr_mode)
mem = gen_rtx_ZERO_EXTEND (mode, mem);
+
emit_insn (gen_rtx_SET (dest, mem));
+
return;
case SYMBOL_SMALL_TLSGD:
rtx sym, addend;
split_const (x, &sym, &addend);
- return (GET_CODE (sym) == LABEL_REF
- || (GET_CODE (sym) == SYMBOL_REF
- && CONSTANT_POOL_ADDRESS_P (sym)));
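+ /* A constant pool entry is only a valid literal address when
+ PC relative literal loads are enabled.  */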
+ return ((GET_CODE (sym) == LABEL_REF
+ || (GET_CODE (sym) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (sym)
+ && !nopcrelative_literal_loads)));
}
return false;
}
+
+/* Return the reload icode required for a constant pool entry of MODE.  */
+static enum insn_code
+aarch64_constant_pool_reload_icode (machine_mode mode)
+{
+ switch (mode)
+ {
+ case SFmode:
+ return CODE_FOR_aarch64_reload_movcpsfdi;
+
+ case DFmode:
+ return CODE_FOR_aarch64_reload_movcpdfdi;
+
+ case TFmode:
+ return CODE_FOR_aarch64_reload_movcptfdi;
+
+ case V8QImode:
+ return CODE_FOR_aarch64_reload_movcpv8qidi;
+
+ case V16QImode:
+ return CODE_FOR_aarch64_reload_movcpv16qidi;
+
+ case V4HImode:
+ return CODE_FOR_aarch64_reload_movcpv4hidi;
+
+ case V8HImode:
+ return CODE_FOR_aarch64_reload_movcpv8hidi;
+
+ case V2SImode:
+ return CODE_FOR_aarch64_reload_movcpv2sidi;
+
+ case V4SImode:
+ return CODE_FOR_aarch64_reload_movcpv4sidi;
+
+ case V2DImode:
+ return CODE_FOR_aarch64_reload_movcpv2didi;
+
+ case V2DFmode:
+ return CODE_FOR_aarch64_reload_movcpv2dfdi;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ gcc_unreachable ();
+}
+
static reg_class_t
aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
reg_class_t rclass,
machine_mode mode,
secondary_reload_info *sri)
{
+
+ /* If we have to disable direct literal pool loads and stores because the
+ function is too big, then we need a scratch register. */
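+ For example, instead of a single PC relative literal load
+ ldr s0, .LC0
+ (whose range is only +/-1MB), the pool entry's address is
+ first built in the scratch register and then dereferenced:
+ adrp x1, .LC0
+ add x1, x1, :lo12:.LC0
+ ldr s0, [x1]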
+ if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == SYMBOL_REF
+ && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0))
+ && (SCALAR_FLOAT_MODE_P (GET_MODE (x))
+ || targetm.vector_mode_supported_p (GET_MODE (x)))
+ && nopcrelative_literal_loads)
+ {
+ sri->icode = aarch64_constant_pool_reload_icode (mode);
+ return NO_REGS;
+ }
+
/* Without the TARGET_SIMD instructions we cannot move a Q register
to a Q register directly. We need a scratch. */
if (REG_P (x) && (mode == TFmode || mode == TImode) && mode == GET_MODE (x)
if (opts->x_align_functions <= 0)
opts->x_align_functions = aarch64_tune_params.function_align;
}
+
+ /* If nopcrelative_literal_loads was set (to 1) on the command
+ line, the user asked for PC relative literal loads, so clear
+ the flag.  */
+ if (nopcrelative_literal_loads == 1)
+ nopcrelative_literal_loads = 0;
+
+ /* If the option was not given on the command line (the variable
+ still has its initial value of 2), default to no PC relative
+ literal loads.  */
+ if (nopcrelative_literal_loads == 2)
+ nopcrelative_literal_loads = 1;
+
+ /* In the tiny memory model it makes no sense to disallow
+ PC relative literal pool loads, as many other things
+ would break anyway.  */
+ if (nopcrelative_literal_loads
+ && (aarch64_cmodel == AARCH64_CMODEL_TINY
+ || aarch64_cmodel == AARCH64_CMODEL_TINY_PIC))
+ nopcrelative_literal_loads = 0;
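+
+ /* From here on nopcrelative_literal_loads is a pure boolean: 1 if
+ constant pool entries must be addressed indirectly through a
+ register, 0 if single PC relative literal loads may be used.  */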
}
/* 'Unpack' up the internal tuning structs and update the options
if (GET_CODE (x) == SYMBOL_REF)
{
if (aarch64_cmodel == AARCH64_CMODEL_LARGE)
- return SYMBOL_FORCE_TO_MEM;
+ {
+ /* This is alright even in PIC code as the constant
+ pool reference is always PC relative and within
+ the same translation unit. */
+ if (nopcrelative_literal_loads
+ && CONSTANT_POOL_ADDRESS_P (x))
+ return SYMBOL_SMALL_ABSOLUTE;
+ else
+ return SYMBOL_FORCE_TO_MEM;
+ }
if (aarch64_tls_symbol_p (x))
return aarch64_classify_tls_symbol (x);
;; -------------------------------------------------------------------
;; Reload support
;; -------------------------------------------------------------------
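+;; These reload_movcp expanders are used by aarch64_secondary_reload
+;; when direct literal pool loads are disabled: operand 2 is the
+;; scratch register (requested via the clobber) in which the constant
+;; pool address is built before the actual load.
+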
+;; Reload scalar floating point modes from the constant pool.
+;; The AArch64 port doesn't have __int128 constant move support,
+;; so there is no equivalent TImode pattern.
+(define_expand "aarch64_reload_movcp<GPF_TF:mode><P:mode>"
+ [(set (match_operand:GPF_TF 0 "register_operand" "=w")
+ (mem:GPF_TF (match_operand 1 "aarch64_constant_pool_symref" "S")))
+ (clobber (match_operand:P 2 "register_operand" "=&r"))]
+ "TARGET_FLOAT && nopcrelative_literal_loads"
+ {
+ aarch64_expand_mov_immediate (operands[2], XEXP (operands[1], 0));
+ emit_move_insn (operands[0], gen_rtx_MEM (<GPF_TF:MODE>mode, operands[2]));
+ DONE;
+ }
+)
+
+;; Reload Vector modes from constant pool.
+(define_expand "aarch64_reload_movcp<VALL:mode><P:mode>"
+ [(set (match_operand:VALL 0 "register_operand" "=w")
+ (mem:VALL (match_operand 1 "aarch64_constant_pool_symref" "S")))
+ (clobber (match_operand:P 2 "register_operand" "=&r"))]
+ "TARGET_FLOAT && nopcrelative_literal_loads"
+ {
+ aarch64_expand_mov_immediate (operands[2], XEXP (operands[1], 0));
+ emit_move_insn (operands[0], gen_rtx_MEM (<VALL:MODE>mode, operands[2]));
+ DONE;
+ }
+)
(define_expand "aarch64_reload_mov<mode>"
[(set (match_operand:TX 0 "register_operand" "=w")