rs6000_isa_flags |= OPTION_MASK_TOC_FUSION;
/* -mpower9-dform turns on both -mpower9-dform-scalar and
- -mpower9-dform-vector. There are currently problems if
- -mpower9-dform-vector instructions are enabled when we use the RELOAD
- register allocator. */
+ -mpower9-dform-vector. */
if (TARGET_P9_DFORM_BOTH > 0)
{
- if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_VECTOR)
- && TARGET_LRA)
+ if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_VECTOR))
rs6000_isa_flags |= OPTION_MASK_P9_DFORM_VECTOR;
if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_SCALAR))
rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
}
- /* There have been bugs with both -mvsx-timode and -mpower9-dform-vector that
- don't show up with -mlra, but do show up with -mno-lra. Given -mlra will
- become the default once PR 69847 is fixed, turn off the options with
- problems by default if -mno-lra was used, and warn if the user explicitly
- asked for the option.
+ /* There have been bugs with -mvsx-timode that don't show up with -mlra,
+ but do show up with -mno-lra. Given -mlra will become the default once
+ PR 69847 is fixed, turn off the options with problems by default if
+ -mno-lra was used, and warn if the user explicitly asked for the option.
Enable -mpower9-dform-vector by default if LRA and other power9 options.
Enable -mvsx-timode by default if LRA and VSX. */
else
rs6000_isa_flags &= ~OPTION_MASK_VSX_TIMODE;
}
-
- if (TARGET_P9_DFORM_VECTOR)
- {
- if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_VECTOR) != 0)
- warning (0, "-mpower9-dform-vector might need -mlra");
-
- else
- rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_VECTOR;
- }
}
else
if (TARGET_VSX && !TARGET_VSX_TIMODE
&& (rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE) == 0)
rs6000_isa_flags |= OPTION_MASK_VSX_TIMODE;
-
- if (TARGET_VSX && TARGET_P9_VECTOR && !TARGET_P9_DFORM_VECTOR
- && TARGET_P9_DFORM_SCALAR && TARGET_P9_DFORM_BOTH < 0
- && (rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_VECTOR) == 0)
- rs6000_isa_flags |= OPTION_MASK_P9_DFORM_VECTOR;
}
/* Set -mallow-movmisalign to explicitly on if we have full ISA 2.07
   support.  */
bool
-quad_address_p (rtx addr, machine_mode mode, bool gpr_p)
+quad_address_p (rtx addr, machine_mode mode, bool strict)
{
rtx op0, op1;
if (GET_MODE_SIZE (mode) != 16)
return false;
- if (gpr_p)
- {
- if (!TARGET_QUAD_MEMORY && !TARGET_SYNC_TI)
- return false;
-
- /* LQ/STQ can handle indirect addresses. */
- if (base_reg_operand (addr, Pmode))
- return true;
- }
+ if (legitimate_indirect_address_p (addr, strict))
+ return true;
- else
- {
- if (!mode_supports_vsx_dform_quad (mode))
- return false;
- }
+ if (VECTOR_MODE_P (mode) && !mode_supports_vsx_dform_quad (mode))
+ return false;
if (GET_CODE (addr) != PLUS)
return false;
op0 = XEXP (addr, 0);
- if (!base_reg_operand (op0, Pmode))
+ if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
return false;
op1 = XEXP (addr, 1);
if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
return false;
if (mode_supports_vsx_dform_quad (mode))
- return (virtual_stack_registers_memory_p (x)
- || quad_address_p (x, mode, false));
+ return quad_address_p (x, mode, strict);
if (!reg_offset_addressing_ok_p (mode))
return virtual_stack_registers_memory_p (x);
if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
int ind_levels ATTRIBUTE_UNUSED, int *win)
{
bool reg_offset_p = reg_offset_addressing_ok_p (mode);
+ bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
/* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
if (TARGET_CMODEL != CMODEL_SMALL
&& reg_offset_p
+ && !quad_offset_p
&& small_toc_ref (x, VOIDmode))
{
rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
}
if (GET_CODE (x) == PLUS
- && GET_CODE (XEXP (x, 0)) == REG
+ && REG_P (XEXP (x, 0))
&& REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
&& INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
- && GET_CODE (XEXP (x, 1)) == CONST_INT
+ && CONST_INT_P (XEXP (x, 1))
&& reg_offset_p
&& !SPE_VECTOR_MODE (mode)
&& !(TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
- && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
+ && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
{
HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
HOST_WIDE_INT high
= (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
- /* Check for 32-bit overflow. */
- if (high + low != val)
+ /* Check for 32-bit overflow or quad addresses with one of the
+ four least significant bits set. */
+ if (high + low != val
+ || (quad_offset_p && (low & 0xf)))
{
*win = 0;
return x;
if (GET_CODE (x) == SYMBOL_REF
&& reg_offset_p
+ && !quad_offset_p
&& (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
&& !SPE_VECTOR_MODE (mode)
#if TARGET_MACHO
if (TARGET_TOC
&& reg_offset_p
+ && !quad_offset_p
&& GET_CODE (x) == SYMBOL_REF
&& use_toc_relative_ref (x, mode))
{
&& mode_supports_pre_incdec_p (mode)
&& legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
return 1;
- if (virtual_stack_registers_memory_p (x))
- return 1;
-
/* Handle restricted vector d-form offsets in ISA 3.0. */
if (quad_offset_p)
{
- if (quad_address_p (x, mode, false))
+ if (quad_address_p (x, mode, reg_ok_strict))
return 1;
}
+ else if (virtual_stack_registers_memory_p (x))
+ return 1;
else if (reg_offset_p)
{
else if (TARGET_VSX && dest_vsx_p)
{
if (mode_supports_vsx_dform_quad (mode)
- && quad_address_p (XEXP (src, 0), mode, false))
+ && quad_address_p (XEXP (src, 0), mode, true))
return "lxv %x0,%1";
else if (TARGET_P9_VECTOR)
else if (TARGET_VSX && src_vsx_p)
{
if (mode_supports_vsx_dform_quad (mode)
- && quad_address_p (XEXP (dest, 0), mode, false))
+ && quad_address_p (XEXP (dest, 0), mode, true))
return "stxv %x1,%0";
else if (TARGET_P9_VECTOR)