From: Richard Sandiford Date: Mon, 6 Nov 2017 20:02:10 +0000 (+0000) Subject: [AArch64] Add an endian_lane_rtx helper routine X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=7ac29c0fa046d6018bad07ab17ec17585b5ef4ce;p=gcc.git [AArch64] Add an endian_lane_rtx helper routine Later patches turn the number of vector units into a poly_int. We deliberately don't support applying GEN_INT to those (except in target code that doesn't distinguish between poly_ints and normal constants); gen_int_mode needs to be used instead. This patch therefore replaces instances of: GEN_INT (ENDIAN_LANE_N (builtin_mode, INTVAL (op[opc]))) with uses of a new endian_lane_rtx function. 2017-11-06 Richard Sandiford Alan Hayward David Sherwood gcc/ * config/aarch64/aarch64-protos.h (aarch64_endian_lane_rtx): Declare. * config/aarch64/aarch64.c (aarch64_endian_lane_rtx): New function. * config/aarch64/aarch64.h (ENDIAN_LANE_N): Take the number of units rather than the mode. * config/aarch64/iterators.md (nunits): New mode attribute. * config/aarch64/aarch64-builtins.c (aarch64_simd_expand_args): Use aarch64_endian_lane_rtx instead of GEN_INT (ENDIAN_LANE_N ...). * config/aarch64/aarch64-simd.md (aarch64_dup_lane) (aarch64_dup_lane_, *aarch64_mul3_elt) (*aarch64_mul3_elt_): Likewise. (*aarch64_mul3_elt_to_64v2df, *aarch64_mla_elt): Likewise. (*aarch64_mla_elt_, *aarch64_mls_elt) (*aarch64_mls_elt_, *aarch64_fma4_elt) (*aarch64_fma4_elt_):: Likewise. (*aarch64_fma4_elt_to_64v2df, *aarch64_fnma4_elt): Likewise. (*aarch64_fnma4_elt_): Likewise. (*aarch64_fnma4_elt_to_64v2df, reduc_plus_scal_): Likewise. (reduc_plus_scal_v4sf, reduc__scal_): Likewise. (reduc__scal_): Likewise. (*aarch64_get_lane_extend): Likewise. (*aarch64_get_lane_zero_extendsi): Likewise. (aarch64_get_lane, *aarch64_mulx_elt_) (*aarch64_mulx_elt, *aarch64_vgetfmulx): Likewise. (aarch64_sqdmulh_lane, aarch64_sqdmulh_laneq) (aarch64_sqrdmlh_lane): Likewise. (aarch64_sqrdmlh_laneq): Likewise. 
(aarch64_sqdmll_lane): Likewise. (aarch64_sqdmll_laneq): Likewise. (aarch64_sqdmll2_lane_internal): Likewise. (aarch64_sqdmll2_laneq_internal): Likewise. (aarch64_sqdmull_lane, aarch64_sqdmull_laneq): Likewise. (aarch64_sqdmull2_lane_internal): Likewise. (aarch64_sqdmull2_laneq_internal): Likewise. (aarch64_vec_load_lanesoi_lane): Likewise. (aarch64_vec_store_lanesoi_lane): Likewise. (aarch64_vec_load_lanesci_lane): Likewise. (aarch64_vec_store_lanesci_lane): Likewise. (aarch64_vec_load_lanesxi_lane): Likewise. (aarch64_vec_store_lanesxi_lane): Likewise. (aarch64_simd_vec_set): Update use of ENDIAN_LANE_N. (aarch64_simd_vec_setv2di): Likewise. Reviewed-by: James Greenhalgh Co-Authored-By: Alan Hayward Co-Authored-By: David Sherwood From-SVN: r254466 --- diff --git a/gcc/ChangeLog b/gcc/ChangeLog index 889e36af049..04f4d13d271 100644 --- a/gcc/ChangeLog +++ b/gcc/ChangeLog @@ -1,3 +1,49 @@ +2017-11-06 Richard Sandiford + Alan Hayward + David Sherwood + + * config/aarch64/aarch64-protos.h (aarch64_endian_lane_rtx): Declare. + * config/aarch64/aarch64.c (aarch64_endian_lane_rtx): New function. + * config/aarch64/aarch64.h (ENDIAN_LANE_N): Take the number + of units rather than the mode. + * config/aarch64/iterators.md (nunits): New mode attribute. + * config/aarch64/aarch64-builtins.c (aarch64_simd_expand_args): + Use aarch64_endian_lane_rtx instead of GEN_INT (ENDIAN_LANE_N ...). + * config/aarch64/aarch64-simd.md (aarch64_dup_lane) + (aarch64_dup_lane_, *aarch64_mul3_elt) + (*aarch64_mul3_elt_): Likewise. + (*aarch64_mul3_elt_to_64v2df, *aarch64_mla_elt): Likewise. + (*aarch64_mla_elt_, *aarch64_mls_elt) + (*aarch64_mls_elt_, *aarch64_fma4_elt) + (*aarch64_fma4_elt_):: Likewise. + (*aarch64_fma4_elt_to_64v2df, *aarch64_fnma4_elt): Likewise. + (*aarch64_fnma4_elt_): Likewise. + (*aarch64_fnma4_elt_to_64v2df, reduc_plus_scal_): Likewise. + (reduc_plus_scal_v4sf, reduc__scal_): Likewise. + (reduc__scal_): Likewise. + (*aarch64_get_lane_extend): Likewise. 
+ (*aarch64_get_lane_zero_extendsi): Likewise. + (aarch64_get_lane, *aarch64_mulx_elt_) + (*aarch64_mulx_elt, *aarch64_vgetfmulx): Likewise. + (aarch64_sqdmulh_lane, aarch64_sqdmulh_laneq) + (aarch64_sqrdmlh_lane): Likewise. + (aarch64_sqrdmlh_laneq): Likewise. + (aarch64_sqdmll_lane): Likewise. + (aarch64_sqdmll_laneq): Likewise. + (aarch64_sqdmll2_lane_internal): Likewise. + (aarch64_sqdmll2_laneq_internal): Likewise. + (aarch64_sqdmull_lane, aarch64_sqdmull_laneq): Likewise. + (aarch64_sqdmull2_lane_internal): Likewise. + (aarch64_sqdmull2_laneq_internal): Likewise. + (aarch64_vec_load_lanesoi_lane): Likewise. + (aarch64_vec_store_lanesoi_lane): Likewise. + (aarch64_vec_load_lanesci_lane): Likewise. + (aarch64_vec_store_lanesci_lane): Likewise. + (aarch64_vec_load_lanesxi_lane): Likewise. + (aarch64_vec_store_lanesxi_lane): Likewise. + (aarch64_simd_vec_set): Update use of ENDIAN_LANE_N. + (aarch64_simd_vec_setv2di): Likewise. + 2017-11-06 Carl Love * config/rs6000/rs6000-c.c (P8V_BUILTIN_VEC_REVB): Add power 8 diff --git a/gcc/config/aarch64/aarch64-builtins.c b/gcc/config/aarch64/aarch64-builtins.c index 242b2e3dc31..ff4ab60e1ff 100644 --- a/gcc/config/aarch64/aarch64-builtins.c +++ b/gcc/config/aarch64/aarch64-builtins.c @@ -1067,8 +1067,8 @@ aarch64_simd_expand_args (rtx target, int icode, int have_retval, GET_MODE_NUNITS (builtin_mode), exp); /* Keep to GCC-vector-extension lane indices in the RTL. */ - op[opc] = - GEN_INT (ENDIAN_LANE_N (builtin_mode, INTVAL (op[opc]))); + op[opc] = aarch64_endian_lane_rtx (builtin_mode, + INTVAL (op[opc])); } goto constant_arg; @@ -1081,7 +1081,7 @@ aarch64_simd_expand_args (rtx target, int icode, int have_retval, aarch64_simd_lane_bounds (op[opc], 0, GET_MODE_NUNITS (vmode), exp); /* Keep to GCC-vector-extension lane indices in the RTL. 
*/ - op[opc] = GEN_INT (ENDIAN_LANE_N (vmode, INTVAL (op[opc]))); + op[opc] = aarch64_endian_lane_rtx (vmode, INTVAL (op[opc])); } /* Fall through - if the lane index isn't a constant then the next case will error. */ diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h index 345bfe8f3ae..4df2ee00f8f 100644 --- a/gcc/config/aarch64/aarch64-protos.h +++ b/gcc/config/aarch64/aarch64-protos.h @@ -425,6 +425,7 @@ void aarch64_simd_emit_reg_reg_move (rtx *, machine_mode, unsigned int); rtx aarch64_simd_expand_builtin (int, tree, rtx); void aarch64_simd_lane_bounds (rtx, HOST_WIDE_INT, HOST_WIDE_INT, const_tree); +rtx aarch64_endian_lane_rtx (machine_mode, unsigned int); void aarch64_split_128bit_move (rtx, rtx); diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md index a3600b3d647..445503d8449 100644 --- a/gcc/config/aarch64/aarch64-simd.md +++ b/gcc/config/aarch64/aarch64-simd.md @@ -80,7 +80,7 @@ )))] "TARGET_SIMD" { - operands[2] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[2]))); + operands[2] = aarch64_endian_lane_rtx (mode, INTVAL (operands[2])); return "dup\\t%0., %1.[%2]"; } [(set_attr "type" "neon_dup")] @@ -95,8 +95,7 @@ )))] "TARGET_SIMD" { - operands[2] = GEN_INT (ENDIAN_LANE_N (mode, - INTVAL (operands[2]))); + operands[2] = aarch64_endian_lane_rtx (mode, INTVAL (operands[2])); return "dup\\t%0., %1.[%2]"; } [(set_attr "type" "neon_dup")] @@ -502,7 +501,7 @@ (match_operand:VMUL 3 "register_operand" "w")))] "TARGET_SIMD" { - operands[2] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[2]))); + operands[2] = aarch64_endian_lane_rtx (mode, INTVAL (operands[2])); return "mul\\t%0., %3., %1.[%2]"; } [(set_attr "type" "neon_mul__scalar")] @@ -518,8 +517,7 @@ (match_operand:VMUL_CHANGE_NLANES 3 "register_operand" "w")))] "TARGET_SIMD" { - operands[2] = GEN_INT (ENDIAN_LANE_N (mode, - INTVAL (operands[2]))); + operands[2] = aarch64_endian_lane_rtx (mode, INTVAL (operands[2])); return "mul\\t%0., 
%3., %1.[%2]"; } [(set_attr "type" "neon_mul__scalar")] @@ -572,7 +570,7 @@ (match_operand:DF 3 "register_operand" "w")))] "TARGET_SIMD" { - operands[2] = GEN_INT (ENDIAN_LANE_N (V2DFmode, INTVAL (operands[2]))); + operands[2] = aarch64_endian_lane_rtx (V2DFmode, INTVAL (operands[2])); return "fmul\\t%0.2d, %3.2d, %1.d[%2]"; } [(set_attr "type" "neon_fp_mul_d_scalar_q")] @@ -707,7 +705,7 @@ (match_operand:SI 2 "immediate_operand" "i,i,i")))] "TARGET_SIMD" { - int elt = ENDIAN_LANE_N (mode, exact_log2 (INTVAL (operands[2]))); + int elt = ENDIAN_LANE_N (, exact_log2 (INTVAL (operands[2]))); operands[2] = GEN_INT ((HOST_WIDE_INT) 1 << elt); switch (which_alternative) { @@ -1073,7 +1071,7 @@ (match_operand:SI 2 "immediate_operand" "i,i")))] "TARGET_SIMD" { - int elt = ENDIAN_LANE_N (V2DImode, exact_log2 (INTVAL (operands[2]))); + int elt = ENDIAN_LANE_N (2, exact_log2 (INTVAL (operands[2]))); operands[2] = GEN_INT ((HOST_WIDE_INT) 1 << elt); switch (which_alternative) { @@ -1110,7 +1108,7 @@ (match_operand:SI 2 "immediate_operand" "i")))] "TARGET_SIMD" { - int elt = ENDIAN_LANE_N (mode, exact_log2 (INTVAL (operands[2]))); + int elt = ENDIAN_LANE_N (, exact_log2 (INTVAL (operands[2]))); operands[2] = GEN_INT ((HOST_WIDE_INT)1 << elt); return "ins\t%0.[%p2], %1.[0]"; @@ -1155,7 +1153,7 @@ (match_operand:VDQHS 4 "register_operand" "0")))] "TARGET_SIMD" { - operands[2] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[2]))); + operands[2] = aarch64_endian_lane_rtx (mode, INTVAL (operands[2])); return "mla\t%0., %3., %1.[%2]"; } [(set_attr "type" "neon_mla__scalar")] @@ -1173,8 +1171,7 @@ (match_operand:VDQHS 4 "register_operand" "0")))] "TARGET_SIMD" { - operands[2] = GEN_INT (ENDIAN_LANE_N (mode, - INTVAL (operands[2]))); + operands[2] = aarch64_endian_lane_rtx (mode, INTVAL (operands[2])); return "mla\t%0., %3., %1.[%2]"; } [(set_attr "type" "neon_mla__scalar")] @@ -1214,7 +1211,7 @@ (match_operand:VDQHS 3 "register_operand" "w"))))] "TARGET_SIMD" { - operands[2] = 
GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[2]))); + operands[2] = aarch64_endian_lane_rtx (mode, INTVAL (operands[2])); return "mls\t%0., %3., %1.[%2]"; } [(set_attr "type" "neon_mla__scalar")] @@ -1232,8 +1229,7 @@ (match_operand:VDQHS 3 "register_operand" "w"))))] "TARGET_SIMD" { - operands[2] = GEN_INT (ENDIAN_LANE_N (mode, - INTVAL (operands[2]))); + operands[2] = aarch64_endian_lane_rtx (mode, INTVAL (operands[2])); return "mls\t%0., %3., %1.[%2]"; } [(set_attr "type" "neon_mla__scalar")] @@ -1803,7 +1799,7 @@ (match_operand:VDQF 4 "register_operand" "0")))] "TARGET_SIMD" { - operands[2] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[2]))); + operands[2] = aarch64_endian_lane_rtx (mode, INTVAL (operands[2])); return "fmla\\t%0., %3., %1.[%2]"; } [(set_attr "type" "neon_fp_mla__scalar")] @@ -1820,8 +1816,7 @@ (match_operand:VDQSF 4 "register_operand" "0")))] "TARGET_SIMD" { - operands[2] = GEN_INT (ENDIAN_LANE_N (mode, - INTVAL (operands[2]))); + operands[2] = aarch64_endian_lane_rtx (mode, INTVAL (operands[2])); return "fmla\\t%0., %3., %1.[%2]"; } [(set_attr "type" "neon_fp_mla__scalar")] @@ -1849,7 +1844,7 @@ (match_operand:DF 4 "register_operand" "0")))] "TARGET_SIMD" { - operands[2] = GEN_INT (ENDIAN_LANE_N (V2DFmode, INTVAL (operands[2]))); + operands[2] = aarch64_endian_lane_rtx (V2DFmode, INTVAL (operands[2])); return "fmla\\t%0.2d, %3.2d, %1.2d[%2]"; } [(set_attr "type" "neon_fp_mla_d_scalar_q")] @@ -1879,7 +1874,7 @@ (match_operand:VDQF 4 "register_operand" "0")))] "TARGET_SIMD" { - operands[2] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[2]))); + operands[2] = aarch64_endian_lane_rtx (mode, INTVAL (operands[2])); return "fmls\\t%0., %3., %1.[%2]"; } [(set_attr "type" "neon_fp_mla__scalar")] @@ -1897,8 +1892,7 @@ (match_operand:VDQSF 4 "register_operand" "0")))] "TARGET_SIMD" { - operands[2] = GEN_INT (ENDIAN_LANE_N (mode, - INTVAL (operands[2]))); + operands[2] = aarch64_endian_lane_rtx (mode, INTVAL (operands[2])); return "fmls\\t%0., %3., 
%1.[%2]"; } [(set_attr "type" "neon_fp_mla__scalar")] @@ -1928,7 +1922,7 @@ (match_operand:DF 4 "register_operand" "0")))] "TARGET_SIMD" { - operands[2] = GEN_INT (ENDIAN_LANE_N (V2DFmode, INTVAL (operands[2]))); + operands[2] = aarch64_endian_lane_rtx (V2DFmode, INTVAL (operands[2])); return "fmls\\t%0.2d, %3.2d, %1.2d[%2]"; } [(set_attr "type" "neon_fp_mla_d_scalar_q")] @@ -2261,7 +2255,7 @@ UNSPEC_ADDV)] "TARGET_SIMD" { - rtx elt = GEN_INT (ENDIAN_LANE_N (mode, 0)); + rtx elt = aarch64_endian_lane_rtx (mode, 0); rtx scratch = gen_reg_rtx (mode); emit_insn (gen_aarch64_reduc_plus_internal (scratch, operands[1])); emit_insn (gen_aarch64_get_lane (operands[0], scratch, elt)); @@ -2312,7 +2306,7 @@ UNSPEC_FADDV))] "TARGET_SIMD" { - rtx elt = GEN_INT (ENDIAN_LANE_N (V4SFmode, 0)); + rtx elt = aarch64_endian_lane_rtx (V4SFmode, 0); rtx scratch = gen_reg_rtx (V4SFmode); emit_insn (gen_aarch64_faddpv4sf (scratch, operands[1], operands[1])); emit_insn (gen_aarch64_faddpv4sf (scratch, scratch, scratch)); @@ -2354,7 +2348,7 @@ FMAXMINV)] "TARGET_SIMD" { - rtx elt = GEN_INT (ENDIAN_LANE_N (mode, 0)); + rtx elt = aarch64_endian_lane_rtx (mode, 0); rtx scratch = gen_reg_rtx (mode); emit_insn (gen_aarch64_reduc__internal (scratch, operands[1])); @@ -2370,7 +2364,7 @@ MAXMINV)] "TARGET_SIMD" { - rtx elt = GEN_INT (ENDIAN_LANE_N (mode, 0)); + rtx elt = aarch64_endian_lane_rtx (mode, 0); rtx scratch = gen_reg_rtx (mode); emit_insn (gen_aarch64_reduc__internal (scratch, operands[1])); @@ -2895,7 +2889,7 @@ (parallel [(match_operand:SI 2 "immediate_operand" "i")]))))] "TARGET_SIMD" { - operands[2] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[2]))); + operands[2] = aarch64_endian_lane_rtx (mode, INTVAL (operands[2])); return "smov\\t%0, %1.[%2]"; } [(set_attr "type" "neon_to_gp")] @@ -2909,7 +2903,7 @@ (parallel [(match_operand:SI 2 "immediate_operand" "i")]))))] "TARGET_SIMD" { - operands[2] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[2]))); + operands[2] = 
aarch64_endian_lane_rtx (mode, INTVAL (operands[2])); return "umov\\t%w0, %1.[%2]"; } [(set_attr "type" "neon_to_gp")] @@ -2925,7 +2919,7 @@ (parallel [(match_operand:SI 2 "immediate_operand" "i, i, i")])))] "TARGET_SIMD" { - operands[2] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[2]))); + operands[2] = aarch64_endian_lane_rtx (mode, INTVAL (operands[2])); switch (which_alternative) { case 0: @@ -3301,8 +3295,7 @@ UNSPEC_FMULX))] "TARGET_SIMD" { - operands[3] = GEN_INT (ENDIAN_LANE_N (mode, - INTVAL (operands[3]))); + operands[3] = aarch64_endian_lane_rtx (mode, INTVAL (operands[3])); return "fmulx\t%0, %1, %2.[%3]"; } [(set_attr "type" "neon_fp_mul__scalar")] @@ -3321,7 +3314,7 @@ UNSPEC_FMULX))] "TARGET_SIMD" { - operands[3] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[3]))); + operands[3] = aarch64_endian_lane_rtx (mode, INTVAL (operands[3])); return "fmulx\t%0, %1, %2.[%3]"; } [(set_attr "type" "neon_fp_mul_")] @@ -3355,7 +3348,7 @@ UNSPEC_FMULX))] "TARGET_SIMD" { - operands[3] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[3]))); + operands[3] = aarch64_endian_lane_rtx (mode, INTVAL (operands[3])); return "fmulx\t%0, %1, %2.[%3]"; } [(set_attr "type" "fmul")] @@ -3441,7 +3434,7 @@ VQDMULH))] "TARGET_SIMD" "* - operands[3] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[3]))); + operands[3] = aarch64_endian_lane_rtx (mode, INTVAL (operands[3])); return \"sqdmulh\\t%0., %1., %2.[%3]\";" [(set_attr "type" "neon_sat_mul__scalar")] ) @@ -3456,7 +3449,7 @@ VQDMULH))] "TARGET_SIMD" "* - operands[3] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[3]))); + operands[3] = aarch64_endian_lane_rtx (mode, INTVAL (operands[3])); return \"sqdmulh\\t%0., %1., %2.[%3]\";" [(set_attr "type" "neon_sat_mul__scalar")] ) @@ -3471,7 +3464,7 @@ VQDMULH))] "TARGET_SIMD" "* - operands[3] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[3]))); + operands[3] = aarch64_endian_lane_rtx (mode, INTVAL (operands[3])); return \"sqdmulh\\t%0, %1, %2.[%3]\";" [(set_attr "type" 
"neon_sat_mul__scalar")] ) @@ -3486,7 +3479,7 @@ VQDMULH))] "TARGET_SIMD" "* - operands[3] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[3]))); + operands[3] = aarch64_endian_lane_rtx (mode, INTVAL (operands[3])); return \"sqdmulh\\t%0, %1, %2.[%3]\";" [(set_attr "type" "neon_sat_mul__scalar")] ) @@ -3518,7 +3511,7 @@ SQRDMLH_AS))] "TARGET_SIMD_RDMA" { - operands[4] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[4]))); + operands[4] = aarch64_endian_lane_rtx (mode, INTVAL (operands[4])); return "sqrdmlh\\t%0., %2., %3.[%4]"; } @@ -3536,7 +3529,7 @@ SQRDMLH_AS))] "TARGET_SIMD_RDMA" { - operands[4] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[4]))); + operands[4] = aarch64_endian_lane_rtx (mode, INTVAL (operands[4])); return "sqrdmlh\\t%0, %2, %3.[%4]"; } @@ -3556,7 +3549,7 @@ SQRDMLH_AS))] "TARGET_SIMD_RDMA" { - operands[4] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[4]))); + operands[4] = aarch64_endian_lane_rtx (mode, INTVAL (operands[4])); return "sqrdmlh\\t%0., %2., %3.[%4]"; } @@ -3574,7 +3567,7 @@ SQRDMLH_AS))] "TARGET_SIMD_RDMA" { - operands[4] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[4]))); + operands[4] = aarch64_endian_lane_rtx (mode, INTVAL (operands[4])); return "sqrdmlh\\t%0, %2, %3.[%4]"; } @@ -3618,7 +3611,7 @@ (const_int 1))))] "TARGET_SIMD" { - operands[4] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[4]))); + operands[4] = aarch64_endian_lane_rtx (mode, INTVAL (operands[4])); return "sqdmll\\t%0, %2, %3.[%4]"; } @@ -3642,7 +3635,7 @@ (const_int 1))))] "TARGET_SIMD" { - operands[4] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[4]))); + operands[4] = aarch64_endian_lane_rtx (mode, INTVAL (operands[4])); return "sqdmll\\t%0, %2, %3.[%4]"; } @@ -3665,7 +3658,7 @@ (const_int 1))))] "TARGET_SIMD" { - operands[4] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[4]))); + operands[4] = aarch64_endian_lane_rtx (mode, INTVAL (operands[4])); return "sqdmll\\t%0, %2, %3.[%4]"; } @@ -3688,7 +3681,7 @@ (const_int 1))))] "TARGET_SIMD" { 
- operands[4] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[4]))); + operands[4] = aarch64_endian_lane_rtx (mode, INTVAL (operands[4])); return "sqdmll\\t%0, %2, %3.[%4]"; } @@ -3783,7 +3776,7 @@ (const_int 1))))] "TARGET_SIMD" { - operands[4] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[4]))); + operands[4] = aarch64_endian_lane_rtx (mode, INTVAL (operands[4])); return "sqdmll2\\t%0, %2, %3.[%4]"; } @@ -3809,7 +3802,7 @@ (const_int 1))))] "TARGET_SIMD" { - operands[4] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[4]))); + operands[4] = aarch64_endian_lane_rtx (mode, INTVAL (operands[4])); return "sqdmll2\\t%0, %2, %3.[%4]"; } @@ -3956,7 +3949,7 @@ (const_int 1)))] "TARGET_SIMD" { - operands[3] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[3]))); + operands[3] = aarch64_endian_lane_rtx (mode, INTVAL (operands[3])); return "sqdmull\\t%0, %1, %2.[%3]"; } [(set_attr "type" "neon_sat_mul__scalar_long")] @@ -3977,7 +3970,7 @@ (const_int 1)))] "TARGET_SIMD" { - operands[3] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[3]))); + operands[3] = aarch64_endian_lane_rtx (mode, INTVAL (operands[3])); return "sqdmull\\t%0, %1, %2.[%3]"; } [(set_attr "type" "neon_sat_mul__scalar_long")] @@ -3997,7 +3990,7 @@ (const_int 1)))] "TARGET_SIMD" { - operands[3] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[3]))); + operands[3] = aarch64_endian_lane_rtx (mode, INTVAL (operands[3])); return "sqdmull\\t%0, %1, %2.[%3]"; } [(set_attr "type" "neon_sat_mul__scalar_long")] @@ -4017,7 +4010,7 @@ (const_int 1)))] "TARGET_SIMD" { - operands[3] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[3]))); + operands[3] = aarch64_endian_lane_rtx (mode, INTVAL (operands[3])); return "sqdmull\\t%0, %1, %2.[%3]"; } [(set_attr "type" "neon_sat_mul__scalar_long")] @@ -4095,7 +4088,7 @@ (const_int 1)))] "TARGET_SIMD" { - operands[3] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[3]))); + operands[3] = aarch64_endian_lane_rtx (mode, INTVAL (operands[3])); return "sqdmull2\\t%0, %1, 
%2.[%3]"; } [(set_attr "type" "neon_sat_mul__scalar_long")] @@ -4118,7 +4111,7 @@ (const_int 1)))] "TARGET_SIMD" { - operands[3] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[3]))); + operands[3] = aarch64_endian_lane_rtx (mode, INTVAL (operands[3])); return "sqdmull2\\t%0, %1, %2.[%3]"; } [(set_attr "type" "neon_sat_mul__scalar_long")] @@ -4624,7 +4617,7 @@ UNSPEC_LD2_LANE))] "TARGET_SIMD" { - operands[3] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[3]))); + operands[3] = aarch64_endian_lane_rtx (mode, INTVAL (operands[3])); return "ld2\\t{%S0. - %T0.}[%3], %1"; } [(set_attr "type" "neon_load2_one_lane")] @@ -4668,7 +4661,7 @@ UNSPEC_ST2_LANE))] "TARGET_SIMD" { - operands[2] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[2]))); + operands[2] = aarch64_endian_lane_rtx (mode, INTVAL (operands[2])); return "st2\\t{%S1. - %T1.}[%2], %0"; } [(set_attr "type" "neon_store2_one_lane")] @@ -4722,7 +4715,7 @@ UNSPEC_LD3_LANE))] "TARGET_SIMD" { - operands[3] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[3]))); + operands[3] = aarch64_endian_lane_rtx (mode, INTVAL (operands[3])); return "ld3\\t{%S0. - %U0.}[%3], %1"; } [(set_attr "type" "neon_load3_one_lane")] @@ -4766,7 +4759,7 @@ UNSPEC_ST3_LANE))] "TARGET_SIMD" { - operands[2] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[2]))); + operands[2] = aarch64_endian_lane_rtx (mode, INTVAL (operands[2])); return "st3\\t{%S1. - %U1.}[%2], %0"; } [(set_attr "type" "neon_store3_one_lane")] @@ -4820,7 +4813,7 @@ UNSPEC_LD4_LANE))] "TARGET_SIMD" { - operands[3] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[3]))); + operands[3] = aarch64_endian_lane_rtx (mode, INTVAL (operands[3])); return "ld4\\t{%S0. - %V0.}[%3], %1"; } [(set_attr "type" "neon_load4_one_lane")] @@ -4864,7 +4857,7 @@ UNSPEC_ST4_LANE))] "TARGET_SIMD" { - operands[2] = GEN_INT (ENDIAN_LANE_N (mode, INTVAL (operands[2]))); + operands[2] = aarch64_endian_lane_rtx (mode, INTVAL (operands[2])); return "st4\\t{%S1. 
- %V1.}[%2], %0"; } [(set_attr "type" "neon_store4_one_lane")] diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c index 71c3693ad7d..ffcca32cd3c 100644 --- a/gcc/config/aarch64/aarch64.c +++ b/gcc/config/aarch64/aarch64.c @@ -11833,6 +11833,15 @@ aarch64_simd_lane_bounds (rtx operand, HOST_WIDE_INT low, HOST_WIDE_INT high, } } +/* Perform endian correction on lane number N, which indexes a vector + of mode MODE, and return the result as an SImode rtx. */ + +rtx +aarch64_endian_lane_rtx (machine_mode mode, unsigned int n) +{ + return gen_int_mode (ENDIAN_LANE_N (GET_MODE_NUNITS (mode), n), SImode); +} + /* Return TRUE if OP is a valid vector addressing mode. */ bool aarch64_simd_mem_operand_p (rtx op) diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h index 5e2e2fccff3..93d29b84d47 100644 --- a/gcc/config/aarch64/aarch64.h +++ b/gcc/config/aarch64/aarch64.h @@ -913,8 +913,8 @@ extern enum aarch64_code_model aarch64_cmodel; || (MODE) == V4SFmode || (MODE) == V8HFmode || (MODE) == V2DImode \ || (MODE) == V2DFmode) -#define ENDIAN_LANE_N(mode, n) \ - (BYTES_BIG_ENDIAN ? GET_MODE_NUNITS (mode) - 1 - n : n) +#define ENDIAN_LANE_N(NUNITS, N) \ + (BYTES_BIG_ENDIAN ? NUNITS - 1 - N : N) /* Support for a configure-time default CPU, etc. We currently support --with-arch and --with-cpu. Both are ignored if either is specified diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md index 46aa6fde6b6..5d7b0f3540f 100644 --- a/gcc/config/aarch64/iterators.md +++ b/gcc/config/aarch64/iterators.md @@ -450,6 +450,17 @@ (define_mode_attr rtn [(DI "d") (SI "")]) (define_mode_attr vas [(DI "") (SI ".2s")]) +;; Map a vector to the number of units in it, if the size of the mode +;; is constant.
+(define_mode_attr nunits [(V8QI "8") (V16QI "16") + (V4HI "4") (V8HI "8") + (V2SI "2") (V4SI "4") + (V2DI "2") + (V4HF "4") (V8HF "8") + (V2SF "2") (V4SF "4") + (V1DF "1") (V2DF "2") + (DI "1") (DF "1")]) + ;; Map a floating point or integer mode to the appropriate register name prefix (define_mode_attr s [(HF "h") (SF "s") (DF "d") (SI "s") (DI "d")])