+2016-08-01 Michael Meissner <meissner@linux.vnet.ibm.com>
+
+ * config/rs6000/rs6000-c.c (altivec_resolve_overloaded_builtin):
+ Add support for vec_extract on vector float, vector int, vector
+ short, and vector char vector types.
+ * config/rs6000/rs6000.c (rs6000_expand_vector_extract): Add
+ vector float, vector int, vector short, and vector char
+ optimizations on 64-bit ISA 2.07 systems for both constant and
+ variable element numbers.
+ (rs6000_split_vec_extract_var): Likewise.
+ * config/rs6000/vsx.md (vsx_xscvspdp_scalar2): Allow SFmode to be
+ Altivec registers on ISA 2.07 and above.
+ (vsx_extract_v4sf): Delete alternative that hard coded element 0,
+ which never was matched due to the split occurring before register
+ allocation (and the code would not have worked on little endian
+ systems if it did match). Allow extracts to go to the Altivec
+ registers if ISA 2.07 (power8). Change from using "" around the
+ C++ code to using {}'s.
+ (vsx_extract_v4sf_<mode>_load): New insn to optimize vector float
+ vec_extracts when the vector is in memory.
+ (vsx_extract_v4sf_var): New insn to optimize vector float
+ vec_extracts when the element number is variable on 64-bit ISA
+ 2.07 systems.
+ (vsx_extract_<mode>, VSX_EXTRACT_I iterator): Add optimizations
+ for 64-bit ISA 2.07 as well as ISA 3.0.
+ (vsx_extract_<mode>_p9, VSX_EXTRACT_I iterator): Likewise.
+ (vsx_extract_<mode>_p8, VSX_EXTRACT_I iterator): Likewise.
+ (vsx_extract_<mode>_load, VSX_EXTRACT_I iterator): New insn to
+ optimize vector int, vector short, and vector char vec_extracts
+ when the vector is in memory.
+ (vsx_extract_<mode>_var, VSX_EXTRACT_I iterator): New insn to
+ optimize vector int, vector short, and vector char vec_extracts
+ when the element number is variable.
+
2016-08-01 Georg-Johann Lay <avr@gjlay.de>
PR target/71948
case V2DImode:
call = rs6000_builtin_decls[VSX_BUILTIN_VEC_EXT_V2DI];
break;
+
+ case V4SFmode:
+ call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V4SF];
+ break;
+
+ case V4SImode:
+ if (TARGET_DIRECT_MOVE_64BIT)
+ call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V4SI];
+ break;
+
+ case V8HImode:
+ if (TARGET_DIRECT_MOVE_64BIT)
+ call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V8HI];
+ break;
+
+ case V16QImode:
+ if (TARGET_DIRECT_MOVE_64BIT)
+ call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V16QI];
+ break;
}
}
case V2DImode:
call = rs6000_builtin_decls[VSX_BUILTIN_VEC_EXT_V2DI];
break;
+
+ case V4SFmode:
+ call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V4SF];
+ break;
+
+ case V4SImode:
+ call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V4SI];
+ break;
+
+ case V8HImode:
+ call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V8HI];
+ break;
+
+ case V16QImode:
+ call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V16QI];
+ break;
}
}
emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
return;
case V16QImode:
- if (TARGET_VEXTRACTUB)
+ if (TARGET_DIRECT_MOVE_64BIT)
{
emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
return;
else
break;
case V8HImode:
- if (TARGET_VEXTRACTUB)
+ if (TARGET_DIRECT_MOVE_64BIT)
{
emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
return;
else
break;
case V4SImode:
- if (TARGET_VEXTRACTUB)
+ if (TARGET_DIRECT_MOVE_64BIT)
{
emit_insn (gen_vsx_extract_v4si (target, vec, elt));
return;
emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
return;
+ case V4SFmode:
+ if (TARGET_UPPER_REGS_SF)
+ {
+ emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
+ return;
+ }
+ break;
+
+ case V4SImode:
+ emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
+ return;
+
+ case V8HImode:
+ emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
+ return;
+
+ case V16QImode:
+ emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
+ return;
+
default:
gcc_unreachable ();
}
emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
return;
+ case V4SFmode:
+ {
+ rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
+ rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
+ rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
+ emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
+ tmp_altivec));
+
+ emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
+ return;
+ }
+
+ case V4SImode:
+ case V8HImode:
+ case V16QImode:
+ {
+ rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
+ rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
+ rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
+ emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
+ tmp_altivec));
+ emit_move_insn (tmp_gpr_di, tmp_altivec_di);
+ emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
+ GEN_INT (64 - (8 * scalar_size))));
+ return;
+ }
+
default:
gcc_unreachable ();
}
;; Same as vsx_xscvspdp, but use SF as the type
(define_insn "vsx_xscvspdp_scalar2"
- [(set (match_operand:SF 0 "vsx_register_operand" "=f")
+ [(set (match_operand:SF 0 "vsx_register_operand" "=ww")
(unspec:SF [(match_operand:V4SF 1 "vsx_register_operand" "wa")]
UNSPEC_VSX_CVSPDP))]
"VECTOR_UNIT_VSX_P (V4SFmode)"
;; Extract a SF element from V4SF
(define_insn_and_split "vsx_extract_v4sf"
- [(set (match_operand:SF 0 "vsx_register_operand" "=f,f")
+ [(set (match_operand:SF 0 "vsx_register_operand" "=ww")
(vec_select:SF
- (match_operand:V4SF 1 "vsx_register_operand" "wa,wa")
- (parallel [(match_operand:QI 2 "u5bit_cint_operand" "O,i")])))
- (clobber (match_scratch:V4SF 3 "=X,0"))]
+ (match_operand:V4SF 1 "vsx_register_operand" "wa")
+ (parallel [(match_operand:QI 2 "u5bit_cint_operand" "n")])))
+ (clobber (match_scratch:V4SF 3 "=0"))]
"VECTOR_UNIT_VSX_P (V4SFmode)"
- "@
- xscvspdp %x0,%x1
- #"
- ""
+ "#"
+ "&& 1"
[(const_int 0)]
- "
{
rtx op0 = operands[0];
rtx op1 = operands[1];
}
emit_insn (gen_vsx_xscvspdp_scalar2 (op0, tmp));
DONE;
-}"
- [(set_attr "length" "4,8")
+}
+ [(set_attr "length" "8")
(set_attr "type" "fp")])
+(define_insn_and_split "*vsx_extract_v4sf_<mode>_load"
+ [(set (match_operand:SF 0 "register_operand" "=f,wv,wb,?r")
+ (vec_select:SF
+ (match_operand:V4SF 1 "memory_operand" "m,Z,m,m")
+ (parallel [(match_operand:QI 2 "const_0_to_3_operand" "n,n,n,n")])))
+ (clobber (match_scratch:P 3 "=&b,&b,&b,&b"))]
+ "VECTOR_MEM_VSX_P (V4SFmode)"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 0) (match_dup 4))]
+{
+ operands[4] = rs6000_adjust_vec_address (operands[0], operands[1], operands[2],
+ operands[3], SFmode);
+}
+ [(set_attr "type" "fpload,fpload,fpload,load")
+ (set_attr "length" "8")])
+
+;; Variable V4SF extract
+(define_insn_and_split "vsx_extract_v4sf_var"
+ [(set (match_operand:SF 0 "gpc_reg_operand" "=ww,ww,?r")
+ (unspec:SF [(match_operand:V4SF 1 "input_operand" "v,m,m")
+ (match_operand:DI 2 "gpc_reg_operand" "r,r,r")]
+ UNSPEC_VSX_EXTRACT))
+ (clobber (match_scratch:DI 3 "=r,&b,&b"))
+ (clobber (match_scratch:V2DI 4 "=&v,X,X"))]
+ "VECTOR_MEM_VSX_P (V4SFmode) && TARGET_DIRECT_MOVE_64BIT
+ && TARGET_UPPER_REGS_SF"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_vec_extract_var (operands[0], operands[1], operands[2],
+ operands[3], operands[4]);
+ DONE;
+})
+
;; Expand the builtin form of xxpermdi to canonical rtl.
(define_expand "vsx_xxpermdi_<mode>"
[(match_operand:VSX_L 0 "vsx_register_operand" "")
;; Extraction of a single element in a small integer vector. None of the small
;; types are currently allowed in a vector register, so we extract to a DImode
;; and either do a direct move or store.
-(define_insn_and_split "vsx_extract_<mode>"
+(define_expand "vsx_extract_<mode>"
+ [(parallel [(set (match_operand:<VS_scalar> 0 "nonimmediate_operand")
+ (vec_select:<VS_scalar>
+ (match_operand:VSX_EXTRACT_I 1 "gpc_reg_operand")
+ (parallel [(match_operand:QI 2 "const_int_operand")])))
+ (clobber (match_dup 3))])]
+ "VECTOR_MEM_VSX_P (<MODE>mode) && TARGET_DIRECT_MOVE_64BIT"
+{
+ operands[3] = gen_rtx_SCRATCH ((TARGET_VEXTRACTUB) ? DImode : <MODE>mode);
+})
+
+;; Under ISA 3.0, we can use the byte/half-word/word integer stores if we are
+;; extracting a vector element and storing it to memory, rather than using
+;; direct move to a GPR and a GPR store.
+(define_insn_and_split "*vsx_extract_<mode>_p9"
[(set (match_operand:<VS_scalar> 0 "nonimmediate_operand" "=r,Z")
(vec_select:<VS_scalar>
(match_operand:VSX_EXTRACT_I 1 "gpc_reg_operand" "<VSX_EX>,<VSX_EX>")
}
[(set_attr "type" "vecsimple")])
+(define_insn_and_split "*vsx_extract_<mode>_p8"
+ [(set (match_operand:<VS_scalar> 0 "nonimmediate_operand" "=r")
+ (vec_select:<VS_scalar>
+ (match_operand:VSX_EXTRACT_I 1 "gpc_reg_operand" "v")
+ (parallel [(match_operand:QI 2 "<VSX_EXTRACT_PREDICATE>" "n")])))
+ (clobber (match_scratch:VSX_EXTRACT_I 3 "=v"))]
+ "VECTOR_MEM_VSX_P (<MODE>mode) && TARGET_DIRECT_MOVE_64BIT"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rtx dest = operands[0];
+ rtx src = operands[1];
+ rtx element = operands[2];
+ rtx vec_tmp = operands[3];
+ int value;
+
+ if (!VECTOR_ELT_ORDER_BIG)
+ element = GEN_INT (GET_MODE_NUNITS (<MODE>mode) - 1 - INTVAL (element));
+
+ /* If the value is in the correct position, we can avoid doing the VSPLT<x>
+ instruction. */
+ value = INTVAL (element);
+ if (<MODE>mode == V16QImode)
+ {
+ if (value != 7)
+ emit_insn (gen_altivec_vspltb_direct (vec_tmp, src, element));
+ else
+ vec_tmp = src;
+ }
+ else if (<MODE>mode == V8HImode)
+ {
+ if (value != 3)
+ emit_insn (gen_altivec_vsplth_direct (vec_tmp, src, element));
+ else
+ vec_tmp = src;
+ }
+ else if (<MODE>mode == V4SImode)
+ {
+ if (value != 1)
+ emit_insn (gen_altivec_vspltw_direct (vec_tmp, src, element));
+ else
+ vec_tmp = src;
+ }
+ else
+ gcc_unreachable ();
+
+ emit_move_insn (gen_rtx_REG (DImode, REGNO (dest)),
+ gen_rtx_REG (DImode, REGNO (vec_tmp)));
+ DONE;
+}
+ [(set_attr "type" "mftgpr")])
+
+;; Optimize extracting a single scalar element from memory.
+(define_insn_and_split "*vsx_extract_<mode>_load"
+ [(set (match_operand:<VS_scalar> 0 "register_operand" "=r")
+ (vec_select:<VS_scalar>
+ (match_operand:VSX_EXTRACT_I 1 "memory_operand" "m")
+ (parallel [(match_operand:QI 2 "<VSX_EXTRACT_PREDICATE>" "n")])))
+ (clobber (match_scratch:DI 3 "=&b"))]
+ "VECTOR_MEM_VSX_P (<MODE>mode) && TARGET_DIRECT_MOVE_64BIT"
+ "#"
+ "&& reload_completed"
+ [(set (match_dup 0) (match_dup 4))]
+{
+ operands[4] = rs6000_adjust_vec_address (operands[0], operands[1], operands[2],
+ operands[3], <VS_scalar>mode);
+}
+ [(set_attr "type" "load")
+ (set_attr "length" "8")])
+
+;; Variable V16QI/V8HI/V4SI extract
+(define_insn_and_split "vsx_extract_<mode>_var"
+ [(set (match_operand:<VS_scalar> 0 "gpc_reg_operand" "=r,r")
+ (unspec:<VS_scalar>
+ [(match_operand:VSX_EXTRACT_I 1 "input_operand" "v,m")
+ (match_operand:DI 2 "gpc_reg_operand" "r,r")]
+ UNSPEC_VSX_EXTRACT))
+ (clobber (match_scratch:DI 3 "=r,&b"))
+ (clobber (match_scratch:V2DI 4 "=&v,X"))]
+ "VECTOR_MEM_VSX_P (<MODE>mode) && TARGET_DIRECT_MOVE_64BIT"
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+{
+ rs6000_split_vec_extract_var (operands[0], operands[1], operands[2],
+ operands[3], operands[4]);
+ DONE;
+})
;; Expanders for builtins
(define_expand "vsx_mergel_<mode>"
+2016-08-01 Michael Meissner <meissner@linux.vnet.ibm.com>
+
+ * gcc.target/powerpc/vec-extract-5.c: New tests to test
+ vec_extract for vector float, vector int, vector short, and vector
+ char.
+ * gcc.target/powerpc/vec-extract-6.c: Likewise.
+ * gcc.target/powerpc/vec-extract-7.c: Likewise.
+ * gcc.target/powerpc/vec-extract-8.c: Likewise.
+ * gcc.target/powerpc/vec-extract-9.c: Likewise.
+
2016-08-01 Wilco Dijkstra <wdijkstr@arm.com>
* gcc.target/aarch64/test_frame_10.c: Fix test to check for a
- single stack adjustment, no writeback.
+ single stack adjustment, no writeback.
* gcc.target/aarch64/test_frame_12.c: Likewise.
* gcc.target/aarch64/test_frame_13.c: Likewise.
* gcc.target/aarch64/test_frame_15.c: Likewise.
--- /dev/null
+/* { dg-do compile { target { powerpc*-*-* && lp64 } } } */
+/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
+/* { dg-require-effective-target powerpc_p8vector_ok } */
+/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power8" } } */
+/* { dg-options "-O2 -mcpu=power8" } */
+
+#include <altivec.h>
+
+signed char
+add_signed_char_0 (vector signed char *p)
+{
+ return vec_extract (*p, 0) + 1;
+}
+
+signed char
+add_signed_char_1 (vector signed char *p)
+{
+ return vec_extract (*p, 1) + 1;
+}
+
+signed char
+add_signed_char_2 (vector signed char *p)
+{
+ return vec_extract (*p, 2) + 1;
+}
+
+signed char
+add_signed_char_3 (vector signed char *p)
+{
+ return vec_extract (*p, 3) + 1;
+}
+
+signed char
+add_signed_char_4 (vector signed char *p)
+{
+ return vec_extract (*p, 4) + 1;
+}
+
+signed char
+add_signed_char_5 (vector signed char *p)
+{
+ return vec_extract (*p, 5) + 1;
+}
+
+signed char
+add_signed_char_6 (vector signed char *p)
+{
+ return vec_extract (*p, 6) + 1;
+}
+
+signed char
+add_signed_char_7 (vector signed char *p)
+{
+ return vec_extract (*p, 7) + 1;
+}
+
+signed char
+add_signed_char_n (vector signed char *p, int n)
+{
+ return vec_extract (*p, n) + 1;
+}
+
+/* { dg-final { scan-assembler-not "lxvd2x" } } */
+/* { dg-final { scan-assembler-not "lxvw4x" } } */
+/* { dg-final { scan-assembler-not "lxvx" } } */
+/* { dg-final { scan-assembler-not "lxv" } } */
+/* { dg-final { scan-assembler-not "lvx" } } */
+/* { dg-final { scan-assembler-not "xxpermdi" } } */
--- /dev/null
+/* { dg-do compile { target { powerpc*-*-* && lp64 } } } */
+/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
+/* { dg-require-effective-target powerpc_p8vector_ok } */
+/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power8" } } */
+/* { dg-options "-O2 -mcpu=power8" } */
+
+#include <altivec.h>
+
+unsigned char
+add_unsigned_char_0 (vector unsigned char *p)
+{
+ return vec_extract (*p, 0) + 1;
+}
+
+unsigned char
+add_unsigned_char_1 (vector unsigned char *p)
+{
+ return vec_extract (*p, 1) + 1;
+}
+
+unsigned char
+add_unsigned_char_2 (vector unsigned char *p)
+{
+ return vec_extract (*p, 2) + 1;
+}
+
+unsigned char
+add_unsigned_char_3 (vector unsigned char *p)
+{
+ return vec_extract (*p, 3) + 1;
+}
+
+unsigned char
+add_unsigned_char_4 (vector unsigned char *p)
+{
+ return vec_extract (*p, 4) + 1;
+}
+
+unsigned char
+add_unsigned_char_5 (vector unsigned char *p)
+{
+ return vec_extract (*p, 5) + 1;
+}
+
+unsigned char
+add_unsigned_char_6 (vector unsigned char *p)
+{
+ return vec_extract (*p, 6) + 1;
+}
+
+unsigned char
+add_unsigned_char_7 (vector unsigned char *p)
+{
+ return vec_extract (*p, 7) + 1;
+}
+
+unsigned char
+add_unsigned_char_n (vector unsigned char *p, int n)
+{
+ return vec_extract (*p, n) + 1;
+}
+
+/* { dg-final { scan-assembler-not "lxvd2x" } } */
+/* { dg-final { scan-assembler-not "lxvw4x" } } */
+/* { dg-final { scan-assembler-not "lxvx" } } */
+/* { dg-final { scan-assembler-not "lxv" } } */
+/* { dg-final { scan-assembler-not "lvx" } } */
+/* { dg-final { scan-assembler-not "xxpermdi" } } */
--- /dev/null
+/* { dg-do compile { target { powerpc*-*-* && lp64 } } } */
+/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
+/* { dg-require-effective-target powerpc_p8vector_ok } */
+/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power8" } } */
+/* { dg-options "-O2 -mcpu=power8" } */
+
+#include <altivec.h>
+
+float
+add_float_0 (vector float *p)
+{
+ return vec_extract (*p, 0) + 1.0f;
+}
+
+float
+add_float_1 (vector float *p)
+{
+ return vec_extract (*p, 1) + 1.0f;
+}
+
+float
+add_float_2 (vector float *p)
+{
+ return vec_extract (*p, 2) + 1.0f;
+}
+
+float
+add_float_3 (vector float *p)
+{
+ return vec_extract (*p, 3) + 1.0f;
+}
+
+float
+add_float_n (vector float *p, long n)
+{
+ return vec_extract (*p, n) + 1.0f;
+}
+
+/* { dg-final { scan-assembler-not "lxvd2x" } } */
+/* { dg-final { scan-assembler-not "lxvw4x" } } */
+/* { dg-final { scan-assembler-not "lxvx" } } */
+/* { dg-final { scan-assembler-not "lxv" } } */
+/* { dg-final { scan-assembler-not "lvx" } } */
+/* { dg-final { scan-assembler-not "xxpermdi" } } */
--- /dev/null
+/* { dg-do compile { target { powerpc*-*-* && lp64 } } } */
+/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
+/* { dg-require-effective-target powerpc_p8vector_ok } */
+/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power8" } } */
+/* { dg-options "-O2 -mcpu=power8" } */
+
+#include <altivec.h>
+
+int
+add_int_0 (vector int *p)
+{
+ return vec_extract (*p, 0) + 1;
+}
+
+int
+add_int_1 (vector int *p)
+{
+ return vec_extract (*p, 1) + 1;
+}
+
+int
+add_int_2 (vector int *p)
+{
+ return vec_extract (*p, 2) + 1;
+}
+
+int
+add_int_3 (vector int *p)
+{
+ return vec_extract (*p, 3) + 1;
+}
+
+int
+add_int_n (vector int *p, int n)
+{
+ return vec_extract (*p, n) + 1;
+}
+
+/* { dg-final { scan-assembler-not "lxvd2x" } } */
+/* { dg-final { scan-assembler-not "lxvw4x" } } */
+/* { dg-final { scan-assembler-not "lxvx" } } */
+/* { dg-final { scan-assembler-not "lxv" } } */
+/* { dg-final { scan-assembler-not "lvx" } } */
+/* { dg-final { scan-assembler-not "xxpermdi" } } */
--- /dev/null
+/* { dg-do compile { target { powerpc*-*-* && lp64 } } } */
+/* { dg-skip-if "" { powerpc*-*-darwin* } { "*" } { "" } } */
+/* { dg-require-effective-target powerpc_p8vector_ok } */
+/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power8" } } */
+/* { dg-options "-O2 -mcpu=power8" } */
+
+#include <altivec.h>
+
+short
+add_short_0 (vector short *p)
+{
+ return vec_extract (*p, 0) + 1;
+}
+
+short
+add_short_1 (vector short *p)
+{
+ return vec_extract (*p, 1) + 1;
+}
+
+short
+add_short_2 (vector short *p)
+{
+ return vec_extract (*p, 2) + 1;
+}
+
+short
+add_short_3 (vector short *p)
+{
+ return vec_extract (*p, 3) + 1;
+}
+
+short
+add_short_4 (vector short *p)
+{
+ return vec_extract (*p, 4) + 1;
+}
+
+short
+add_short_5 (vector short *p)
+{
+ return vec_extract (*p, 5) + 1;
+}
+
+short
+add_short_6 (vector short *p)
+{
+ return vec_extract (*p, 6) + 1;
+}
+
+short
+add_short_7 (vector short *p)
+{
+ return vec_extract (*p, 7) + 1;
+}
+
+short
+add_short_n (vector short *p, int n)
+{
+ return vec_extract (*p, n) + 1;
+}
+
+/* { dg-final { scan-assembler-not "lxvd2x" } } */
+/* { dg-final { scan-assembler-not "lxvw4x" } } */
+/* { dg-final { scan-assembler-not "lxvx" } } */
+/* { dg-final { scan-assembler-not "lxv" } } */
+/* { dg-final { scan-assembler-not "lvx" } } */
+/* { dg-final { scan-assembler-not "xxpermdi" } } */