UNSPEC_VSUM2SWS
UNSPEC_VSUMSWS
UNSPEC_VPERM
+ UNSPEC_VPERMR
UNSPEC_VPERM_UNS
UNSPEC_VRFIN
UNSPEC_VCFUX
;; Slightly prefer vperm, since the target does not overlap the source
(define_insn "*altivec_vperm_<mode>_internal"
- [(set (match_operand:VM 0 "register_operand" "=v,?wo,?&wo")
- (unspec:VM [(match_operand:VM 1 "register_operand" "v,0,wo")
- (match_operand:VM 2 "register_operand" "v,wo,wo")
- (match_operand:V16QI 3 "register_operand" "v,wo,wo")]
+ [(set (match_operand:VM 0 "register_operand" "=v,?wo")
+ (unspec:VM [(match_operand:VM 1 "register_operand" "v,0")
+ (match_operand:VM 2 "register_operand" "v,wo")
+ (match_operand:V16QI 3 "register_operand" "v,wo")]
UNSPEC_VPERM))]
"TARGET_ALTIVEC"
"@
vperm %0,%1,%2,%3
- xxperm %x0,%x2,%x3
- xxlor %x0,%x1,%x1\t\t# xxperm fusion\;xxperm %x0,%x2,%x3"
+ xxperm %x0,%x2,%x3"
[(set_attr "type" "vecperm")
- (set_attr "length" "4,4,8")])
+ (set_attr "length" "4")])
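Aside (not part of the patch): in the ISA's big-endian byte numbering, vperm concatenates its two vector inputs into a 32-byte source and picks each result byte by the low 5 bits of the corresponding selector byte; xxperm performs the same permute on VSX registers but reuses the destination register as one of the two data sources, which is why operand 1 is tied to operand 0 in the second alternative. A minimal scalar sketch of the selection rule:

    static void
    vperm_model (const unsigned char a[16], const unsigned char b[16],
                 const unsigned char sel[16], unsigned char out[16])
    {
      unsigned char src[32];

      /* Bytes 0..15 of the permute source come from the first input,
         bytes 16..31 from the second (big-endian element numbering).  */
      for (int i = 0; i < 16; i++)
        {
          src[i] = a[i];
          src[16 + i] = b[i];
        }

      /* Each result byte is chosen by the low 5 bits of the selector byte.  */
      for (int i = 0; i < 16; i++)
        out[i] = src[sel[i] & 31];
    }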
(define_insn "altivec_vperm_v8hiv16qi"
- [(set (match_operand:V16QI 0 "register_operand" "=v,?wo,?&wo")
- (unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v,0,wo")
- (match_operand:V8HI 2 "register_operand" "v,wo,wo")
- (match_operand:V16QI 3 "register_operand" "v,wo,wo")]
+ [(set (match_operand:V16QI 0 "register_operand" "=v,?wo")
+ (unspec:V16QI [(match_operand:V8HI 1 "register_operand" "v,0")
+ (match_operand:V8HI 2 "register_operand" "v,wo")
+ (match_operand:V16QI 3 "register_operand" "v,wo")]
UNSPEC_VPERM))]
"TARGET_ALTIVEC"
"@
vperm %0,%1,%2,%3
- xxperm %x0,%x2,%x3
- xxlor %x0,%x1,%x1\t\t# xxperm fusion\;xxperm %x0,%x2,%x3"
+ xxperm %x0,%x2,%x3"
[(set_attr "type" "vecperm")
- (set_attr "length" "4,4,8")])
+ (set_attr "length" "4")])
(define_expand "altivec_vperm_<mode>_uns"
[(set (match_operand:VM 0 "register_operand" "")
})
(define_insn "*altivec_vperm_<mode>_uns_internal"
- [(set (match_operand:VM 0 "register_operand" "=v,?wo,?&wo")
- (unspec:VM [(match_operand:VM 1 "register_operand" "v,0,wo")
- (match_operand:VM 2 "register_operand" "v,wo,wo")
- (match_operand:V16QI 3 "register_operand" "v,wo,wo")]
+ [(set (match_operand:VM 0 "register_operand" "=v,?wo")
+ (unspec:VM [(match_operand:VM 1 "register_operand" "v,0")
+ (match_operand:VM 2 "register_operand" "v,wo")
+ (match_operand:V16QI 3 "register_operand" "v,wo")]
UNSPEC_VPERM_UNS))]
"TARGET_ALTIVEC"
"@
vperm %0,%1,%2,%3
- xxperm %x0,%x2,%x3
- xxlor %x0,%x1,%x1\t\t# xxperm fusion\;xxperm %x0,%x2,%x3"
+ xxperm %x0,%x2,%x3"
[(set_attr "type" "vecperm")
- (set_attr "length" "4,4,8")])
+ (set_attr "length" "4")])
(define_expand "vec_permv16qi"
[(set (match_operand:V16QI 0 "register_operand" "")
FAIL;
})
+(define_insn "*altivec_vpermr_<mode>_internal"
+ [(set (match_operand:VM 0 "register_operand" "=v,?wo")
+ (unspec:VM [(match_operand:VM 1 "register_operand" "v,0")
+ (match_operand:VM 2 "register_operand" "v,wo")
+ (match_operand:V16QI 3 "register_operand" "v,wo")]
+ UNSPEC_VPERMR))]
+ "TARGET_P9_VECTOR"
+ "@
+ vpermr %0,%1,%2,%3
+ xxpermr %x0,%x2,%x3"
+ [(set_attr "type" "vecperm")
+ (set_attr "length" "4")])
+
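Aside (not part of the patch): "right-indexed" means the permute uses the complement of each 5-bit selector index (equivalently, 31 minus it), which is why the little-endian expansion below can hand the selector to vpermr/xxpermr unchanged on Power9 instead of first inverting it with a vnand or vnor. For a 5-bit field the complement and the subtraction coincide, as this scalar self-check illustrates:

    #include <assert.h>

    int
    main (void)
    {
      /* For every byte value, complementing the low 5 bits of the selector
         equals subtracting them from 31.  */
      for (unsigned v = 0; v < 256; v++)
        assert ((~v & 31u) == 31u - (v & 31u));
      return 0;
    }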
(define_insn "altivec_vrfip" ; ceil
[(set (match_operand:V4SF 0 "register_operand" "=v")
(unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")]
"")
(define_insn "vperm_v8hiv4si"
- [(set (match_operand:V4SI 0 "register_operand" "=v,?wo,?&wo")
- (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v,0,wo")
- (match_operand:V4SI 2 "register_operand" "v,wo,wo")
- (match_operand:V16QI 3 "register_operand" "v,wo,wo")]
+ [(set (match_operand:V4SI 0 "register_operand" "=v,?wo")
+ (unspec:V4SI [(match_operand:V8HI 1 "register_operand" "v,0")
+ (match_operand:V4SI 2 "register_operand" "v,wo")
+ (match_operand:V16QI 3 "register_operand" "v,wo")]
UNSPEC_VPERMSI))]
"TARGET_ALTIVEC"
"@
vperm %0,%1,%2,%3
- xxperm %x0,%x2,%x3
- xxlor %x0,%x1,%x1\t\t# xxperm fusion\;xxperm %x0,%x2,%x3"
+ xxperm %x0,%x2,%x3"
[(set_attr "type" "vecperm")
- (set_attr "length" "4,4,8")])
+ (set_attr "length" "4")])
(define_insn "vperm_v16qiv8hi"
- [(set (match_operand:V8HI 0 "register_operand" "=v,?wo,?&wo")
- (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v,0,wo")
- (match_operand:V8HI 2 "register_operand" "v,wo,wo")
- (match_operand:V16QI 3 "register_operand" "v,wo,wo")]
+ [(set (match_operand:V8HI 0 "register_operand" "=v,?wo")
+ (unspec:V8HI [(match_operand:V16QI 1 "register_operand" "v,0")
+ (match_operand:V8HI 2 "register_operand" "v,wo")
+ (match_operand:V16QI 3 "register_operand" "v,wo")]
UNSPEC_VPERMHI))]
"TARGET_ALTIVEC"
"@
vperm %0,%1,%2,%3
- xxperm %x0,%x2,%x3
- xxlor %x0,%x1,%x1\t\t# xxperm fusion\;xxperm %x0,%x2,%x3"
+ xxperm %x0,%x2,%x3"
[(set_attr "type" "vecperm")
- (set_attr "length" "4,4,8")])
+ (set_attr "length" "4")])
(define_expand "vec_unpacku_hi_v16qi"
gen_rtvec (3, target, reg,
force_reg (V16QImode, x)),
UNSPEC_VPERM);
else
{
- /* Invert selector. We prefer to generate VNAND on P8 so
- that future fusion opportunities can kick in, but must
- generate VNOR elsewhere. */
- rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
- rtx iorx = (TARGET_P8_VECTOR
- ? gen_rtx_IOR (V16QImode, notx, notx)
- : gen_rtx_AND (V16QImode, notx, notx));
- rtx tmp = gen_reg_rtx (V16QImode);
- emit_insn (gen_rtx_SET (tmp, iorx));
+ if (TARGET_P9_VECTOR)
+ x = gen_rtx_UNSPEC (mode,
+ gen_rtvec (3, target, reg,
+ force_reg (V16QImode, x)),
+ UNSPEC_VPERMR);
+ else
+ {
+ /* Invert selector. We prefer to generate VNAND on P8 so
+ that future fusion opportunities can kick in, but must
+ generate VNOR elsewhere. */
+ rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
+ rtx iorx = (TARGET_P8_VECTOR
+ ? gen_rtx_IOR (V16QImode, notx, notx)
+ : gen_rtx_AND (V16QImode, notx, notx));
+ rtx tmp = gen_reg_rtx (V16QImode);
+ emit_insn (gen_rtx_SET (tmp, iorx));
- /* Permute with operands reversed and adjusted selector. */
- x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
- UNSPEC_VPERM);
+ /* Permute with operands reversed and adjusted selector. */
+ x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
+ UNSPEC_VPERM);
+ }
}
emit_insn (gen_rtx_SET (target, x));
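Aside (not part of the patch): in the pre-Power9 branch above, the selector inversion is written as an IOR of two identical NOTs when VNAND is available (Power8) and as an AND of two identical NOTs otherwise, i.e. the De Morgan forms the backend's vnand and vnor patterns match. With both operands equal, either form reduces to a plain bitwise complement of the selector:

    #include <assert.h>

    int
    main (void)
    {
      for (unsigned x = 0; x < 256; x++)
        {
          unsigned not_x = ~x & 0xffu;
          assert (((~x | ~x) & 0xffu) == not_x);  /* nand (x, x) == not x  */
          assert (((~x & ~x) & 0xffu) == not_x);  /* nor (x, x) == not x   */
        }
      return 0;
    }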
if (!REG_P (target))
tmp = gen_reg_rtx (mode);
- /* Invert the selector with a VNAND if available, else a VNOR.
- The VNAND is preferred for future fusion opportunities. */
- notx = gen_rtx_NOT (V16QImode, sel);
- iorx = (TARGET_P8_VECTOR
- ? gen_rtx_IOR (V16QImode, notx, notx)
- : gen_rtx_AND (V16QImode, notx, notx));
- emit_insn (gen_rtx_SET (norreg, iorx));
+ if (TARGET_P9_VECTOR)
+ {
+ unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op0, op1, sel),
+ UNSPEC_VPERMR);
+ }
+ else
+ {
+ /* Invert the selector with a VNAND if available, else a VNOR.
+ The VNAND is preferred for future fusion opportunities. */
+ notx = gen_rtx_NOT (V16QImode, sel);
+ iorx = (TARGET_P8_VECTOR
+ ? gen_rtx_IOR (V16QImode, notx, notx)
+ : gen_rtx_AND (V16QImode, notx, notx));
+ emit_insn (gen_rtx_SET (norreg, iorx));
- /* Permute with operands reversed and adjusted selector. */
- unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
- UNSPEC_VPERM);
+ /* Permute with operands reversed and adjusted selector. */
+ unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
+ UNSPEC_VPERM);
+ }
/* Copy into target, possibly by way of a register. */
if (!REG_P (target))
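Usage sketch (not part of the patch): a variable two-input byte shuffle such as the one below is the kind of operation that reaches this little-endian expansion; the intent of the change is that with -mcpu=power9 it no longer needs the separate selector-inversion instruction required on earlier processors.

    typedef unsigned char v16qi __attribute__ ((vector_size (16)));

    v16qi
    byte_shuffle (v16qi a, v16qi b, v16qi sel)
    {
      /* GCC's generic vector shuffle with a variable mask; on little-endian
         PowerPC this is expanded through the target's vec_perm pattern.  */
      return __builtin_shuffle (a, b, sel);
    }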