+2017-08-17  Bill Schmidt  <wschmidt@linux.vnet.ibm.com>
+
+	* config/rs6000/altivec.md (UNSPEC_VMRGOW_DIRECT): New constant.
+	(p8_vmrgew_v4sf_direct): Generalize to p8_vmrgew_<mode>_direct.
+	(p8_vmrgow_<mode>_direct): New define_insn.
+	* config/rs6000/rs6000.c (altivec_expand_vec_perm_const): Properly
+	handle endianness for vmrgew and vmrgow permute patterns.
+
2017-08-17  Peter Bergner  <bergner@vnet.ibm.com>

	* config/rs6000/altivec.md (VParity): Remove TARGET_VSX_TIMODE.
   UNSPEC_VMRGL_DIRECT
   UNSPEC_VSPLT_DIRECT
   UNSPEC_VMRGEW_DIRECT
+   UNSPEC_VMRGOW_DIRECT
   UNSPEC_VSUMSWS_DIRECT
   UNSPEC_VADDCUQ
   UNSPEC_VADDEUQM
}
  [(set_attr "type" "vecperm")])

-(define_insn "p8_vmrgew_v4sf_direct"
-  [(set (match_operand:V4SF 0 "register_operand" "=v")
-        (unspec:V4SF [(match_operand:V4SF 1 "register_operand" "v")
-                      (match_operand:V4SF 2 "register_operand" "v")]
+(define_insn "p8_vmrgew_<mode>_direct"
+  [(set (match_operand:VSX_W 0 "register_operand" "=v")
+        (unspec:VSX_W [(match_operand:VSX_W 1 "register_operand" "v")
+                       (match_operand:VSX_W 2 "register_operand" "v")]
                      UNSPEC_VMRGEW_DIRECT))]
  "TARGET_P8_VECTOR"
  "vmrgew %0,%1,%2"
  [(set_attr "type" "vecperm")])

+(define_insn "p8_vmrgow_<mode>_direct"
+  [(set (match_operand:VSX_W 0 "register_operand" "=v")
+        (unspec:VSX_W [(match_operand:VSX_W 1 "register_operand" "v")
+                       (match_operand:VSX_W 2 "register_operand" "v")]
+                      UNSPEC_VMRGOW_DIRECT))]
+  "TARGET_P8_VECTOR"
+  "vmrgow %0,%1,%2"
+  [(set_attr "type" "vecperm")])
+
(define_expand "vec_widen_umult_even_v16qi"
  [(use (match_operand:V8HI 0 "register_operand" ""))
   (use (match_operand:V16QI 1 "register_operand" ""))
      (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
                        : CODE_FOR_altivec_vmrghw_direct),
      { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
-    { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgew_v4si,
+    { OPTION_MASK_P8_VECTOR,
+      (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
+                        : CODE_FOR_p8_vmrgow_v4sf_direct),
      { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
-    { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgow,
+    { OPTION_MASK_P8_VECTOR,
+      (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
+                        : CODE_FOR_p8_vmrgew_v4sf_direct),
      { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
  };
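
For readers less familiar with the Power ISA merge-word instructions, here is a
small C sketch (illustration only, not part of the patch; the helper names are
invented for this note) that models what vmrgew and vmrgow compute, using the
ISA's big-endian word numbering in which word 0 is the leftmost word:

/* Illustration only: scalar model of the vmrgew/vmrgow word merges.
   Word indices follow the ISA's big-endian numbering.  */

#include <stdio.h>

/* vmrgew: interleave the even-numbered words of A and B.  */
static void
merge_even_words (const unsigned a[4], const unsigned b[4], unsigned r[4])
{
  r[0] = a[0];  r[1] = b[0];
  r[2] = a[2];  r[3] = b[2];
}

/* vmrgow: interleave the odd-numbered words of A and B.  */
static void
merge_odd_words (const unsigned a[4], const unsigned b[4], unsigned r[4])
{
  r[0] = a[1];  r[1] = b[1];
  r[2] = a[3];  r[3] = b[3];
}

int
main (void)
{
  unsigned a[4] = { 0, 1, 2, 3 }, b[4] = { 4, 5, 6, 7 }, r[4];

  merge_even_words (a, b, r);
  printf ("vmrgew: %u %u %u %u\n", r[0], r[1], r[2], r[3]);  /* 0 4 2 6 */

  merge_odd_words (a, b, r);
  printf ("vmrgow: %u %u %u %u\n", r[0], r[1], r[2], r[3]);  /* 1 5 3 7 */
  return 0;
}

Numbering the same words from the other end, as GCC does for vector elements on
little-endian targets, maps word 0 to word 3 and word 2 to word 1, so a merge
that is "even" in one numbering is "odd" in the other; that appears to be the
asymmetry the BYTES_BIG_ENDIAN selection added to the pattern table above
accounts for.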