[(set_attr "type" "fga")
(set_attr "fptype" "double")])
+;; The rtl expanders will happily convert constant permutations on other
+;; modes down to V8QI. Rely on this to avoid the complexity of the byte
+;; order of the permutation.
+;; Operand 0 is the V8QI destination; operands 1 and 2 are the two V8QI
+;; inputs; operand 3 is a CONST_VECTOR of eight byte indices selecting
+;; from the concatenation of operands 1 and 2.
+(define_expand "vec_perm_constv8qi"
+ [(match_operand:V8QI 0 "register_operand" "")
+ (match_operand:V8QI 1 "register_operand" "")
+ (match_operand:V8QI 2 "register_operand" "")
+ (match_operand:V8QI 3 "" "")]
+ "TARGET_VIS2"
+{
+ unsigned int i, mask;
+ rtx sel = operands[3];
+
+ /* Pack the eight selector elements into a 32-bit value, four bits
+    apiece, element 0 landing in the most significant nibble (shift 28).
+    The & 0xf reduces each index into the 16-byte selection range.  */
+ for (i = mask = 0; i < 8; ++i)
+ mask |= (INTVAL (XVECEXP (sel, 0, i)) & 0xf) << (28 - i*4);
+ sel = force_reg (SImode, gen_int_mode (mask, SImode));
+
+ /* bmask is emitted only for its %gsr update; its integer addition
+    result is discarded into a fresh pseudo.  bshuffle then performs
+    the permutation under that %gsr mask.  */
+ emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), sel, const0_rtx));
+ emit_insn (gen_bshufflev8qi_vis (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
+;; Unlike constant permutation, we can vastly simplify the compression of
+;; the 64-bit selector input to the 32-bit %gsr value by knowing what the
+;; width of the input is.
+;; Operand 0 is the destination; operands 1 and 2 are the input vectors;
+;; operand 3 is the variable selector, all in the same 64-bit vector mode.
+(define_expand "vec_perm<mode>"
+ [(match_operand:VM64 0 "register_operand" "")
+ (match_operand:VM64 1 "register_operand" "")
+ (match_operand:VM64 2 "register_operand" "")
+ (match_operand:VM64 3 "register_operand" "")]
+ "TARGET_VIS2"
+{
+ /* Compress the selector into %gsr -- the mode-dependent work lives in
+    sparc_expand_vec_perm_bmask -- then emit the bshuffle that consumes
+    the resulting mask.  */
+ sparc_expand_vec_perm_bmask (<MODE>mode, operands[3]);
+ emit_insn (gen_bshuffle<mode>_vis (operands[0], operands[1], operands[2]));
+ DONE;
+})
+
;; VIS 2.0 adds edge variants which do not set the condition codes
(define_insn "edge8n<P:mode>_vis"
[(set (match_operand:P 0 "register_operand" "=r")