+;; Edge instructions produce condition codes equivalent to a 'subcc'
+;; with the same operands.
+;;
+;; Each pattern is a parallel of two sets: the CC register gets a
+;; CC_NOOV-mode compare of (op1 - op2) against zero (exactly what a
+;; subcc would produce), and the result register gets the edge mask,
+;; which is opaque to the compiler and therefore modeled as an unspec
+;; of the same two inputs.  %rN in the output template prints %g0 when
+;; the operand is the J (zero) constraint.
+(define_insn "edge8<P:mode>_vis"
+ [(set (reg:CC_NOOV CC_REG)
+ (compare:CC_NOOV (minus:P (match_operand:P 1 "register_or_zero_operand" "rJ")
+ (match_operand:P 2 "register_or_zero_operand" "rJ"))
+ (const_int 0)))
+ (set (match_operand:P 0 "register_operand" "=r")
+ (unspec:P [(match_dup 1) (match_dup 2)] UNSPEC_EDGE8))]
+ "TARGET_VIS"
+ "edge8\t%r1, %r2, %0"
+ [(set_attr "type" "edge")])
+
+;; "l" variants emit the *l opcodes (distinct unspecs keep them from
+;; being merged with the non-l forms); CC behavior is identical.
+(define_insn "edge8l<P:mode>_vis"
+ [(set (reg:CC_NOOV CC_REG)
+ (compare:CC_NOOV (minus:P (match_operand:P 1 "register_or_zero_operand" "rJ")
+ (match_operand:P 2 "register_or_zero_operand" "rJ"))
+ (const_int 0)))
+ (set (match_operand:P 0 "register_operand" "=r")
+ (unspec:P [(match_dup 1) (match_dup 2)] UNSPEC_EDGE8L))]
+ "TARGET_VIS"
+ "edge8l\t%r1, %r2, %0"
+ [(set_attr "type" "edge")])
+
+(define_insn "edge16<P:mode>_vis"
+ [(set (reg:CC_NOOV CC_REG)
+ (compare:CC_NOOV (minus:P (match_operand:P 1 "register_or_zero_operand" "rJ")
+ (match_operand:P 2 "register_or_zero_operand" "rJ"))
+ (const_int 0)))
+ (set (match_operand:P 0 "register_operand" "=r")
+ (unspec:P [(match_dup 1) (match_dup 2)] UNSPEC_EDGE16))]
+ "TARGET_VIS"
+ "edge16\t%r1, %r2, %0"
+ [(set_attr "type" "edge")])
+
+(define_insn "edge16l<P:mode>_vis"
+ [(set (reg:CC_NOOV CC_REG)
+ (compare:CC_NOOV (minus:P (match_operand:P 1 "register_or_zero_operand" "rJ")
+ (match_operand:P 2 "register_or_zero_operand" "rJ"))
+ (const_int 0)))
+ (set (match_operand:P 0 "register_operand" "=r")
+ (unspec:P [(match_dup 1) (match_dup 2)] UNSPEC_EDGE16L))]
+ "TARGET_VIS"
+ "edge16l\t%r1, %r2, %0"
+ [(set_attr "type" "edge")])
+
+(define_insn "edge32<P:mode>_vis"
+ [(set (reg:CC_NOOV CC_REG)
+ (compare:CC_NOOV (minus:P (match_operand:P 1 "register_or_zero_operand" "rJ")
+ (match_operand:P 2 "register_or_zero_operand" "rJ"))
+ (const_int 0)))
+ (set (match_operand:P 0 "register_operand" "=r")
+ (unspec:P [(match_dup 1) (match_dup 2)] UNSPEC_EDGE32))]
+ "TARGET_VIS"
+ "edge32\t%r1, %r2, %0"
+ [(set_attr "type" "edge")])
+
+(define_insn "edge32l<P:mode>_vis"
+ [(set (reg:CC_NOOV CC_REG)
+ (compare:CC_NOOV (minus:P (match_operand:P 1 "register_or_zero_operand" "rJ")
+ (match_operand:P 2 "register_or_zero_operand" "rJ"))
+ (const_int 0)))
+ (set (match_operand:P 0 "register_operand" "=r")
+ (unspec:P [(match_dup 1) (match_dup 2)] UNSPEC_EDGE32L))]
+ "TARGET_VIS"
+ "edge32l\t%r1, %r2, %0"
+ [(set_attr "type" "edge")])
+
+;; gcond iterates over the comparison codes the VIS fcmp instructions
+;; support; GCM over the two vector element widths, with gcm_name
+;; supplying the "16"/"32" opcode suffix.  Together they expand this
+;; one template into fcmp{le,ne,gt,eq}{16,32} patterns.
+(define_code_iterator gcond [le ne gt eq])
+(define_mode_iterator GCM [V4HI V2SI])
+(define_mode_attr gcm_name [(V4HI "16") (V2SI "32")])
+
+;; Vector compare: FP-register vector inputs, per-element comparison
+;; result delivered as a bitmask in an integer register (mode P).
+;; The mask encoding is opaque, hence the unspec wrapping the gcond.
+(define_insn "fcmp<code><GCM:gcm_name><P:mode>_vis"
+ [(set (match_operand:P 0 "register_operand" "=r")
+ (unspec:P [(gcond:GCM (match_operand:GCM 1 "register_operand" "e")
+ (match_operand:GCM 2 "register_operand" "e"))]
+ UNSPEC_FCMP))]
+ "TARGET_VIS"
+ "fcmp<code><GCM:gcm_name>\t%1, %2, %0"
+ [(set_attr "type" "fpmul")
+ (set_attr "fptype" "double")])
+
+;; array8/16/32: address-manipulation instructions taking two integer
+;; operands and producing an integer result.  The computation is not
+;; expressible in RTL, so each is a distinct unspec.
+(define_insn "array8<P:mode>_vis"
+ [(set (match_operand:P 0 "register_operand" "=r")
+ (unspec:P [(match_operand:P 1 "register_or_zero_operand" "rJ")
+ (match_operand:P 2 "register_or_zero_operand" "rJ")]
+ UNSPEC_ARRAY8))]
+ "TARGET_VIS"
+ "array8\t%r1, %r2, %0"
+ [(set_attr "type" "array")])
+
+(define_insn "array16<P:mode>_vis"
+ [(set (match_operand:P 0 "register_operand" "=r")
+ (unspec:P [(match_operand:P 1 "register_or_zero_operand" "rJ")
+ (match_operand:P 2 "register_or_zero_operand" "rJ")]
+ UNSPEC_ARRAY16))]
+ "TARGET_VIS"
+ "array16\t%r1, %r2, %0"
+ [(set_attr "type" "array")])
+
+(define_insn "array32<P:mode>_vis"
+ [(set (match_operand:P 0 "register_operand" "=r")
+ (unspec:P [(match_operand:P 1 "register_or_zero_operand" "rJ")
+ (match_operand:P 2 "register_or_zero_operand" "rJ")]
+ UNSPEC_ARRAY32))]
+ "TARGET_VIS"
+ "array32\t%r1, %r2, %0"
+ [(set_attr "type" "array")])
+
+;; bmask adds its two operands into the destination AND stores the
+;; (low 32 bits of the) sum into the mask field of %gsr, modeled here
+;; as a zero_extract of bits <32,32> of the GSR register.  The SI
+;; variant zero-extends the 32-bit sum before the GSR store.
+(define_insn "bmaskdi_vis"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (plus:DI (match_operand:DI 1 "register_or_zero_operand" "rJ")
+ (match_operand:DI 2 "register_or_zero_operand" "rJ")))
+ (set (zero_extract:DI (reg:DI GSR_REG) (const_int 32) (const_int 32))
+ (plus:DI (match_dup 1) (match_dup 2)))]
+ "TARGET_VIS2"
+ "bmask\t%r1, %r2, %0"
+ [(set_attr "type" "array")])
+
+(define_insn "bmasksi_vis"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (plus:SI (match_operand:SI 1 "register_or_zero_operand" "rJ")
+ (match_operand:SI 2 "register_or_zero_operand" "rJ")))
+ (set (zero_extract:DI (reg:DI GSR_REG) (const_int 32) (const_int 32))
+ (zero_extend:DI (plus:SI (match_dup 1) (match_dup 2))))]
+ "TARGET_VIS2"
+ "bmask\t%r1, %r2, %0"
+ [(set_attr "type" "array")])
+
+;; bshuffle permutes the bytes of its two 64-bit FP-register inputs
+;; under control of the mask previously deposited in %gsr; the GSR
+;; read is listed in the unspec so dataflow sees the dependency.
+(define_insn "bshuffle<VM64:mode>_vis"
+ [(set (match_operand:VM64 0 "register_operand" "=e")
+ (unspec:VM64 [(match_operand:VM64 1 "register_operand" "e")
+ (match_operand:VM64 2 "register_operand" "e")
+ (reg:DI GSR_REG)]
+ UNSPEC_BSHUFFLE))]
+ "TARGET_VIS2"
+ "bshuffle\t%1, %2, %0"
+ [(set_attr "type" "fga")
+ (set_attr "fptype" "double")])
+
+;; The rtl expanders will happily convert constant permutations on other
+;; modes down to V8QI. Rely on this to avoid the complexity of the byte
+;; order of the permutation.
+(define_expand "vec_perm_constv8qi"
+ [(match_operand:V8QI 0 "register_operand" "")
+ (match_operand:V8QI 1 "register_operand" "")
+ (match_operand:V8QI 2 "register_operand" "")
+ (match_operand:V8QI 3 "" "")]
+ "TARGET_VIS2"
+{
+  unsigned int i, mask;
+  rtx sel = operands[3];
+
+  /* Pack the low nibble of each of the 8 selector elements into a
+     32-bit immediate, element 0 in the most significant nibble.  */
+  for (i = mask = 0; i < 8; ++i)
+    mask |= (INTVAL (XVECEXP (sel, 0, i)) & 0xf) << (28 - i*4);
+  sel = force_reg (SImode, gen_int_mode (mask, SImode));
+
+  /* bmask is used only for its side effect of loading %gsr; its
+     integer add result goes to a throwaway pseudo.  */
+  emit_insn (gen_bmasksi_vis (gen_reg_rtx (SImode), sel, const0_rtx));
+  emit_insn (gen_bshufflev8qi_vis (operands[0], operands[1], operands[2]));
+  DONE;
+})
+
+;; Unlike constant permutation, we can vastly simplify the compression of
+;; the 64-bit selector input to the 32-bit %gsr value by knowing what the
+;; width of the input is.
+;;
+;; sparc_expand_vec_perm_bmask (sparc.c) emits the code that loads the
+;; compressed selector into %gsr; the bshuffle insn above then consumes it.
+(define_expand "vec_perm<mode>"
+ [(match_operand:VM64 0 "register_operand" "")
+ (match_operand:VM64 1 "register_operand" "")
+ (match_operand:VM64 2 "register_operand" "")
+ (match_operand:VM64 3 "register_operand" "")]
+ "TARGET_VIS2"
+{
+  sparc_expand_vec_perm_bmask (<MODE>mode, operands[3]);
+  emit_insn (gen_bshuffle<mode>_vis (operands[0], operands[1], operands[2]));
+  DONE;
+})
+
+;; VIS 2.0 adds edge variants which do not set the condition codes
+;; (opcode suffix "n").  With no CC side effect these are plain
+;; two-input unspecs, and they use the separate "edgen" scheduling type.
+(define_insn "edge8n<P:mode>_vis"
+ [(set (match_operand:P 0 "register_operand" "=r")
+ (unspec:P [(match_operand:P 1 "register_or_zero_operand" "rJ")
+ (match_operand:P 2 "register_or_zero_operand" "rJ")]
+ UNSPEC_EDGE8N))]
+ "TARGET_VIS2"
+ "edge8n\t%r1, %r2, %0"
+ [(set_attr "type" "edgen")])
+
+(define_insn "edge8ln<P:mode>_vis"
+ [(set (match_operand:P 0 "register_operand" "=r")
+ (unspec:P [(match_operand:P 1 "register_or_zero_operand" "rJ")
+ (match_operand:P 2 "register_or_zero_operand" "rJ")]
+ UNSPEC_EDGE8LN))]
+ "TARGET_VIS2"
+ "edge8ln\t%r1, %r2, %0"
+ [(set_attr "type" "edgen")])
+
+(define_insn "edge16n<P:mode>_vis"
+ [(set (match_operand:P 0 "register_operand" "=r")
+ (unspec:P [(match_operand:P 1 "register_or_zero_operand" "rJ")
+ (match_operand:P 2 "register_or_zero_operand" "rJ")]
+ UNSPEC_EDGE16N))]
+ "TARGET_VIS2"
+ "edge16n\t%r1, %r2, %0"
+ [(set_attr "type" "edgen")])
+
+(define_insn "edge16ln<P:mode>_vis"
+ [(set (match_operand:P 0 "register_operand" "=r")
+ (unspec:P [(match_operand:P 1 "register_or_zero_operand" "rJ")
+ (match_operand:P 2 "register_or_zero_operand" "rJ")]
+ UNSPEC_EDGE16LN))]
+ "TARGET_VIS2"
+ "edge16ln\t%r1, %r2, %0"
+ [(set_attr "type" "edgen")])
+
+(define_insn "edge32n<P:mode>_vis"
+ [(set (match_operand:P 0 "register_operand" "=r")
+ (unspec:P [(match_operand:P 1 "register_or_zero_operand" "rJ")
+ (match_operand:P 2 "register_or_zero_operand" "rJ")]
+ UNSPEC_EDGE32N))]
+ "TARGET_VIS2"
+ "edge32n\t%r1, %r2, %0"
+ [(set_attr "type" "edgen")])
+
+;; Non-CC-setting VIS2 edge variant, same shape as edge8n..edge32n above.
+(define_insn "edge32ln<P:mode>_vis"
+ [(set (match_operand:P 0 "register_operand" "=r")
+ (unspec:P [(match_operand:P 1 "register_or_zero_operand" "rJ")
+ (match_operand:P 2 "register_or_zero_operand" "rJ")]
+ UNSPEC_EDGE32LN))]
+ "TARGET_VIS2"
+ "edge32ln\t%r1, %r2, %0"
+ ;; Fixed: this was "edge", but like every other *n variant this insn
+ ;; does not set the condition codes, so it must use the "edgen" type.
+ [(set_attr "type" "edgen")])
+
+;; Conditional moves are possible via fcmpX --> cmaskX -> bshuffle
+;;
+;; cmask both reads and writes %gsr (the old value appears in the
+;; unspec inputs, the new value is the set destination); operand 0 is
+;; the compare-result bitmask from an fcmp pattern above.
+;;
+;; NOTE(review): the templates print %r0 (which substitutes %g0 for a
+;; zero rtx) but the predicate/constraint only accept registers
+;; ("register_operand"/"r"); consider register_or_zero_operand with
+;; "rJ" for consistency with the edge/array patterns — confirm intent.
+(define_insn "cmask8<P:mode>_vis"
+ [(set (reg:DI GSR_REG)
+ (unspec:DI [(match_operand:P 0 "register_operand" "r")
+ (reg:DI GSR_REG)]
+ UNSPEC_CMASK8))]
+ "TARGET_VIS3"
+ "cmask8\t%r0")
+
+(define_insn "cmask16<P:mode>_vis"
+ [(set (reg:DI GSR_REG)
+ (unspec:DI [(match_operand:P 0 "register_operand" "r")
+ (reg:DI GSR_REG)]
+ UNSPEC_CMASK16))]
+ "TARGET_VIS3"
+ "cmask16\t%r0")
+
+(define_insn "cmask32<P:mode>_vis"
+ [(set (reg:DI GSR_REG)
+ (unspec:DI [(match_operand:P 0 "register_operand" "r")
+ (reg:DI GSR_REG)]
+ UNSPEC_CMASK32))]
+ "TARGET_VIS3"
+ "cmask32\t%r0")
+
+;; VIS3 16-bit checksum over two V4HI inputs; result encoding is
+;; opaque to the compiler, hence the unspec.
+(define_insn "fchksm16_vis"
+ [(set (match_operand:V4HI 0 "register_operand" "=e")
+ (unspec:V4HI [(match_operand:V4HI 1 "register_operand" "e")
+ (match_operand:V4HI 2 "register_operand" "e")]
+ UNSPEC_FCHKSM16))]
+ "TARGET_VIS3"
+ "fchksm16\t%1, %2, %0")
+
+;; VIS3 vector shifts: plain left/right shifts plus a saturating left
+;; shift (fslas).  The code iterator maps each RTL shift code to its
+;; opcode (vis3_shift_insn) and to the standard pattern-name prefix
+;; (vis3_shift_patname) so one template yields v{ashl,ssashl,lshr,ashr}.
+(define_code_iterator vis3_shift [ashift ss_ashift lshiftrt ashiftrt])
+(define_code_attr vis3_shift_insn
+ [(ashift "fsll") (ss_ashift "fslas") (lshiftrt "fsrl") (ashiftrt "fsra")])
+(define_code_attr vis3_shift_patname
+ [(ashift "ashl") (ss_ashift "ssashl") (lshiftrt "lshr") (ashiftrt "ashr")])
+
+;; <vconstr> and <vbits> are mode attributes defined elsewhere in this
+;; file (register constraint and element-width opcode suffix per mode).
+(define_insn "v<vis3_shift_patname><mode>3"
+ [(set (match_operand:GCM 0 "register_operand" "=<vconstr>")
+ (vis3_shift:GCM (match_operand:GCM 1 "register_operand" "<vconstr>")
+ (match_operand:GCM 2 "register_operand" "<vconstr>")))]
+ "TARGET_VIS3"
+ "<vis3_shift_insn><vbits>\t%1, %2, %0")
+
+;; pdistn: byte-distance instruction taking two V8QI FP-register
+;; inputs and delivering its result directly in an integer register
+;; (mode P), unlike classic pdist which accumulates in an FP register.
+(define_insn "pdistn<mode>_vis"
+ [(set (match_operand:P 0 "register_operand" "=r")
+ (unspec:P [(match_operand:V8QI 1 "register_operand" "e")
+ (match_operand:V8QI 2 "register_operand" "e")]
+ UNSPEC_PDISTN))]
+ "TARGET_VIS3"
+ "pdistn\t%1, %2, %0")
+
+;; fmean16: per-lane rounded average, written out in full RTL so the
+;; combiner can recognize it: widen both V4HI inputs to V4SI, add,
+;; add the rounding bias of 1 per lane, shift right by 1, truncate.
+(define_insn "fmean16_vis"
+ [(set (match_operand:V4HI 0 "register_operand" "=e")
+ (truncate:V4HI
+ (lshiftrt:V4SI
+ (plus:V4SI
+ (plus:V4SI
+ (zero_extend:V4SI
+ (match_operand:V4HI 1 "register_operand" "e"))
+ (zero_extend:V4SI
+ (match_operand:V4HI 2 "register_operand" "e")))
+ (const_vector:V4SI [(const_int 1) (const_int 1)
+ (const_int 1) (const_int 1)]))
+ (const_int 1))))]
+ "TARGET_VIS3"
+ "fmean16\t%1, %2, %0")
+
+;; 64-bit add/subtract in the FP registers (fpadd64/fpsub64), modeled
+;; as V1DI so it lives in the vector/FP register file.  plusminus and
+;; plusminus_insn are the generic code iterator/attr pair.
(define_insn "fp<plusminus_insn>64_vis"
+ [(set (match_operand:V1DI 0 "register_operand" "=e")
+ (plusminus:V1DI (match_operand:V1DI 1 "register_operand" "e")
+ (match_operand:V1DI 2 "register_operand" "e")))]
+ "TARGET_VIS3"
+ "fp<plusminus_insn>64\t%1, %2, %0")
+
+;; Saturating vector add/subtract.  VASS covers both the 8-byte
+;; (V4HI/V2SI) and 4-byte (V2HI/V1SI) vector modes; the code
+;; iterator/attrs map ss_plus/ss_minus to the fpadds/fpsubs opcodes
+;; and to the standard ssadd/sssub pattern names.
+(define_mode_iterator VASS [V4HI V2SI V2HI V1SI])
+(define_code_iterator vis3_addsub_ss [ss_plus ss_minus])
+(define_code_attr vis3_addsub_ss_insn
+ [(ss_plus "fpadds") (ss_minus "fpsubs")])
+(define_code_attr vis3_addsub_ss_patname
+ [(ss_plus "ssadd") (ss_minus "sssub")])
+
+(define_insn "<vis3_addsub_ss_patname><mode>3"
+ [(set (match_operand:VASS 0 "register_operand" "=<vconstr>")
+ (vis3_addsub_ss:VASS (match_operand:VASS 1 "register_operand" "<vconstr>")
+ (match_operand:VASS 2 "register_operand" "<vconstr>")))]
+ "TARGET_VIS3"
+ "<vis3_addsub_ss_insn><vbits>\t%1, %2, %0")
+
+;; fucmp{le,ne,gt,eq}8: byte-vector compare producing a bitmask in an
+;; integer register, analogous to the fcmp patterns above but over
+;; V8QI inputs (reuses the gcond code iterator).
+(define_insn "fucmp<code>8<P:mode>_vis"
+ [(set (match_operand:P 0 "register_operand" "=r")
+ (unspec:P [(gcond:V8QI (match_operand:V8QI 1 "register_operand" "e")
+ (match_operand:V8QI 2 "register_operand" "e"))]
+ UNSPEC_FUCMP))]
+ "TARGET_VIS3"
+ "fucmp<code>8\t%1, %2, %0")
+
+;; VIS3 negated FP arithmetic.  These are combiner patterns (leading
+;; '*': no gen_* expander) matching (neg (plus ...)) and
+;; (mult (neg ...) ...) so fnadds/fnaddd/fnmuls/fnmuld/fnsmuld replace
+;; an add-or-multiply-then-negate pair.  Note the constraints: "f" for
+;; SFmode values, "e" for DFmode values.
+(define_insn "*naddsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (neg:SF (plus:SF (match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f"))))]
+ "TARGET_VIS3"
+ "fnadds\t%1, %2, %0"
+ [(set_attr "type" "fp")])
+
+(define_insn "*nadddf3"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (neg:DF (plus:DF (match_operand:DF 1 "register_operand" "e")
+ (match_operand:DF 2 "register_operand" "e"))))]
+ "TARGET_VIS3"
+ "fnaddd\t%1, %2, %0"
+ [(set_attr "type" "fp")
+ (set_attr "fptype" "double")])
+
+(define_insn "*nmulsf3"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (mult:SF (neg:SF (match_operand:SF 1 "register_operand" "f"))
+ (match_operand:SF 2 "register_operand" "f")))]
+ "TARGET_VIS3"
+ "fnmuls\t%1, %2, %0"
+ [(set_attr "type" "fpmul")])
+
+(define_insn "*nmuldf3"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (mult:DF (neg:DF (match_operand:DF 1 "register_operand" "e"))
+ (match_operand:DF 2 "register_operand" "e")))]
+ "TARGET_VIS3"
+ "fnmuld\t%1, %2, %0"
+ [(set_attr "type" "fpmul")
+ (set_attr "fptype" "double")])
+
+;; Widening form: negated multiply of two single-precision values
+;; extended to double precision (fnsmuld).
+(define_insn "*nmuldf3_extend"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (mult:DF (neg:DF (float_extend:DF
+ (match_operand:SF 1 "register_operand" "f")))
+ (float_extend:DF
+ (match_operand:SF 2 "register_operand" "f"))))]
+ "TARGET_VIS3"
+ "fnsmuld\t%1, %2, %0"
+ [(set_attr "type" "fpmul")
+ (set_attr "fptype" "double")])
+
+;; fhadds: VIS3 FP operation on two SF inputs; the exact semantics are
+;; not expressible in RTL, hence the unspec.
+(define_insn "fhaddsf_vis"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (unspec:SF [(match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")]
+ UNSPEC_FHADD))]
+ "TARGET_VIS3"
+ "fhadds\t%1, %2, %0"
+ [(set_attr "type" "fp")])
+
+;; Double-precision fhaddd.  DFmode operands take the "e" constraint
+;; (the DF-capable FP register class, as used by *nadddf3/*nmuldf3
+;; above); "f" would wrongly restrict allocation to the SF registers.
+(define_insn "fhadddf_vis"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (unspec:DF [(match_operand:DF 1 "register_operand" "e")
+ (match_operand:DF 2 "register_operand" "e")]
+ UNSPEC_FHADD))]
+ "TARGET_VIS3"
+ "fhaddd\t%1, %2, %0"
+ [(set_attr "type" "fp")
+ (set_attr "fptype" "double")])
+
+;; fhsubs: single-precision counterpart in the same unspec family as
+;; fhadds above.
+(define_insn "fhsubsf_vis"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (unspec:SF [(match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")]
+ UNSPEC_FHSUB))]
+ "TARGET_VIS3"
+ "fhsubs\t%1, %2, %0"
+ [(set_attr "type" "fp")])
+
+;; Double-precision fhsubd.  DFmode operands take the "e" constraint
+;; (the DF-capable FP register class, as used by *nadddf3/*nmuldf3
+;; above); "f" would wrongly restrict allocation to the SF registers.
+(define_insn "fhsubdf_vis"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (unspec:DF [(match_operand:DF 1 "register_operand" "e")
+ (match_operand:DF 2 "register_operand" "e")]
+ UNSPEC_FHSUB))]
+ "TARGET_VIS3"
+ "fhsubd\t%1, %2, %0"
+ [(set_attr "type" "fp")
+ (set_attr "fptype" "double")])
+
+;; fnhadds: matches a negated UNSPEC_FHADD so a separate negate of the
+;; fhadds result folds into the one instruction.
+(define_insn "fnhaddsf_vis"
+ [(set (match_operand:SF 0 "register_operand" "=f")
+ (neg:SF (unspec:SF [(match_operand:SF 1 "register_operand" "f")
+ (match_operand:SF 2 "register_operand" "f")]
+ UNSPEC_FHADD)))]
+ "TARGET_VIS3"
+ "fnhadds\t%1, %2, %0"
+ [(set_attr "type" "fp")])
+
+;; Double-precision fnhaddd (negated UNSPEC_FHADD).  DFmode operands
+;; take the "e" constraint (the DF-capable FP register class, as used
+;; by *nadddf3/*nmuldf3 above); "f" would wrongly restrict allocation
+;; to the SF registers.
+(define_insn "fnhadddf_vis"
+ [(set (match_operand:DF 0 "register_operand" "=e")
+ (neg:DF (unspec:DF [(match_operand:DF 1 "register_operand" "e")
+ (match_operand:DF 2 "register_operand" "e")]
+ UNSPEC_FHADD)))]
+ "TARGET_VIS3"
+ "fnhaddd\t%1, %2, %0"
+ [(set_attr "type" "fp")
+ (set_attr "fptype" "double")])
+
+;; umulxhi: high 64 bits of an unsigned 64x64->128 multiply, written
+;; as (truncate (lshiftrt (mult (zero_extend ..) (zero_extend ..)) 64)).
+;; The expander dispatches to the v8plus pattern when not ARCH64,
+;; since 64-bit operands then live in SImode register pairs.
+(define_expand "umulxhi_vis"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (truncate:DI
+ (lshiftrt:TI
+ (mult:TI (zero_extend:TI
+ (match_operand:DI 1 "arith_operand" ""))
+ (zero_extend:TI
+ (match_operand:DI 2 "arith_operand" "")))
+ (const_int 64))))]
+ "TARGET_VIS3"
+{
+  if (! TARGET_ARCH64)
+    {
+      emit_insn (gen_umulxhi_v8plus (operands[0], operands[1], operands[2]));
+      DONE;
+    }
+})
+
+;; Direct form for 64-bit code.  '%' marks operands 1/2 commutative;
+;; "I" additionally allows a 13-bit signed immediate for operand 2.
+(define_insn "*umulxhi_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (truncate:DI
+ (lshiftrt:TI
+ (mult:TI (zero_extend:TI
+ (match_operand:DI 1 "arith_operand" "%r"))
+ (zero_extend:TI
+ (match_operand:DI 2 "arith_operand" "rI")))
+ (const_int 64))))]
+ "TARGET_VIS3 && TARGET_ARCH64"
+ "umulxhi\t%1, %2, %0"
+ [(set_attr "type" "imul")])
+
+;; v8plus (32-bit ABI on a 64-bit CPU) form: output assembled by
+;; output_v8plus_mult in sparc.c; needs two SI scratches in the
+;; first alternative, hence the differing lengths.
+(define_insn "umulxhi_v8plus"
+ [(set (match_operand:DI 0 "register_operand" "=r,h")
+ (truncate:DI
+ (lshiftrt:TI
+ (mult:TI (zero_extend:TI
+ (match_operand:DI 1 "arith_operand" "%r,0"))
+ (zero_extend:TI
+ (match_operand:DI 2 "arith_operand" "rI,rI")))
+ (const_int 64))))
+ (clobber (match_scratch:SI 3 "=&h,X"))
+ (clobber (match_scratch:SI 4 "=&h,X"))]
+ "TARGET_VIS3 && ! TARGET_ARCH64"
+ "* return output_v8plus_mult (insn, operands, \"umulxhi\");"
+ [(set_attr "type" "imul")
+ (set_attr "length" "9,8")])
+
+;; xmulx: XOR (carry-less) multiply; the product itself is not
+;; expressible in RTL, so the TImode result is an unspec of the
+;; zero-extended operands, truncated to the low 64 bits.  Same
+;; ARCH64/v8plus dispatch structure as umulxhi above.
+(define_expand "xmulx_vis"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (truncate:DI
+ (unspec:TI [(zero_extend:TI
+ (match_operand:DI 1 "arith_operand" ""))
+ (zero_extend:TI
+ (match_operand:DI 2 "arith_operand" ""))]
+ UNSPEC_XMUL)))]
+ "TARGET_VIS3"
+{
+  if (! TARGET_ARCH64)
+    {
+      emit_insn (gen_xmulx_v8plus (operands[0], operands[1], operands[2]));
+      DONE;
+    }
+})
+
+(define_insn "*xmulx_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (truncate:DI
+ (unspec:TI [(zero_extend:TI
+ (match_operand:DI 1 "arith_operand" "%r"))
+ (zero_extend:TI
+ (match_operand:DI 2 "arith_operand" "rI"))]
+ UNSPEC_XMUL)))]
+ "TARGET_VIS3 && TARGET_ARCH64"
+ "xmulx\t%1, %2, %0"
+ [(set_attr "type" "imul")])
+
+(define_insn "xmulx_v8plus"
+ [(set (match_operand:DI 0 "register_operand" "=r,h")
+ (truncate:DI
+ (unspec:TI [(zero_extend:TI
+ (match_operand:DI 1 "arith_operand" "%r,0"))
+ (zero_extend:TI
+ (match_operand:DI 2 "arith_operand" "rI,rI"))]
+ UNSPEC_XMUL)))
+ (clobber (match_scratch:SI 3 "=&h,X"))
+ (clobber (match_scratch:SI 4 "=&h,X"))]
+ "TARGET_VIS3 && ! TARGET_ARCH64"
+ "* return output_v8plus_mult (insn, operands, \"xmulx\");"
+ [(set_attr "type" "imul")
+ (set_attr "length" "9,8")])
+
+;; xmulxhi: high 64 bits of the carry-less multiply — the UNSPEC_XMUL
+;; product shifted right by 64 before truncation.  Same ARCH64/v8plus
+;; dispatch structure as umulxhi/xmulx above.
+(define_expand "xmulxhi_vis"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (truncate:DI
+ (lshiftrt:TI
+ (unspec:TI [(zero_extend:TI
+ (match_operand:DI 1 "arith_operand" ""))
+ (zero_extend:TI
+ (match_operand:DI 2 "arith_operand" ""))]
+ UNSPEC_XMUL)
+ (const_int 64))))]
+ "TARGET_VIS3"
+{
+  if (! TARGET_ARCH64)
+    {
+      emit_insn (gen_xmulxhi_v8plus (operands[0], operands[1], operands[2]));
+      DONE;
+    }
+})
+
+(define_insn "*xmulxhi_sp64"
+ [(set (match_operand:DI 0 "register_operand" "=r")
+ (truncate:DI
+ (lshiftrt:TI
+ (unspec:TI [(zero_extend:TI
+ (match_operand:DI 1 "arith_operand" "%r"))
+ (zero_extend:TI
+ (match_operand:DI 2 "arith_operand" "rI"))]
+ UNSPEC_XMUL)
+ (const_int 64))))]
+ "TARGET_VIS3 && TARGET_ARCH64"
+ "xmulxhi\t%1, %2, %0"
+ [(set_attr "type" "imul")])
+
+(define_insn "xmulxhi_v8plus"
+ [(set (match_operand:DI 0 "register_operand" "=r,h")
+ (truncate:DI
+ (lshiftrt:TI
+ (unspec:TI [(zero_extend:TI
+ (match_operand:DI 1 "arith_operand" "%r,0"))
+ (zero_extend:TI
+ (match_operand:DI 2 "arith_operand" "rI,rI"))]
+ UNSPEC_XMUL)
+ (const_int 64))))
+ (clobber (match_scratch:SI 3 "=&h,X"))
+ (clobber (match_scratch:SI 4 "=&h,X"))]
+ "TARGET_VIS3 && !TARGET_ARCH64"
+ "* return output_v8plus_mult (insn, operands, \"xmulxhi\");"
+ [(set_attr "type" "imul")
+ (set_attr "length" "9,8")])
+