rtx new_target;
rtx x, tmp;
bool use_vector_set = false;
+ rtx (*gen_vec_set_0) (rtx, rtx, rtx) = NULL;
switch (mode)
{
break;
case E_V32QImode:
case E_V16HImode:
+ use_vector_set = TARGET_AVX;
+ break;
case E_V8SImode:
+ use_vector_set = TARGET_AVX;
+ gen_vec_set_0 = gen_vec_setv8si_0;
+ break;
case E_V8SFmode:
+ use_vector_set = TARGET_AVX;
+ gen_vec_set_0 = gen_vec_setv8sf_0;
+ break;
case E_V4DFmode:
use_vector_set = TARGET_AVX;
+ gen_vec_set_0 = gen_vec_setv4df_0;
break;
case E_V4DImode:
/* Use ix86_expand_vector_set in 64bit mode only. */
use_vector_set = TARGET_AVX && TARGET_64BIT;
+ gen_vec_set_0 = gen_vec_setv4di_0;
+ break;
+ case E_V16SImode:
+ use_vector_set = TARGET_AVX512F && one_var == 0;
+ gen_vec_set_0 = gen_vec_setv16si_0;
+ break;
+ case E_V16SFmode:
+ use_vector_set = TARGET_AVX512F && one_var == 0;
+ gen_vec_set_0 = gen_vec_setv16sf_0;
+ break;
+ case E_V8DFmode:
+ use_vector_set = TARGET_AVX512F && one_var == 0;
+ gen_vec_set_0 = gen_vec_setv8df_0;
+ break;
+ case E_V8DImode:
+ /* Use ix86_expand_vector_set in 64bit mode only. */
+ use_vector_set = TARGET_AVX512F && TARGET_64BIT && one_var == 0;
+ gen_vec_set_0 = gen_vec_setv8di_0;
break;
default:
break;
}

if (use_vector_set)
{
+ if (gen_vec_set_0 && one_var == 0)
+ {
+ var = force_reg (GET_MODE_INNER (mode), var);
+ emit_insn (gen_vec_set_0 (target, CONST0_RTX (mode), var));
+ return true;
+ }
emit_insn (gen_rtx_SET (target, CONST0_RTX (mode)));
var = force_reg (GET_MODE_INNER (mode), var);
ix86_expand_vector_set (mmx_ok, target, var, one_var);
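
To illustrate the new fast path (an editor's example, not part of the patch): when an initializer has exactly one variable, nonzero element and it sits at index zero, the expander now emits a single vec_set<mode>_0 insn instead of first zeroing the register and then calling ix86_expand_vector_set. A minimal sketch, assuming -mavx:

#include <immintrin.h>

/* One variable element at index 0, all others zero: the V8SImode
   case should now expand through gen_vec_setv8si_0, i.e. a single
   vmovd that also zeroes the upper elements.  */
__m256i
set_lo_epi32 (int x)
{
  return _mm256_set_epi32 (0, 0, 0, 0, 0, 0, 0, x);
}
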
(define_mode_iterator VI8_AVX2_AVX512F
[(V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX2") V2DI])
+(define_mode_iterator VI8_AVX_AVX512F
+ [(V8DI "TARGET_AVX512F") (V4DI "TARGET_AVX")])
+
(define_mode_iterator VI4_128_8_256
[V4SI V4DI])
(define_mode_iterator VI8F_128 [V2DI V2DF])
(define_mode_iterator VI4F_256 [V8SI V8SF])
(define_mode_iterator VI8F_256 [V4DI V4DF])
+(define_mode_iterator VI4F_256_512
+ [V8SI V8SF
+ (V16SI "TARGET_AVX512F") (V16SF "TARGET_AVX512F")])
(define_mode_iterator VI48F_256_512
[V8SI V8SF
(V16SI "TARGET_AVX512F") (V16SF "TARGET_AVX512F")
;; SSE scalar suffix for vector modes
(define_mode_attr ssescalarmodesuffix
[(SF "ss") (DF "sd")
+ (V16SF "ss") (V8DF "sd")
(V8SF "ss") (V4DF "sd")
(V4SF "ss") (V2DF "sd")
- (V8SI "ss") (V4DI "sd")
- (V4SI "d")])
+ (V16SI "d") (V8DI "q")
+ (V8SI "d") (V4DI "q")
+ (V4SI "d") (V2DI "q")])
;; Pack/unpack vector modes
(define_mode_attr sseunpackmode
(set_attr "prefix" "orig,orig,maybe_evex")
(set_attr "mode" "V4SF")])
+;; All of vinsertps, vmovss and vmovd also clear the higher bits.
+(define_insn "vec_set<mode>_0"
+ [(set (match_operand:VI4F_256_512 0 "register_operand" "=v,v,Yi")
+ (vec_merge:VI4F_256_512
+ (vec_duplicate:VI4F_256_512
+ (match_operand:<ssescalarmode> 2 "general_operand" "v,m,r"))
+ (match_operand:VI4F_256_512 1 "const0_operand" "C,C,C")
+ (const_int 1)))]
+ "TARGET_AVX"
+ "@
+ vinsertps\t{$0xe, %2, %2, %x0|%x0, %2, %2, 0xe}
+ vmov<ssescalarmodesuffix>\t{%x2, %x0|%x0, %2}
+ vmovd\t{%2, %x0|%x0, %2}"
+ [(set (attr "type")
+ (if_then_else (eq_attr "alternative" "0")
+ (const_string "sselog")
+ (const_string "ssemov")))
+ (set_attr "prefix" "maybe_evex")
+ (set_attr "mode" "SF,<ssescalarmode>,SI")])
+
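As a sketch of what the VI4F_256_512 pattern buys (an illustrative example, not from the patch): inserting a scalar into element zero of an otherwise-zero 256-bit vector can now match a single instruction. Assuming -mavx:

#include <immintrin.h>

/* x arrives in %xmm0, so alternative 0 should match and emit one
   vinsertps $0xe, which zeroes the remaining elements.  */
__m256
set_lo_ps (float x)
{
  return _mm256_set_ps (0.0f, 0.0f, 0.0f, 0.0f,
                        0.0f, 0.0f, 0.0f, x);
}
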
(define_insn "sse4_1_insertps"
[(set (match_operand:V4SF 0 "register_operand" "=Yr,*x,v")
(unspec:V4SF [(match_operand:V4SF 2 "nonimmediate_operand" "Yrm,*xm,vm")
(const_string "orig")))
(set_attr "mode" "V2DF,V2DF,V2DF, DF, DF, V1DF,V1DF,DF,V4SF,V2SF")])
+;; vmovq also clears the higher bits.
+(define_insn "vec_set<mode>_0"
+ [(set (match_operand:VF2_512_256 0 "register_operand" "=v")
+ (vec_merge:VF2_512_256
+ (vec_duplicate:VF2_512_256
+ (match_operand:<ssescalarmode> 2 "general_operand" "xm"))
+ (match_operand:VF2_512_256 1 "const0_operand" "C")
+ (const_int 1)))]
+ "TARGET_AVX"
+ "vmovq\t{%2, %x0|%x0, %2}"
+ [(set_attr "type" "ssemov")
+ (set_attr "prefix" "maybe_evex")
+ (set_attr "mode" "DF")])
+
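The DF variant in action (again an illustration of mine, assuming -mavx): vmovq between xmm registers moves the low 64 bits and zeroes bits 64..255, which is exactly the vec_merge-with-zero above.

#include <immintrin.h>

/* Should expand via gen_vec_setv4df_0 and emit vmovq %xmm0, %xmm0.  */
__m256d
set_lo_pd (double x)
{
  return _mm256_set_pd (0.0, 0.0, 0.0, x);
}
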
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Parallel integer down-conversion operations
(const_string "orig")))
(set_attr "mode" "TI,TI,TI,TI,TI,TI,TI,TI,TI,V4SF,V2SF,V2SF")])
+;; vmovq also clears the higher bits.
+(define_insn "vec_set<mode>_0"
+ [(set (match_operand:VI8_AVX_AVX512F 0 "register_operand" "=Yi,v")
+ (vec_merge:VI8_AVX_AVX512F
+ (vec_duplicate:VI8_AVX_AVX512F
+ (match_operand:<ssescalarmode> 2 "general_operand" "r,vm"))
+ (match_operand:VI8_AVX_AVX512F 1 "const0_operand" "C,C")
+ (const_int 1)))]
+ "TARGET_AVX"
+ "vmovq\t{%2, %x0|%x0, %2}"
+ [(set_attr "isa" "x64,*")
+ (set_attr "type" "ssemov")
+ (set_attr "prefix_rex" "1,*")
+ (set_attr "prefix" "maybe_evex")
+ (set_attr "mode" "TI")])
+
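For the integer variant, the first alternative takes the scalar from a general register (hence the "isa" "x64" attribute), the second from an SSE register or memory. A hedged example of mine, assuming -mavx on x86-64:

#include <immintrin.h>

/* With x in %rdi, the GPR alternative should match and emit
   vmovq %rdi, %xmm0; zeroing of elements 1..3 is implicit.  */
__m256i
set_lo_epi64 (long long x)
{
  return _mm256_set_epi64x (0, 0, 0, x);
}
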
(define_expand "vec_unpacks_lo_<mode>"
[(match_operand:<sseunpackmode> 0 "register_operand")
(match_operand:VI124_AVX2_24_AVX512F_1_AVX512BW 1 "register_operand")]
;; Modes handled by AVX vec_dup patterns.
(define_mode_iterator AVX_VEC_DUP_MODE
[V8SI V8SF V4DI V4DF])
+(define_mode_attr vecdupssescalarmodesuffix
+ [(V8SF "ss") (V4DF "sd") (V8SI "ss") (V4DI "sd")])
;; Modes handled by AVX2 vec_dup patterns.
(define_mode_iterator AVX2_VEC_DUP_MODE
[V32QI V16QI V16HI V8HI V8SI V4SI])
"TARGET_AVX"
"@
v<sseintprefix>broadcast<bcstscalarsuff>\t{%1, %0|%0, %1}
- vbroadcast<ssescalarmodesuffix>\t{%1, %0|%0, %1}
+ vbroadcast<vecdupssescalarmodesuffix>\t{%1, %0|%0, %1}
v<sseintprefix>broadcast<bcstscalarsuff>\t{%x1, %0|%0, %x1}
v<sseintprefix>broadcast<bcstscalarsuff>\t{%x1, %g0|%g0, %x1}
#"