)
(define_insn "aarch64_dup_lane<mode>"
- [(set (match_operand:SDQ_I 0 "register_operand" "=w")
+ [(set (match_operand:ALLX 0 "register_operand" "=w")
(vec_select:<VEL>
(match_operand:<VCON> 1 "register_operand" "w")
(parallel [(match_operand:SI 2 "immediate_operand" "i")])
(set_attr "simd_mode" "<MODE>")]
)
+;; Extract lane %2 of a V2DI vector into a DI destination, either into a
+;; SIMD register ("dup") or a general register ("umov").
+;; This insn has no mode iterator, so mode attributes such as <v>/<Vmtype>
+;; cannot be substituted here; the DI/V2DI operand modifiers are spelled
+;; out concretely instead.
+(define_insn "aarch64_dup_lanedi"
+ [(set (match_operand:DI 0 "register_operand" "=w,r")
+ (vec_select:DI
+ (match_operand:V2DI 1 "register_operand" "w,w")
+ (parallel [(match_operand:SI 2 "immediate_operand" "i,i")])))]
+ "TARGET_SIMD"
+ "@
+ dup\\t%d0, %1.d[%2]
+ umov\t%0, %1.d[%2]"
+ [(set_attr "simd_type" "simd_dup")
+ (set_attr "simd_mode" "DI")]
+)
+
(define_insn "aarch64_simd_dup<mode>"
[(set (match_operand:VDQF 0 "register_operand" "=w")
(vec_duplicate:VDQF (match_operand:<VEL> 1 "register_operand" "w")))]
case 0: return "ld1\t{%0.<Vtype>}, %1";
case 1: return "st1\t{%1.<Vtype>}, %0";
case 2: return "orr\t%0.<Vbtype>, %1.<Vbtype>, %1.<Vbtype>";
- case 3: return "umov\t%0, %1.d[0]\;umov\t%H0, %1.d[1]";
- case 4: return "ins\t%0.d[0], %1\;ins\t%0.d[1], %H1";
+ case 3: return "#";
+ case 4: return "#";
case 5: return "#";
case 6:
return aarch64_output_simd_mov_immediate (&operands[1],
aarch64_simd_disambiguate_copy (operands, dest, src, 2);
})
+;; After reload, split a 128-bit (VQ) register-to-register move whenever it
+;; crosses between the FP/SIMD and general register files -- one side FP,
+;; the other GP in either direction.  Such a move cannot be a single
+;; instruction, so hand it to aarch64_split_simd_move, which emits the
+;; two-halves sequence.
+(define_split
+ [(set (match_operand:VQ 0 "register_operand" "")
+ (match_operand:VQ 1 "register_operand" ""))]
+ "TARGET_SIMD && reload_completed
+ && ((FP_REGNUM_P (REGNO (operands[0])) && GP_REGNUM_P (REGNO (operands[1])))
+ || (GP_REGNUM_P (REGNO (operands[0])) && FP_REGNUM_P (REGNO (operands[1]))))"
+ [(const_int 0)]
+{
+ aarch64_split_simd_move (operands[0], operands[1]);
+ DONE;
+})
+
+;; Expand a cross-register-file 128-bit vector move as two 64-bit half
+;; moves.  GP -> SIMD uses "ins" insertions (aarch64_simd_mov_to_*);
+;; SIMD -> GP uses "umov" lane extractions (aarch64_simd_mov_from_*).
+;; NOTE(review): GP_REGNUM_P (REGNO (src)) only classifies correctly for
+;; hard registers -- this expander appears to be reached only from
+;; aarch64_split_simd_move via the reload_completed define_split above;
+;; confirm no pre-reload caller exists.
+(define_expand "aarch64_simd_mov<mode>"
+ [(set (match_operand:VQ 0)
+ (match_operand:VQ 1))]
+ "TARGET_SIMD"
+ {
+ rtx dst = operands[0];
+ rtx src = operands[1];
+
+ if (GP_REGNUM_P (REGNO (src)))
+ {
+ /* GP -> SIMD: insert each 64-bit GP half of SRC into DST.  */
+ rtx low_part = gen_lowpart (<VHALF>mode, src);
+ rtx high_part = gen_highpart (<VHALF>mode, src);
+
+ emit_insn
+ (gen_aarch64_simd_mov_to_<mode>low (dst, low_part));
+ emit_insn
+ (gen_aarch64_simd_mov_to_<mode>high (dst, high_part));
+ }
+
+ else
+ {
+ /* SIMD -> GP: select each half of SRC via a parallel of lane
+ indices and move it to the corresponding half of DST.  */
+ rtx low_half = aarch64_simd_vect_par_cnst_half (<MODE>mode, false);
+ rtx high_half = aarch64_simd_vect_par_cnst_half (<MODE>mode, true);
+ rtx low_part = gen_lowpart (<VHALF>mode, dst);
+ rtx high_part = gen_highpart (<VHALF>mode, dst);
+
+ emit_insn
+ (gen_aarch64_simd_mov_from_<mode>low (low_part, src, low_half));
+ emit_insn
+ (gen_aarch64_simd_mov_from_<mode>high (high_part, src, high_half));
+ }
+ DONE;
+ }
+)
+
+;; Insert a 64-bit general register into the low half (d[0]) of a SIMD
+;; register, emitted as "ins".
+;; NOTE(review): the RTL writes a 64-bit zero_extract at bit 0 but the
+;; source is a full-width vec_concat with a zeroed upper half -- the value
+;; is wider than the field it is stored into.  The emitted "ins" leaves
+;; d[1] untouched; confirm the RTL description matches that behaviour.
+(define_insn "aarch64_simd_mov_to_<mode>low"
+ [(set (zero_extract:VQ
+ (match_operand:VQ 0 "register_operand" "+w")
+ (const_int 64) (const_int 0))
+ (vec_concat:VQ
+ (match_operand:<VHALF> 1 "register_operand" "r")
+ (vec_duplicate:<VHALF> (const_int 0))))]
+ "TARGET_SIMD && reload_completed"
+ "ins\t%0.d[0], %1"
+ [(set_attr "simd_type" "simd_move")
+ (set_attr "simd_mode" "<MODE>")
+ (set_attr "length" "4")
+ ])
+
+;; Insert a 64-bit general register into the high half (d[1]) of a SIMD
+;; register, emitted as "ins".  Counterpart of aarch64_simd_mov_to_<mode>low;
+;; the same zero_extract/vec_concat width observation applies here.
+(define_insn "aarch64_simd_mov_to_<mode>high"
+ [(set (zero_extract:VQ
+ (match_operand:VQ 0 "register_operand" "+w")
+ (const_int 64) (const_int 64))
+ (vec_concat:VQ
+ (match_operand:<VHALF> 1 "register_operand" "r")
+ (vec_duplicate:<VHALF> (const_int 0))))]
+ "TARGET_SIMD && reload_completed"
+ "ins\t%0.d[1], %1"
+ [(set_attr "simd_type" "simd_move")
+ (set_attr "simd_mode" "<MODE>")
+ (set_attr "length" "4")
+ ])
+
+;; Move the low half of a SIMD register into a 64-bit general register,
+;; emitted as "umov".  Operand 2 is a parallel of the low-half lane
+;; indices (matched by vect_par_cnst_lo_half), built by
+;; aarch64_simd_vect_par_cnst_half in the aarch64_simd_mov<mode> expander.
+(define_insn "aarch64_simd_mov_from_<mode>low"
+ [(set (match_operand:<VHALF> 0 "register_operand" "=r")
+ (vec_select:<VHALF>
+ (match_operand:VQ 1 "register_operand" "w")
+ (match_operand:VQ 2 "vect_par_cnst_lo_half" "")))]
+ "TARGET_SIMD && reload_completed"
+ "umov\t%0, %1.d[0]"
+ [(set_attr "simd_type" "simd_move")
+ (set_attr "simd_mode" "<MODE>")
+ (set_attr "length" "4")
+ ])
+
+;; Move the high half of a SIMD register into a 64-bit general register,
+;; emitted as "umov".  Counterpart of aarch64_simd_mov_from_<mode>low;
+;; operand 2 is the parallel of high-half lane indices.
+(define_insn "aarch64_simd_mov_from_<mode>high"
+ [(set (match_operand:<VHALF> 0 "register_operand" "=r")
+ (vec_select:<VHALF>
+ (match_operand:VQ 1 "register_operand" "w")
+ (match_operand:VQ 2 "vect_par_cnst_hi_half" "")))]
+ "TARGET_SIMD && reload_completed"
+ "umov\t%0, %1.d[1]"
+ [(set_attr "simd_type" "simd_move")
+ (set_attr "simd_mode" "<MODE>")
+ (set_attr "length" "4")
+ ])
+
(define_insn "orn<mode>3"
[(set (match_operand:VDQ 0 "register_operand" "=w")
(ior:VDQ (not:VDQ (match_operand:VDQ 1 "register_operand" "w"))
|| ! (FP_REGNUM_P (REGNO (dst)) && FP_REGNUM_P (REGNO (src))));
}
+/* Split a register-to-register SIMD move of vector mode DST/SRC into the
+   two-half expander aarch64_simd_mov<mode>, dispatched on the source mode.
+   Non-register operands are left alone.  */
+
+void
+aarch64_split_simd_move (rtx dst, rtx src)
+{
+  enum machine_mode src_mode = GET_MODE (src);
+  enum machine_mode dst_mode = GET_MODE (dst);
+
+  gcc_assert (VECTOR_MODE_P (dst_mode));
+
+  /* Only a pure register-register move is split here.  */
+  if (!REG_P (dst) || !REG_P (src))
+    return;
+
+  gcc_assert (VECTOR_MODE_P (src_mode));
+
+  /* Dispatch to the per-mode expander; any other mode is a caller bug.  */
+  if (src_mode == V16QImode)
+    emit_insn (gen_aarch64_simd_movv16qi (dst, src));
+  else if (src_mode == V8HImode)
+    emit_insn (gen_aarch64_simd_movv8hi (dst, src));
+  else if (src_mode == V4SImode)
+    emit_insn (gen_aarch64_simd_movv4si (dst, src));
+  else if (src_mode == V2DImode)
+    emit_insn (gen_aarch64_simd_movv2di (dst, src));
+  else if (src_mode == V4SFmode)
+    emit_insn (gen_aarch64_simd_movv4sf (dst, src));
+  else if (src_mode == V2DFmode)
+    emit_insn (gen_aarch64_simd_movv2df (dst, src));
+  else
+    gcc_unreachable ();
+}
+
static rtx
aarch64_force_temporary (rtx x, rtx value)
{