+2018-11-07 Chenghua Xu <paul.hua.gm@gmail.com>
+
+ * config.gcc (extra_headers): Add loongson-mmiintrin.h.
+ * config/mips/loongson.md: Move to ...
+ * config/mips/loongson-mmi.md: ... here.  Adjust accordingly.
+ * config/mips/loongson.h: Move to ...
+ Mark as deprecated.  Include loongson-mmiintrin.h for backwards
+ compatibility and emit a deprecation warning.
+ * config/mips/loongson-mmiintrin.h: ... here.
+ * config/mips/mips.c (mips_hard_regno_mode_ok_uncached,
+ mips_vector_mode_supported_p, AVAIL_NON_MIPS16): Use
+ TARGET_LOONGSON_MMI instead of TARGET_LOONGSON_VECTORS.
+ (mips_option_override): Require hard float when MMI is enabled.
+ (mips_shift_truncation_mask, mips_expand_vpc_loongson_even_odd,
+ mips_expand_vpc_loongson_pshufh, mips_expand_vpc_loongson_bcast,
+ mips_expand_vector_init): Use TARGET_LOONGSON_MMI instead of
+ TARGET_LOONGSON_VECTORS.
+ * config/mips/mips.h (TARGET_LOONGSON_VECTORS): Delete.
+ (TARGET_CPU_CPP_BUILTINS): Add __mips_loongson_mmi.
+ (MIPS_ASE_DSP_SPEC, MIPS_ASE_LOONGSON_MMI_SPEC): New.
+ (BASE_DRIVER_SELF_SPECS): -march=loongson2e/2f/3a implies
+ -mloongson-mmi.
+ (SHIFT_COUNT_TRUNCATED): Use TARGET_LOONGSON_MMI instead of
+ TARGET_LOONGSON_VECTORS.
+ * config/mips/mips.md (MOVE64, MOVE128): Use
+ TARGET_LOONGSON_MMI instead of TARGET_LOONGSON_VECTORS.
+ (Loongson MMI patterns): Include loongson-mmi.md instead of
+ loongson.md.
+ * config/mips/mips.opt (-mloongson-mmi): New option.
+ * doc/invoke.texi (-mloongson-mmi): Document.
+
2018-11-07 Richard Biener <rguenther@suse.de>
PR lto/87906
mips*-*-*)
cpu_type=mips
d_target_objs="mips-d.o"
- extra_headers="loongson.h msa.h"
+ extra_headers="loongson.h loongson-mmiintrin.h msa.h"
extra_objs="frame-header-opt.o"
extra_options="${extra_options} g.opt fused-madd.opt mips/mips-tables.opt"
;;
--- /dev/null
+;; Machine description for Loongson MultiMedia extension Instructions (MMI).
+;; Copyright (C) 2008-2018 Free Software Foundation, Inc.
+;; Contributed by CodeSourcery.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_c_enum "unspec" [
+ UNSPEC_LOONGSON_PAVG
+ UNSPEC_LOONGSON_PCMPEQ
+ UNSPEC_LOONGSON_PCMPGT
+ UNSPEC_LOONGSON_PEXTR
+ UNSPEC_LOONGSON_PINSRH
+ UNSPEC_LOONGSON_VINIT
+ UNSPEC_LOONGSON_PMADD
+ UNSPEC_LOONGSON_PMOVMSK
+ UNSPEC_LOONGSON_PMULHU
+ UNSPEC_LOONGSON_PMULH
+ UNSPEC_LOONGSON_PMULU
+ UNSPEC_LOONGSON_PASUBUB
+ UNSPEC_LOONGSON_BIADD
+ UNSPEC_LOONGSON_PSADBH
+ UNSPEC_LOONGSON_PSHUFH
+ UNSPEC_LOONGSON_PUNPCKH
+ UNSPEC_LOONGSON_PUNPCKL
+ UNSPEC_LOONGSON_PADDD
+ UNSPEC_LOONGSON_PSUBD
+ UNSPEC_LOONGSON_DSLL
+ UNSPEC_LOONGSON_DSRL
+])
+
+;; Mode iterators and attributes.
+
+;; 64-bit vectors of bytes.
+(define_mode_iterator VB [V8QI])
+
+;; 64-bit vectors of halfwords.
+(define_mode_iterator VH [V4HI])
+
+;; 64-bit vectors of words.
+(define_mode_iterator VW [V2SI])
+
+;; 64-bit vectors of halfwords and bytes.
+(define_mode_iterator VHB [V4HI V8QI])
+
+;; 64-bit vectors of words and halfwords.
+(define_mode_iterator VWH [V2SI V4HI])
+
+;; 64-bit vectors of words and bytes
+(define_mode_iterator VWB [V2SI V8QI])
+
+;; 64-bit vectors of words, halfwords and bytes.
+(define_mode_iterator VWHB [V2SI V4HI V8QI])
+
+;; 64-bit vectors of words, halfwords and bytes; and DImode.
+(define_mode_iterator VWHBDI [V2SI V4HI V8QI DI])
+
+;; The Loongson instruction suffixes corresponding to the modes in the
+;; VWHBDI iterator.
+(define_mode_attr V_suffix [(V2SI "w") (V4HI "h") (V8QI "b") (DI "d")])
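+;; For example, the add<mode>3 pattern below iterates over VWHB, so it
+;; produces addv2si3, addv4hi3 and addv8qi3, with <V_suffix> selecting
+;; the corresponding paddw, paddh and paddb mnemonics in the output
+;; template.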
+
+;; Given a vector type T, the mode of a vector half the size of T
+;; and with the same number of elements.
+(define_mode_attr V_squash [(V2SI "V2HI") (V4HI "V4QI")])
+
+;; Given a vector type T, the mode of a vector the same size as T
+;; but with half as many elements.
+(define_mode_attr V_stretch_half [(V2SI "DI") (V4HI "V2SI") (V8QI "V4HI")])
+
+;; The Loongson instruction suffixes corresponding to the transformation
+;; expressed by V_stretch_half.
+(define_mode_attr V_stretch_half_suffix [(V2SI "wd") (V4HI "hw") (V8QI "bh")])
+
+;; Given a vector type T, the mode of a vector the same size as T
+;; but with twice as many elements.
+(define_mode_attr V_squash_double [(V2SI "V4HI") (V4HI "V8QI")])
+
+;; Given a vector type T, the inner mode.
+(define_mode_attr V_inner [(V8QI "QI") (V4HI "HI") (V2SI "SI")])
+
+;; The Loongson instruction suffixes corresponding to the conversions
+;; specified by V_squash_double.
+(define_mode_attr V_squash_double_suffix [(V2SI "wh") (V4HI "hb")])
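+;; For instance, when <mode> is V4HI, <V_squash_double> is V8QI and
+;; <V_squash_double_suffix> is "hb", so vec_pack_ssat_v4hi below emits
+;; packsshb.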
+
+;; Move patterns.
+
+;; Expander to legitimize moves involving values of vector modes.
+(define_expand "mov<mode>"
+ [(set (match_operand:VWHB 0)
+ (match_operand:VWHB 1))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+{
+ if (mips_legitimize_move (<MODE>mode, operands[0], operands[1]))
+ DONE;
+})
+
+;; Handle legitimized moves between values of vector modes.
+(define_insn "mov<mode>_internal"
+ [(set (match_operand:VWHB 0 "nonimmediate_operand" "=m,f,d,f, d, m, d")
+ (match_operand:VWHB 1 "move_operand" "f,m,f,dYG,dYG,dYG,m"))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ { return mips_output_move (operands[0], operands[1]); }
+ [(set_attr "move_type" "fpstore,fpload,mfc,mtc,move,store,load")
+ (set_attr "mode" "DI")])
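+;; The seven constraint alternatives above are, in order: FPR store,
+;; FPR load, FPR-to-GPR move (mfc), GPR/zero-to-FPR move (mtc),
+;; GPR-to-GPR move, GPR/zero store and GPR load, matching the entries
+;; of the move_type attribute.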
+
+;; Initialization of a vector.
+
+(define_expand "vec_init<mode><unitmode>"
+ [(set (match_operand:VWHB 0 "register_operand")
+ (match_operand 1 ""))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+{
+ mips_expand_vector_init (operands[0], operands[1]);
+ DONE;
+})
+
+;; Helper for vec_init. Initialize element 0 of the output from the input.
+;; All other elements are undefined.
+(define_insn "loongson_vec_init1_<mode>"
+ [(set (match_operand:VHB 0 "register_operand" "=f")
+ (unspec:VHB [(truncate:<V_inner>
+ (match_operand:DI 1 "reg_or_0_operand" "Jd"))]
+ UNSPEC_LOONGSON_VINIT))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "dmtc1\t%z1,%0"
+ [(set_attr "move_type" "mtc")
+ (set_attr "mode" "DI")])
+
+;; Helper for vec_initv2si.
+(define_insn "*vec_concatv2si"
+ [(set (match_operand:V2SI 0 "register_operand" "=f")
+ (vec_concat:V2SI
+ (match_operand:SI 1 "register_operand" "f")
+ (match_operand:SI 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "punpcklwd\t%0,%1,%2"
+ [(set_attr "type" "fcvt")])
+
+;; Instruction patterns for SIMD instructions.
+
+;; Pack with signed saturation.
+(define_insn "vec_pack_ssat_<mode>"
+ [(set (match_operand:<V_squash_double> 0 "register_operand" "=f")
+ (vec_concat:<V_squash_double>
+ (ss_truncate:<V_squash>
+ (match_operand:VWH 1 "register_operand" "f"))
+ (ss_truncate:<V_squash>
+ (match_operand:VWH 2 "register_operand" "f"))))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "packss<V_squash_double_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fmul")])
+
+;; Pack with unsigned saturation.
+(define_insn "vec_pack_usat_<mode>"
+ [(set (match_operand:<V_squash_double> 0 "register_operand" "=f")
+ (vec_concat:<V_squash_double>
+ (us_truncate:<V_squash>
+ (match_operand:VH 1 "register_operand" "f"))
+ (us_truncate:<V_squash>
+ (match_operand:VH 2 "register_operand" "f"))))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "packus<V_squash_double_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fmul")])
+
+;; Addition, treating overflow by wraparound.
+(define_insn "add<mode>3"
+ [(set (match_operand:VWHB 0 "register_operand" "=f")
+ (plus:VWHB (match_operand:VWHB 1 "register_operand" "f")
+ (match_operand:VWHB 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "padd<V_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; Addition of doubleword integers stored in FP registers.
+;; Overflow is treated by wraparound.
+;; We use 'unspec' instead of 'plus' here to avoid a clash with
+;; mips.md::add<mode>3.  If 'plus' were used, such an instruction
+;; would be recognized as adddi3 and reload would make it use
+;; GPRs instead of FPRs.
+(define_insn "loongson_paddd"
+ [(set (match_operand:DI 0 "register_operand" "=f")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "f")
+ (match_operand:DI 2 "register_operand" "f")]
+ UNSPEC_LOONGSON_PADDD))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "paddd\t%0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; Addition, treating overflow by signed saturation.
+(define_insn "ssadd<mode>3"
+ [(set (match_operand:VHB 0 "register_operand" "=f")
+ (ss_plus:VHB (match_operand:VHB 1 "register_operand" "f")
+ (match_operand:VHB 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "padds<V_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; Addition, treating overflow by unsigned saturation.
+(define_insn "usadd<mode>3"
+ [(set (match_operand:VHB 0 "register_operand" "=f")
+ (us_plus:VHB (match_operand:VHB 1 "register_operand" "f")
+ (match_operand:VHB 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "paddus<V_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; Logical AND NOT.
+(define_insn "loongson_pandn_<V_suffix>"
+ [(set (match_operand:VWHBDI 0 "register_operand" "=f")
+ (and:VWHBDI
+ (not:VWHBDI (match_operand:VWHBDI 1 "register_operand" "f"))
+ (match_operand:VWHBDI 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "pandn\t%0,%1,%2"
+ [(set_attr "type" "fmul")])
+
+;; Logical AND.
+(define_insn "and<mode>3"
+ [(set (match_operand:VWHB 0 "register_operand" "=f")
+ (and:VWHB (match_operand:VWHB 1 "register_operand" "f")
+ (match_operand:VWHB 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "and\t%0,%1,%2"
+ [(set_attr "type" "fmul")])
+
+;; Logical OR.
+(define_insn "ior<mode>3"
+ [(set (match_operand:VWHB 0 "register_operand" "=f")
+ (ior:VWHB (match_operand:VWHB 1 "register_operand" "f")
+ (match_operand:VWHB 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "or\t%0,%1,%2"
+ [(set_attr "type" "fcvt")])
+
+;; Logical XOR.
+(define_insn "xor<mode>3"
+ [(set (match_operand:VWHB 0 "register_operand" "=f")
+ (xor:VWHB (match_operand:VWHB 1 "register_operand" "f")
+ (match_operand:VWHB 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "xor\t%0,%1,%2"
+ [(set_attr "type" "fmul")])
+
+;; Logical NOR.
+(define_insn "*loongson_nor"
+ [(set (match_operand:VWHB 0 "register_operand" "=f")
+ (and:VWHB
+ (not:VWHB (match_operand:VWHB 1 "register_operand" "f"))
+ (not:VWHB (match_operand:VWHB 2 "register_operand" "f"))))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "nor\t%0,%1,%2"
+ [(set_attr "type" "fmul")])
+
+;; Logical NOT.
+(define_insn "one_cmpl<mode>2"
+ [(set (match_operand:VWHB 0 "register_operand" "=f")
+ (not:VWHB (match_operand:VWHB 1 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "nor\t%0,%1,%1"
+ [(set_attr "type" "fmul")])
+
+;; Average.
+(define_insn "loongson_pavg<V_suffix>"
+ [(set (match_operand:VHB 0 "register_operand" "=f")
+ (unspec:VHB [(match_operand:VHB 1 "register_operand" "f")
+ (match_operand:VHB 2 "register_operand" "f")]
+ UNSPEC_LOONGSON_PAVG))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "pavg<V_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; Equality test.
+(define_insn "loongson_pcmpeq<V_suffix>"
+ [(set (match_operand:VWHB 0 "register_operand" "=f")
+ (unspec:VWHB [(match_operand:VWHB 1 "register_operand" "f")
+ (match_operand:VWHB 2 "register_operand" "f")]
+ UNSPEC_LOONGSON_PCMPEQ))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "pcmpeq<V_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; Greater-than test.
+(define_insn "loongson_pcmpgt<V_suffix>"
+ [(set (match_operand:VWHB 0 "register_operand" "=f")
+ (unspec:VWHB [(match_operand:VWHB 1 "register_operand" "f")
+ (match_operand:VWHB 2 "register_operand" "f")]
+ UNSPEC_LOONGSON_PCMPGT))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "pcmpgt<V_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; Extract halfword.
+(define_insn "loongson_pextrh"
+ [(set (match_operand:V4HI 0 "register_operand" "=f")
+ (unspec:V4HI [(match_operand:V4HI 1 "register_operand" "f")
+ (match_operand:SI 2 "register_operand" "f")]
+ UNSPEC_LOONGSON_PEXTR))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "pextrh\t%0,%1,%2"
+ [(set_attr "type" "fcvt")])
+
+;; Insert halfword.
+(define_insn "loongson_pinsrh_0"
+ [(set (match_operand:V4HI 0 "register_operand" "=f")
+ (vec_select:V4HI
+ (vec_concat:V8HI
+ (match_operand:V4HI 1 "register_operand" "f")
+ (match_operand:V4HI 2 "register_operand" "f"))
+ (parallel [(const_int 4) (const_int 1)
+ (const_int 2) (const_int 3)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "pinsrh_0\t%0,%1,%2"
+ [(set_attr "type" "fdiv")])
+
+(define_insn "loongson_pinsrh_1"
+ [(set (match_operand:V4HI 0 "register_operand" "=f")
+ (vec_select:V4HI
+ (vec_concat:V8HI
+ (match_operand:V4HI 1 "register_operand" "f")
+ (match_operand:V4HI 2 "register_operand" "f"))
+ (parallel [(const_int 0) (const_int 4)
+ (const_int 2) (const_int 3)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "pinsrh_1\t%0,%1,%2"
+ [(set_attr "type" "fdiv")])
+
+(define_insn "loongson_pinsrh_2"
+ [(set (match_operand:V4HI 0 "register_operand" "=f")
+ (vec_select:V4HI
+ (vec_concat:V8HI
+ (match_operand:V4HI 1 "register_operand" "f")
+ (match_operand:V4HI 2 "register_operand" "f"))
+ (parallel [(const_int 0) (const_int 1)
+ (const_int 4) (const_int 3)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "pinsrh_2\t%0,%1,%2"
+ [(set_attr "type" "fdiv")])
+
+(define_insn "loongson_pinsrh_3"
+ [(set (match_operand:V4HI 0 "register_operand" "=f")
+ (vec_select:V4HI
+ (vec_concat:V8HI
+ (match_operand:V4HI 1 "register_operand" "f")
+ (match_operand:V4HI 2 "register_operand" "f"))
+ (parallel [(const_int 0) (const_int 1)
+ (const_int 2) (const_int 4)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "pinsrh_3\t%0,%1,%2"
+ [(set_attr "type" "fdiv")])
+
+(define_insn "*vec_setv4hi"
+ [(set (match_operand:V4HI 0 "register_operand" "=f")
+ (unspec:V4HI [(match_operand:V4HI 1 "register_operand" "f")
+ (match_operand:SI 2 "register_operand" "f")
+ (match_operand:SI 3 "const_0_to_3_operand" "")]
+ UNSPEC_LOONGSON_PINSRH))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "pinsrh_%3\t%0,%1,%2"
+ [(set_attr "type" "fdiv")])
+
+(define_expand "vec_setv4hi"
+ [(set (match_operand:V4HI 0 "register_operand" "=f")
+ (unspec:V4HI [(match_operand:V4HI 1 "register_operand" "f")
+ (match_operand:HI 2 "register_operand" "f")
+ (match_operand:SI 3 "const_0_to_3_operand" "")]
+ UNSPEC_LOONGSON_PINSRH))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+{
+ rtx ext = gen_reg_rtx (SImode);
+ emit_move_insn (ext, gen_lowpart (SImode, operands[2]));
+ operands[2] = ext;
+})
+
+;; Multiply and add packed integers.
+(define_insn "loongson_pmaddhw"
+ [(set (match_operand:V2SI 0 "register_operand" "=f")
+ (unspec:V2SI [(match_operand:V4HI 1 "register_operand" "f")
+ (match_operand:V4HI 2 "register_operand" "f")]
+ UNSPEC_LOONGSON_PMADD))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "pmaddhw\t%0,%1,%2"
+ [(set_attr "type" "fmul")])
+
+(define_expand "sdot_prodv4hi"
+ [(match_operand:V2SI 0 "register_operand" "")
+ (match_operand:V4HI 1 "register_operand" "")
+ (match_operand:V4HI 2 "register_operand" "")
+ (match_operand:V2SI 3 "register_operand" "")]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+{
+ rtx t = gen_reg_rtx (V2SImode);
+ emit_insn (gen_loongson_pmaddhw (t, operands[1], operands[2]));
+ emit_insn (gen_addv2si3 (operands[0], t, operands[3]));
+ DONE;
+})
+
+;; Maximum of signed halfwords.
+(define_insn "smaxv4hi3"
+ [(set (match_operand:V4HI 0 "register_operand" "=f")
+ (smax:V4HI (match_operand:V4HI 1 "register_operand" "f")
+ (match_operand:V4HI 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "pmaxsh\t%0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+(define_expand "smax<mode>3"
+ [(match_operand:VWB 0 "register_operand" "")
+ (match_operand:VWB 1 "register_operand" "")
+ (match_operand:VWB 2 "register_operand" "")]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+{
+ mips_expand_vec_minmax (operands[0], operands[1], operands[2],
+ gen_loongson_pcmpgt<V_suffix>, false);
+ DONE;
+})
+
+;; Maximum of unsigned bytes.
+(define_insn "umaxv8qi3"
+ [(set (match_operand:V8QI 0 "register_operand" "=f")
+ (umax:V8QI (match_operand:V8QI 1 "register_operand" "f")
+ (match_operand:V8QI 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "pmaxub\t%0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; Minimum of signed halfwords.
+(define_insn "sminv4hi3"
+ [(set (match_operand:V4HI 0 "register_operand" "=f")
+ (smin:V4HI (match_operand:V4HI 1 "register_operand" "f")
+ (match_operand:V4HI 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "pminsh\t%0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+(define_expand "smin<mode>3"
+ [(match_operand:VWB 0 "register_operand" "")
+ (match_operand:VWB 1 "register_operand" "")
+ (match_operand:VWB 2 "register_operand" "")]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+{
+ mips_expand_vec_minmax (operands[0], operands[1], operands[2],
+ gen_loongson_pcmpgt<V_suffix>, true);
+ DONE;
+})
+
+;; Minimum of unsigned bytes.
+(define_insn "uminv8qi3"
+ [(set (match_operand:V8QI 0 "register_operand" "=f")
+ (umin:V8QI (match_operand:V8QI 1 "register_operand" "f")
+ (match_operand:V8QI 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "pminub\t%0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; Move byte mask.
+(define_insn "loongson_pmovmsk<V_suffix>"
+ [(set (match_operand:VB 0 "register_operand" "=f")
+ (unspec:VB [(match_operand:VB 1 "register_operand" "f")]
+ UNSPEC_LOONGSON_PMOVMSK))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "pmovmsk<V_suffix>\t%0,%1"
+ [(set_attr "type" "fabs")])
+
+;; Multiply unsigned integers and store high result.
+(define_insn "umul<mode>3_highpart"
+ [(set (match_operand:VH 0 "register_operand" "=f")
+ (unspec:VH [(match_operand:VH 1 "register_operand" "f")
+ (match_operand:VH 2 "register_operand" "f")]
+ UNSPEC_LOONGSON_PMULHU))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "pmulhu<V_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fmul")])
+
+;; Multiply signed integers and store high result.
+(define_insn "smul<mode>3_highpart"
+ [(set (match_operand:VH 0 "register_operand" "=f")
+ (unspec:VH [(match_operand:VH 1 "register_operand" "f")
+ (match_operand:VH 2 "register_operand" "f")]
+ UNSPEC_LOONGSON_PMULH))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "pmulh<V_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fmul")])
+
+;; Multiply signed integers and store low result.
+(define_insn "mul<mode>3"
+ [(set (match_operand:VH 0 "register_operand" "=f")
+ (mult:VH (match_operand:VH 1 "register_operand" "f")
+ (match_operand:VH 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "pmull<V_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fmul")])
+
+;; Multiply unsigned word integers.
+(define_insn "loongson_pmulu<V_suffix>"
+ [(set (match_operand:DI 0 "register_operand" "=f")
+ (unspec:DI [(match_operand:VW 1 "register_operand" "f")
+ (match_operand:VW 2 "register_operand" "f")]
+ UNSPEC_LOONGSON_PMULU))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "pmulu<V_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fmul")])
+
+;; Absolute difference.
+(define_insn "loongson_pasubub"
+ [(set (match_operand:VB 0 "register_operand" "=f")
+ (unspec:VB [(match_operand:VB 1 "register_operand" "f")
+ (match_operand:VB 2 "register_operand" "f")]
+ UNSPEC_LOONGSON_PASUBUB))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "pasubub\t%0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; Sum of unsigned byte integers.
+(define_insn "loongson_biadd"
+ [(set (match_operand:<V_stretch_half> 0 "register_operand" "=f")
+ (unspec:<V_stretch_half> [(match_operand:VB 1 "register_operand" "f")]
+ UNSPEC_LOONGSON_BIADD))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "biadd\t%0,%1"
+ [(set_attr "type" "fabs")])
+
+(define_insn "reduc_uplus_v8qi"
+ [(set (match_operand:V8QI 0 "register_operand" "=f")
+ (unspec:V8QI [(match_operand:V8QI 1 "register_operand" "f")]
+ UNSPEC_LOONGSON_BIADD))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "biadd\t%0,%1"
+ [(set_attr "type" "fabs")])
+
+;; Sum of absolute differences.
+(define_insn "loongson_psadbh"
+ [(set (match_operand:<V_stretch_half> 0 "register_operand" "=f")
+ (unspec:<V_stretch_half> [(match_operand:VB 1 "register_operand" "f")
+ (match_operand:VB 2 "register_operand" "f")]
+ UNSPEC_LOONGSON_PSADBH))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "pasubub\t%0,%1,%2;biadd\t%0,%0"
+ [(set_attr "type" "fadd")])
+
+;; Shuffle halfwords.
+(define_insn "loongson_pshufh"
+ [(set (match_operand:VH 0 "register_operand" "=f")
+ (unspec:VH [(match_operand:VH 1 "register_operand" "f")
+ (match_operand:SI 2 "register_operand" "f")]
+ UNSPEC_LOONGSON_PSHUFH))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "pshufh\t%0,%1,%2"
+ [(set_attr "type" "fmul")])
+
+;; Shift left logical.
+(define_insn "ashl<mode>3"
+ [(set (match_operand:VWH 0 "register_operand" "=f")
+ (ashift:VWH (match_operand:VWH 1 "register_operand" "f")
+ (match_operand:SI 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "psll<V_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fcvt")])
+
+;; Shift right arithmetic.
+(define_insn "ashr<mode>3"
+ [(set (match_operand:VWH 0 "register_operand" "=f")
+ (ashiftrt:VWH (match_operand:VWH 1 "register_operand" "f")
+ (match_operand:SI 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "psra<V_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fcvt")])
+
+;; Shift right logical.
+(define_insn "lshr<mode>3"
+ [(set (match_operand:VWH 0 "register_operand" "=f")
+ (lshiftrt:VWH (match_operand:VWH 1 "register_operand" "f")
+ (match_operand:SI 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "psrl<V_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fcvt")])
+
+;; Subtraction, treating overflow by wraparound.
+(define_insn "sub<mode>3"
+ [(set (match_operand:VWHB 0 "register_operand" "=f")
+ (minus:VWHB (match_operand:VWHB 1 "register_operand" "f")
+ (match_operand:VWHB 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "psub<V_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; Subtraction of doubleword integers stored in FP registers.
+;; Overflow is treated by wraparound.
+;; See loongson_paddd for the reason we use 'unspec' rather than
+;; 'minus' here.
+(define_insn "loongson_psubd"
+ [(set (match_operand:DI 0 "register_operand" "=f")
+ (unspec:DI [(match_operand:DI 1 "register_operand" "f")
+ (match_operand:DI 2 "register_operand" "f")]
+ UNSPEC_LOONGSON_PSUBD))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "psubd\t%0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; Subtraction, treating overflow by signed saturation.
+(define_insn "sssub<mode>3"
+ [(set (match_operand:VHB 0 "register_operand" "=f")
+ (ss_minus:VHB (match_operand:VHB 1 "register_operand" "f")
+ (match_operand:VHB 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "psubs<V_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; Subtraction, treating overflow by unsigned saturation.
+(define_insn "ussub<mode>3"
+ [(set (match_operand:VHB 0 "register_operand" "=f")
+ (us_minus:VHB (match_operand:VHB 1 "register_operand" "f")
+ (match_operand:VHB 2 "register_operand" "f")))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "psubus<V_suffix>\t%0,%1,%2"
+ [(set_attr "type" "fadd")])
+
+;; Unpack high data. Recall that Loongson only runs in little-endian.
+(define_insn "loongson_punpckhbh"
+ [(set (match_operand:V8QI 0 "register_operand" "=f")
+ (vec_select:V8QI
+ (vec_concat:V16QI
+ (match_operand:V8QI 1 "register_operand" "f")
+ (match_operand:V8QI 2 "register_operand" "f"))
+ (parallel [(const_int 4) (const_int 12)
+ (const_int 5) (const_int 13)
+ (const_int 6) (const_int 14)
+ (const_int 7) (const_int 15)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "punpckhbh\t%0,%1,%2"
+ [(set_attr "type" "fdiv")])
+
+(define_insn "loongson_punpckhhw"
+ [(set (match_operand:V4HI 0 "register_operand" "=f")
+ (vec_select:V4HI
+ (vec_concat:V8HI
+ (match_operand:V4HI 1 "register_operand" "f")
+ (match_operand:V4HI 2 "register_operand" "f"))
+ (parallel [(const_int 2) (const_int 6)
+ (const_int 3) (const_int 7)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "punpckhhw\t%0,%1,%2"
+ [(set_attr "type" "fdiv")])
+
+(define_insn "loongson_punpckhhw_qi"
+ [(set (match_operand:V8QI 0 "register_operand" "=f")
+ (vec_select:V8QI
+ (vec_concat:V16QI
+ (match_operand:V8QI 1 "register_operand" "f")
+ (match_operand:V8QI 2 "register_operand" "f"))
+ (parallel [(const_int 4) (const_int 5)
+ (const_int 12) (const_int 13)
+ (const_int 6) (const_int 7)
+ (const_int 14) (const_int 15)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "punpckhhw\t%0,%1,%2"
+ [(set_attr "type" "fdiv")])
+
+(define_insn "loongson_punpckhwd"
+ [(set (match_operand:V2SI 0 "register_operand" "=f")
+ (vec_select:V2SI
+ (vec_concat:V4SI
+ (match_operand:V2SI 1 "register_operand" "f")
+ (match_operand:V2SI 2 "register_operand" "f"))
+ (parallel [(const_int 1) (const_int 3)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "punpckhwd\t%0,%1,%2"
+ [(set_attr "type" "fcvt")])
+
+(define_insn "loongson_punpckhwd_qi"
+ [(set (match_operand:V8QI 0 "register_operand" "=f")
+ (vec_select:V8QI
+ (vec_concat:V16QI
+ (match_operand:V8QI 1 "register_operand" "f")
+ (match_operand:V8QI 2 "register_operand" "f"))
+ (parallel [(const_int 4) (const_int 5)
+ (const_int 6) (const_int 7)
+ (const_int 12) (const_int 13)
+ (const_int 14) (const_int 15)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "punpckhwd\t%0,%1,%2"
+ [(set_attr "type" "fcvt")])
+
+(define_insn "loongson_punpckhwd_hi"
+ [(set (match_operand:V4HI 0 "register_operand" "=f")
+ (vec_select:V4HI
+ (vec_concat:V8HI
+ (match_operand:V4HI 1 "register_operand" "f")
+ (match_operand:V4HI 2 "register_operand" "f"))
+ (parallel [(const_int 2) (const_int 3)
+ (const_int 6) (const_int 7)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "punpckhwd\t%0,%1,%2"
+ [(set_attr "type" "fcvt")])
+
+;; Unpack low data.
+(define_insn "loongson_punpcklbh"
+ [(set (match_operand:V8QI 0 "register_operand" "=f")
+ (vec_select:V8QI
+ (vec_concat:V16QI
+ (match_operand:V8QI 1 "register_operand" "f")
+ (match_operand:V8QI 2 "register_operand" "f"))
+ (parallel [(const_int 0) (const_int 8)
+ (const_int 1) (const_int 9)
+ (const_int 2) (const_int 10)
+ (const_int 3) (const_int 11)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "punpcklbh\t%0,%1,%2"
+ [(set_attr "type" "fdiv")])
+
+(define_insn "loongson_punpcklhw"
+ [(set (match_operand:V4HI 0 "register_operand" "=f")
+ (vec_select:V4HI
+ (vec_concat:V8HI
+ (match_operand:V4HI 1 "register_operand" "f")
+ (match_operand:V4HI 2 "register_operand" "f"))
+ (parallel [(const_int 0) (const_int 4)
+ (const_int 1) (const_int 5)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "punpcklhw\t%0,%1,%2"
+ [(set_attr "type" "fdiv")])
+
+(define_insn "*loongson_punpcklhw_qi"
+ [(set (match_operand:V8QI 0 "register_operand" "=f")
+ (vec_select:V8QI
+ (vec_concat:V16QI
+ (match_operand:V8QI 1 "register_operand" "f")
+ (match_operand:V8QI 2 "register_operand" "f"))
+ (parallel [(const_int 0) (const_int 1)
+ (const_int 8) (const_int 9)
+ (const_int 2) (const_int 3)
+ (const_int 10) (const_int 11)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "punpcklhw\t%0,%1,%2"
+ [(set_attr "type" "fdiv")])
+
+(define_insn "loongson_punpcklwd"
+ [(set (match_operand:V2SI 0 "register_operand" "=f")
+ (vec_select:V2SI
+ (vec_concat:V4SI
+ (match_operand:V2SI 1 "register_operand" "f")
+ (match_operand:V2SI 2 "register_operand" "f"))
+ (parallel [(const_int 0) (const_int 2)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "punpcklwd\t%0,%1,%2"
+ [(set_attr "type" "fcvt")])
+
+(define_insn "*loongson_punpcklwd_qi"
+ [(set (match_operand:V8QI 0 "register_operand" "=f")
+ (vec_select:V8QI
+ (vec_concat:V16QI
+ (match_operand:V8QI 1 "register_operand" "f")
+ (match_operand:V8QI 2 "register_operand" "f"))
+ (parallel [(const_int 0) (const_int 1)
+ (const_int 2) (const_int 3)
+ (const_int 8) (const_int 9)
+ (const_int 10) (const_int 11)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "punpcklwd\t%0,%1,%2"
+ [(set_attr "type" "fcvt")])
+
+(define_insn "*loongson_punpcklwd_hi"
+ [(set (match_operand:V4HI 0 "register_operand" "=f")
+ (vec_select:V4HI
+ (vec_concat:V8HI
+ (match_operand:V4HI 1 "register_operand" "f")
+ (match_operand:V4HI 2 "register_operand" "f"))
+ (parallel [(const_int 0) (const_int 1)
+ (const_int 4) (const_int 5)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "punpcklwd\t%0,%1,%2"
+ [(set_attr "type" "fcvt")])
+
+(define_expand "vec_unpacks_lo_<mode>"
+ [(match_operand:<V_stretch_half> 0 "register_operand" "")
+ (match_operand:VHB 1 "register_operand" "")]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+{
+ mips_expand_vec_unpack (operands, false, false);
+ DONE;
+})
+
+(define_expand "vec_unpacks_hi_<mode>"
+ [(match_operand:<V_stretch_half> 0 "register_operand" "")
+ (match_operand:VHB 1 "register_operand" "")]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+{
+ mips_expand_vec_unpack (operands, false, true);
+ DONE;
+})
+
+(define_expand "vec_unpacku_lo_<mode>"
+ [(match_operand:<V_stretch_half> 0 "register_operand" "")
+ (match_operand:VHB 1 "register_operand" "")]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+{
+ mips_expand_vec_unpack (operands, true, false);
+ DONE;
+})
+
+(define_expand "vec_unpacku_hi_<mode>"
+ [(match_operand:<V_stretch_half> 0 "register_operand" "")
+ (match_operand:VHB 1 "register_operand" "")]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+{
+ mips_expand_vec_unpack (operands, true, true);
+ DONE;
+})
+
+;; Whole vector shifts, used for reduction epilogues.
+(define_insn "vec_shl_<mode>"
+ [(set (match_operand:VWHBDI 0 "register_operand" "=f")
+ (unspec:VWHBDI [(match_operand:VWHBDI 1 "register_operand" "f")
+ (match_operand:SI 2 "register_operand" "f")]
+ UNSPEC_LOONGSON_DSLL))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "dsll\t%0,%1,%2"
+ [(set_attr "type" "fcvt")])
+
+(define_insn "vec_shr_<mode>"
+ [(set (match_operand:VWHBDI 0 "register_operand" "=f")
+ (unspec:VWHBDI [(match_operand:VWHBDI 1 "register_operand" "f")
+ (match_operand:SI 2 "register_operand" "f")]
+ UNSPEC_LOONGSON_DSRL))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "dsrl\t%0,%1,%2"
+ [(set_attr "type" "fcvt")])
+
+(define_insn "vec_loongson_extract_lo_<mode>"
+ [(set (match_operand:<V_inner> 0 "register_operand" "=r")
+ (vec_select:<V_inner>
+ (match_operand:VWHB 1 "register_operand" "f")
+ (parallel [(const_int 0)])))]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+ "mfc1\t%0,%1"
+ [(set_attr "type" "mfc")])
+
+(define_expand "reduc_plus_scal_<mode>"
+ [(match_operand:<V_inner> 0 "register_operand" "")
+ (match_operand:VWHB 1 "register_operand" "")]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+{
+ rtx tmp = gen_reg_rtx (GET_MODE (operands[1]));
+ mips_expand_vec_reduc (tmp, operands[1], gen_add<mode>3);
+ emit_insn (gen_vec_loongson_extract_lo_<mode> (operands[0], tmp));
+ DONE;
+})
+
+(define_expand "reduc_smax_scal_<mode>"
+ [(match_operand:<V_inner> 0 "register_operand" "")
+ (match_operand:VWHB 1 "register_operand" "")]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+{
+ rtx tmp = gen_reg_rtx (GET_MODE (operands[1]));
+ mips_expand_vec_reduc (tmp, operands[1], gen_smax<mode>3);
+ emit_insn (gen_vec_loongson_extract_lo_<mode> (operands[0], tmp));
+ DONE;
+})
+
+(define_expand "reduc_smin_scal_<mode>"
+ [(match_operand:<V_inner> 0 "register_operand" "")
+ (match_operand:VWHB 1 "register_operand" "")]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+{
+ rtx tmp = gen_reg_rtx (GET_MODE (operands[1]));
+ mips_expand_vec_reduc (tmp, operands[1], gen_smin<mode>3);
+ emit_insn (gen_vec_loongson_extract_lo_<mode> (operands[0], tmp));
+ DONE;
+})
+
+(define_expand "reduc_umax_scal_<mode>"
+ [(match_operand:<V_inner> 0 "register_operand" "")
+ (match_operand:VB 1 "register_operand" "")]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+{
+ rtx tmp = gen_reg_rtx (GET_MODE (operands[1]));
+ mips_expand_vec_reduc (tmp, operands[1], gen_umax<mode>3);
+ emit_insn (gen_vec_loongson_extract_lo_<mode> (operands[0], tmp));
+ DONE;
+})
+
+(define_expand "reduc_umin_scal_<mode>"
+ [(match_operand:<V_inner> 0 "register_operand" "")
+ (match_operand:VB 1 "register_operand" "")]
+ "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI"
+{
+ rtx tmp = gen_reg_rtx (GET_MODE (operands[1]));
+ mips_expand_vec_reduc (tmp, operands[1], gen_umin<mode>3);
+ emit_insn (gen_vec_loongson_extract_lo_<mode> (operands[0], tmp));
+ DONE;
+})
--- /dev/null
+/* Intrinsics for Loongson MultiMedia extension Instructions (MMI).
+
+ Copyright (C) 2008-2018 Free Software Foundation, Inc.
+ Contributed by CodeSourcery.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _GCC_LOONGSON_MMIINTRIN_H
+#define _GCC_LOONGSON_MMIINTRIN_H
+
+#if !defined(__mips_loongson_mmi)
+# error "You must select -mloongson-mmi or -march=loongson2e/2f/3a to use loongson-mmiintrin.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/* Vectors of unsigned bytes, halfwords and words. */
+typedef uint8_t uint8x8_t __attribute__((vector_size (8)));
+typedef uint16_t uint16x4_t __attribute__((vector_size (8)));
+typedef uint32_t uint32x2_t __attribute__((vector_size (8)));
+
+/* Vectors of signed bytes, halfwords and words. */
+typedef int8_t int8x8_t __attribute__((vector_size (8)));
+typedef int16_t int16x4_t __attribute__((vector_size (8)));
+typedef int32_t int32x2_t __attribute__((vector_size (8)));
+
+/* SIMD intrinsics.
+ Unless otherwise noted, calls to the functions below will expand into
+ precisely one machine instruction, modulo any moves required to
+ satisfy register allocation constraints. */
+
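+/* A minimal usage sketch (illustrative only): values of the vector
+   types above can be built with GCC's generic vector-initializer
+   syntax and passed directly to the intrinsics, for example
+
+     int16x4_t a = { 1, 2, 3, 4 };
+     int16x4_t b = { 5, 6, 7, 8 };
+     int16x4_t c = paddh_s (a, b);
+
+   leaves c holding { 6, 8, 10, 12 }, assuming the translation unit is
+   compiled with -mloongson-mmi (or an -march value that implies it)
+   and with hard float enabled.  */
+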
+/* Pack with signed saturation. */
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+packsswh (int32x2_t s, int32x2_t t)
+{
+ return __builtin_loongson_packsswh (s, t);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+packsshb (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_packsshb (s, t);
+}
+
+/* Pack with unsigned saturation. */
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+packushb (uint16x4_t s, uint16x4_t t)
+{
+ return __builtin_loongson_packushb (s, t);
+}
+
+/* Vector addition, treating overflow by wraparound. */
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+paddw_u (uint32x2_t s, uint32x2_t t)
+{
+ return __builtin_loongson_paddw_u (s, t);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+paddh_u (uint16x4_t s, uint16x4_t t)
+{
+ return __builtin_loongson_paddh_u (s, t);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+paddb_u (uint8x8_t s, uint8x8_t t)
+{
+ return __builtin_loongson_paddb_u (s, t);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+paddw_s (int32x2_t s, int32x2_t t)
+{
+ return __builtin_loongson_paddw_s (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+paddh_s (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_paddh_s (s, t);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+paddb_s (int8x8_t s, int8x8_t t)
+{
+ return __builtin_loongson_paddb_s (s, t);
+}
+
+/* Addition of doubleword integers, treating overflow by wraparound. */
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+paddd_u (uint64_t s, uint64_t t)
+{
+ return __builtin_loongson_paddd_u (s, t);
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+paddd_s (int64_t s, int64_t t)
+{
+ return __builtin_loongson_paddd_s (s, t);
+}
+
+/* Vector addition, treating overflow by signed saturation. */
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+paddsh (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_paddsh (s, t);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+paddsb (int8x8_t s, int8x8_t t)
+{
+ return __builtin_loongson_paddsb (s, t);
+}
+
+/* Vector addition, treating overflow by unsigned saturation. */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+paddush (uint16x4_t s, uint16x4_t t)
+{
+ return __builtin_loongson_paddush (s, t);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+paddusb (uint8x8_t s, uint8x8_t t)
+{
+ return __builtin_loongson_paddusb (s, t);
+}
+
+/* Logical AND NOT. */
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+pandn_ud (uint64_t s, uint64_t t)
+{
+ return __builtin_loongson_pandn_ud (s, t);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+pandn_uw (uint32x2_t s, uint32x2_t t)
+{
+ return __builtin_loongson_pandn_uw (s, t);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pandn_uh (uint16x4_t s, uint16x4_t t)
+{
+ return __builtin_loongson_pandn_uh (s, t);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+pandn_ub (uint8x8_t s, uint8x8_t t)
+{
+ return __builtin_loongson_pandn_ub (s, t);
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+pandn_sd (int64_t s, int64_t t)
+{
+ return __builtin_loongson_pandn_sd (s, t);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+pandn_sw (int32x2_t s, int32x2_t t)
+{
+ return __builtin_loongson_pandn_sw (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pandn_sh (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_pandn_sh (s, t);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+pandn_sb (int8x8_t s, int8x8_t t)
+{
+ return __builtin_loongson_pandn_sb (s, t);
+}
+
+/* Average. */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pavgh (uint16x4_t s, uint16x4_t t)
+{
+ return __builtin_loongson_pavgh (s, t);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+pavgb (uint8x8_t s, uint8x8_t t)
+{
+ return __builtin_loongson_pavgb (s, t);
+}
+
+/* Equality test. */
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+pcmpeqw_u (uint32x2_t s, uint32x2_t t)
+{
+ return __builtin_loongson_pcmpeqw_u (s, t);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pcmpeqh_u (uint16x4_t s, uint16x4_t t)
+{
+ return __builtin_loongson_pcmpeqh_u (s, t);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+pcmpeqb_u (uint8x8_t s, uint8x8_t t)
+{
+ return __builtin_loongson_pcmpeqb_u (s, t);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+pcmpeqw_s (int32x2_t s, int32x2_t t)
+{
+ return __builtin_loongson_pcmpeqw_s (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pcmpeqh_s (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_pcmpeqh_s (s, t);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+pcmpeqb_s (int8x8_t s, int8x8_t t)
+{
+ return __builtin_loongson_pcmpeqb_s (s, t);
+}
+
+/* Greater-than test. */
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+pcmpgtw_u (uint32x2_t s, uint32x2_t t)
+{
+ return __builtin_loongson_pcmpgtw_u (s, t);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pcmpgth_u (uint16x4_t s, uint16x4_t t)
+{
+ return __builtin_loongson_pcmpgth_u (s, t);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+pcmpgtb_u (uint8x8_t s, uint8x8_t t)
+{
+ return __builtin_loongson_pcmpgtb_u (s, t);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+pcmpgtw_s (int32x2_t s, int32x2_t t)
+{
+ return __builtin_loongson_pcmpgtw_s (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pcmpgth_s (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_pcmpgth_s (s, t);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+pcmpgtb_s (int8x8_t s, int8x8_t t)
+{
+ return __builtin_loongson_pcmpgtb_s (s, t);
+}
+
+/* Extract halfword. */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pextrh_u (uint16x4_t s, int field /* 0--3. */)
+{
+ return __builtin_loongson_pextrh_u (s, field);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pextrh_s (int16x4_t s, int field /* 0--3. */)
+{
+ return __builtin_loongson_pextrh_s (s, field);
+}
+
+/* Insert halfword. */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pinsrh_0_u (uint16x4_t s, uint16x4_t t)
+{
+ return __builtin_loongson_pinsrh_0_u (s, t);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pinsrh_1_u (uint16x4_t s, uint16x4_t t)
+{
+ return __builtin_loongson_pinsrh_1_u (s, t);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pinsrh_2_u (uint16x4_t s, uint16x4_t t)
+{
+ return __builtin_loongson_pinsrh_2_u (s, t);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pinsrh_3_u (uint16x4_t s, uint16x4_t t)
+{
+ return __builtin_loongson_pinsrh_3_u (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pinsrh_0_s (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_pinsrh_0_s (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pinsrh_1_s (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_pinsrh_1_s (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pinsrh_2_s (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_pinsrh_2_s (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pinsrh_3_s (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_pinsrh_3_s (s, t);
+}
+
+/* Multiply and add. */
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+pmaddhw (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_pmaddhw (s, t);
+}
+
+/* Maximum of signed halfwords. */
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pmaxsh (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_pmaxsh (s, t);
+}
+
+/* Maximum of unsigned bytes. */
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+pmaxub (uint8x8_t s, uint8x8_t t)
+{
+ return __builtin_loongson_pmaxub (s, t);
+}
+
+/* Minimum of signed halfwords. */
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pminsh (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_pminsh (s, t);
+}
+
+/* Minimum of unsigned bytes. */
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+pminub (uint8x8_t s, uint8x8_t t)
+{
+ return __builtin_loongson_pminub (s, t);
+}
+
+/* Move byte mask. */
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+pmovmskb_u (uint8x8_t s)
+{
+ return __builtin_loongson_pmovmskb_u (s);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+pmovmskb_s (int8x8_t s)
+{
+ return __builtin_loongson_pmovmskb_s (s);
+}
+
+/* Multiply unsigned integers and store high result. */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pmulhuh (uint16x4_t s, uint16x4_t t)
+{
+ return __builtin_loongson_pmulhuh (s, t);
+}
+
+/* Multiply signed integers and store high result. */
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pmulhh (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_pmulhh (s, t);
+}
+
+/* Multiply signed integers and store low result. */
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pmullh (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_pmullh (s, t);
+}
+
+/* Multiply unsigned word integers. */
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+pmuluw (uint32x2_t s, uint32x2_t t)
+{
+ return __builtin_loongson_pmuluw (s, t);
+}
+
+/* Absolute difference. */
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+pasubub (uint8x8_t s, uint8x8_t t)
+{
+ return __builtin_loongson_pasubub (s, t);
+}
+
+/* Sum of unsigned byte integers. */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+biadd (uint8x8_t s)
+{
+ return __builtin_loongson_biadd (s);
+}
+
+/* Sum of absolute differences.
+ Note that this intrinsic expands into two machine instructions:
+ PASUBUB followed by BIADD. */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+psadbh (uint8x8_t s, uint8x8_t t)
+{
+ return __builtin_loongson_psadbh (s, t);
+}
+
+/* Shuffle halfwords. */
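+/* Note: the DEST argument of the two functions below is not passed to
+   the underlying builtin and has no effect on the result; it appears
+   to be retained only so that existing callers need not change.  */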
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+pshufh_u (uint16x4_t dest, uint16x4_t s, uint8_t order)
+{
+ return __builtin_loongson_pshufh_u (s, order);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+pshufh_s (int16x4_t dest, int16x4_t s, uint8_t order)
+{
+ return __builtin_loongson_pshufh_s (s, order);
+}
+
+/* Shift left logical. */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+psllh_u (uint16x4_t s, uint8_t amount)
+{
+ return __builtin_loongson_psllh_u (s, amount);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+psllh_s (int16x4_t s, uint8_t amount)
+{
+ return __builtin_loongson_psllh_s (s, amount);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+psllw_u (uint32x2_t s, uint8_t amount)
+{
+ return __builtin_loongson_psllw_u (s, amount);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+psllw_s (int32x2_t s, uint8_t amount)
+{
+ return __builtin_loongson_psllw_s (s, amount);
+}
+
+/* Shift right logical. */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+psrlh_u (uint16x4_t s, uint8_t amount)
+{
+ return __builtin_loongson_psrlh_u (s, amount);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+psrlh_s (int16x4_t s, uint8_t amount)
+{
+ return __builtin_loongson_psrlh_s (s, amount);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+psrlw_u (uint32x2_t s, uint8_t amount)
+{
+ return __builtin_loongson_psrlw_u (s, amount);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+psrlw_s (int32x2_t s, uint8_t amount)
+{
+ return __builtin_loongson_psrlw_s (s, amount);
+}
+
+/* Shift right arithmetic. */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+psrah_u (uint16x4_t s, uint8_t amount)
+{
+ return __builtin_loongson_psrah_u (s, amount);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+psrah_s (int16x4_t s, uint8_t amount)
+{
+ return __builtin_loongson_psrah_s (s, amount);
+}
+
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+psraw_u (uint32x2_t s, uint8_t amount)
+{
+ return __builtin_loongson_psraw_u (s, amount);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+psraw_s (int32x2_t s, uint8_t amount)
+{
+ return __builtin_loongson_psraw_s (s, amount);
+}
+
+/* Vector subtraction, treating overflow by wraparound. */
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+psubw_u (uint32x2_t s, uint32x2_t t)
+{
+ return __builtin_loongson_psubw_u (s, t);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+psubh_u (uint16x4_t s, uint16x4_t t)
+{
+ return __builtin_loongson_psubh_u (s, t);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+psubb_u (uint8x8_t s, uint8x8_t t)
+{
+ return __builtin_loongson_psubb_u (s, t);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+psubw_s (int32x2_t s, int32x2_t t)
+{
+ return __builtin_loongson_psubw_s (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+psubh_s (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_psubh_s (s, t);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+psubb_s (int8x8_t s, int8x8_t t)
+{
+ return __builtin_loongson_psubb_s (s, t);
+}
+
+/* Subtraction of doubleword integers, treating overflow by wraparound. */
+__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
+psubd_u (uint64_t s, uint64_t t)
+{
+ return __builtin_loongson_psubd_u (s, t);
+}
+
+__extension__ static __inline int64_t __attribute__ ((__always_inline__))
+psubd_s (int64_t s, int64_t t)
+{
+ return __builtin_loongson_psubd_s (s, t);
+}
+
+/* Vector subtraction, treating overflow by signed saturation. */
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+psubsh (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_psubsh (s, t);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+psubsb (int8x8_t s, int8x8_t t)
+{
+ return __builtin_loongson_psubsb (s, t);
+}
+
+/* Vector subtraction, treating overflow by unsigned saturation. */
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+psubush (uint16x4_t s, uint16x4_t t)
+{
+ return __builtin_loongson_psubush (s, t);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+psubusb (uint8x8_t s, uint8x8_t t)
+{
+ return __builtin_loongson_psubusb (s, t);
+}
+
+/* Unpack high data. */
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+punpckhwd_u (uint32x2_t s, uint32x2_t t)
+{
+ return __builtin_loongson_punpckhwd_u (s, t);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+punpckhhw_u (uint16x4_t s, uint16x4_t t)
+{
+ return __builtin_loongson_punpckhhw_u (s, t);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+punpckhbh_u (uint8x8_t s, uint8x8_t t)
+{
+ return __builtin_loongson_punpckhbh_u (s, t);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+punpckhwd_s (int32x2_t s, int32x2_t t)
+{
+ return __builtin_loongson_punpckhwd_s (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+punpckhhw_s (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_punpckhhw_s (s, t);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+punpckhbh_s (int8x8_t s, int8x8_t t)
+{
+ return __builtin_loongson_punpckhbh_s (s, t);
+}
+
+/* Unpack low data. */
+__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
+punpcklwd_u (uint32x2_t s, uint32x2_t t)
+{
+ return __builtin_loongson_punpcklwd_u (s, t);
+}
+
+__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
+punpcklhw_u (uint16x4_t s, uint16x4_t t)
+{
+ return __builtin_loongson_punpcklhw_u (s, t);
+}
+
+__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
+punpcklbh_u (uint8x8_t s, uint8x8_t t)
+{
+ return __builtin_loongson_punpcklbh_u (s, t);
+}
+
+__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
+punpcklwd_s (int32x2_t s, int32x2_t t)
+{
+ return __builtin_loongson_punpcklwd_s (s, t);
+}
+
+__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
+punpcklhw_s (int16x4_t s, int16x4_t t)
+{
+ return __builtin_loongson_punpcklhw_s (s, t);
+}
+
+__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
+punpcklbh_s (int8x8_t s, int8x8_t t)
+{
+ return __builtin_loongson_punpcklbh_s (s, t);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
-/* Intrinsics for ST Microelectronics Loongson-2E/2F SIMD operations.
+/* Intrinsics for Loongson MultiMedia extension Instructions (MMI).
Copyright (C) 2008-2018 Free Software Foundation, Inc.
Contributed by CodeSourcery.
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
<http://www.gnu.org/licenses/>. */
-#ifndef _GCC_LOONGSON_H
-#define _GCC_LOONGSON_H
-
-#if !defined(__mips_loongson_vector_rev)
-# error "You must select -march=loongson2e or -march=loongson2f to use loongson.h"
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <stdint.h>
-
-/* Vectors of unsigned bytes, halfwords and words. */
-typedef uint8_t uint8x8_t __attribute__((vector_size (8)));
-typedef uint16_t uint16x4_t __attribute__((vector_size (8)));
-typedef uint32_t uint32x2_t __attribute__((vector_size (8)));
-
-/* Vectors of signed bytes, halfwords and words. */
-typedef int8_t int8x8_t __attribute__((vector_size (8)));
-typedef int16_t int16x4_t __attribute__((vector_size (8)));
-typedef int32_t int32x2_t __attribute__((vector_size (8)));
-
-/* SIMD intrinsics.
- Unless otherwise noted, calls to the functions below will expand into
- precisely one machine instruction, modulo any moves required to
- satisfy register allocation constraints. */
-
-/* Pack with signed saturation. */
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-packsswh (int32x2_t s, int32x2_t t)
-{
- return __builtin_loongson_packsswh (s, t);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-packsshb (int16x4_t s, int16x4_t t)
-{
- return __builtin_loongson_packsshb (s, t);
-}
-
-/* Pack with unsigned saturation. */
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-packushb (uint16x4_t s, uint16x4_t t)
-{
- return __builtin_loongson_packushb (s, t);
-}
-
-/* Vector addition, treating overflow by wraparound. */
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-paddw_u (uint32x2_t s, uint32x2_t t)
-{
- return __builtin_loongson_paddw_u (s, t);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-paddh_u (uint16x4_t s, uint16x4_t t)
-{
- return __builtin_loongson_paddh_u (s, t);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-paddb_u (uint8x8_t s, uint8x8_t t)
-{
- return __builtin_loongson_paddb_u (s, t);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-paddw_s (int32x2_t s, int32x2_t t)
-{
- return __builtin_loongson_paddw_s (s, t);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-paddh_s (int16x4_t s, int16x4_t t)
-{
- return __builtin_loongson_paddh_s (s, t);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-paddb_s (int8x8_t s, int8x8_t t)
-{
- return __builtin_loongson_paddb_s (s, t);
-}
-
-/* Addition of doubleword integers, treating overflow by wraparound. */
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
-paddd_u (uint64_t s, uint64_t t)
-{
- return __builtin_loongson_paddd_u (s, t);
-}
-
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
-paddd_s (int64_t s, int64_t t)
-{
- return __builtin_loongson_paddd_s (s, t);
-}
-
-/* Vector addition, treating overflow by signed saturation. */
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-paddsh (int16x4_t s, int16x4_t t)
-{
- return __builtin_loongson_paddsh (s, t);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-paddsb (int8x8_t s, int8x8_t t)
-{
- return __builtin_loongson_paddsb (s, t);
-}
-
-/* Vector addition, treating overflow by unsigned saturation. */
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-paddush (uint16x4_t s, uint16x4_t t)
-{
- return __builtin_loongson_paddush (s, t);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-paddusb (uint8x8_t s, uint8x8_t t)
-{
- return __builtin_loongson_paddusb (s, t);
-}
-
-/* Logical AND NOT. */
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
-pandn_ud (uint64_t s, uint64_t t)
-{
- return __builtin_loongson_pandn_ud (s, t);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-pandn_uw (uint32x2_t s, uint32x2_t t)
-{
- return __builtin_loongson_pandn_uw (s, t);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-pandn_uh (uint16x4_t s, uint16x4_t t)
-{
- return __builtin_loongson_pandn_uh (s, t);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-pandn_ub (uint8x8_t s, uint8x8_t t)
-{
- return __builtin_loongson_pandn_ub (s, t);
-}
-
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
-pandn_sd (int64_t s, int64_t t)
-{
- return __builtin_loongson_pandn_sd (s, t);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-pandn_sw (int32x2_t s, int32x2_t t)
-{
- return __builtin_loongson_pandn_sw (s, t);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-pandn_sh (int16x4_t s, int16x4_t t)
-{
- return __builtin_loongson_pandn_sh (s, t);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-pandn_sb (int8x8_t s, int8x8_t t)
-{
- return __builtin_loongson_pandn_sb (s, t);
-}
-
-/* Average. */
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-pavgh (uint16x4_t s, uint16x4_t t)
-{
- return __builtin_loongson_pavgh (s, t);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-pavgb (uint8x8_t s, uint8x8_t t)
-{
- return __builtin_loongson_pavgb (s, t);
-}
-
-/* Equality test. */
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-pcmpeqw_u (uint32x2_t s, uint32x2_t t)
-{
- return __builtin_loongson_pcmpeqw_u (s, t);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-pcmpeqh_u (uint16x4_t s, uint16x4_t t)
-{
- return __builtin_loongson_pcmpeqh_u (s, t);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-pcmpeqb_u (uint8x8_t s, uint8x8_t t)
-{
- return __builtin_loongson_pcmpeqb_u (s, t);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-pcmpeqw_s (int32x2_t s, int32x2_t t)
-{
- return __builtin_loongson_pcmpeqw_s (s, t);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-pcmpeqh_s (int16x4_t s, int16x4_t t)
-{
- return __builtin_loongson_pcmpeqh_s (s, t);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-pcmpeqb_s (int8x8_t s, int8x8_t t)
-{
- return __builtin_loongson_pcmpeqb_s (s, t);
-}
-
-/* Greater-than test. */
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-pcmpgtw_u (uint32x2_t s, uint32x2_t t)
-{
- return __builtin_loongson_pcmpgtw_u (s, t);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-pcmpgth_u (uint16x4_t s, uint16x4_t t)
-{
- return __builtin_loongson_pcmpgth_u (s, t);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-pcmpgtb_u (uint8x8_t s, uint8x8_t t)
-{
- return __builtin_loongson_pcmpgtb_u (s, t);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-pcmpgtw_s (int32x2_t s, int32x2_t t)
-{
- return __builtin_loongson_pcmpgtw_s (s, t);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-pcmpgth_s (int16x4_t s, int16x4_t t)
-{
- return __builtin_loongson_pcmpgth_s (s, t);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-pcmpgtb_s (int8x8_t s, int8x8_t t)
-{
- return __builtin_loongson_pcmpgtb_s (s, t);
-}
-
-/* Extract halfword. */
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-pextrh_u (uint16x4_t s, int field /* 0--3 */)
-{
- return __builtin_loongson_pextrh_u (s, field);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-pextrh_s (int16x4_t s, int field /* 0--3 */)
-{
- return __builtin_loongson_pextrh_s (s, field);
-}
-
-/* Insert halfword. */
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-pinsrh_0_u (uint16x4_t s, uint16x4_t t)
-{
- return __builtin_loongson_pinsrh_0_u (s, t);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-pinsrh_1_u (uint16x4_t s, uint16x4_t t)
-{
- return __builtin_loongson_pinsrh_1_u (s, t);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-pinsrh_2_u (uint16x4_t s, uint16x4_t t)
-{
- return __builtin_loongson_pinsrh_2_u (s, t);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-pinsrh_3_u (uint16x4_t s, uint16x4_t t)
-{
- return __builtin_loongson_pinsrh_3_u (s, t);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-pinsrh_0_s (int16x4_t s, int16x4_t t)
-{
- return __builtin_loongson_pinsrh_0_s (s, t);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-pinsrh_1_s (int16x4_t s, int16x4_t t)
-{
- return __builtin_loongson_pinsrh_1_s (s, t);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-pinsrh_2_s (int16x4_t s, int16x4_t t)
-{
- return __builtin_loongson_pinsrh_2_s (s, t);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-pinsrh_3_s (int16x4_t s, int16x4_t t)
-{
- return __builtin_loongson_pinsrh_3_s (s, t);
-}
-
-/* Multiply and add. */
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-pmaddhw (int16x4_t s, int16x4_t t)
-{
- return __builtin_loongson_pmaddhw (s, t);
-}
-
-/* Maximum of signed halfwords. */
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-pmaxsh (int16x4_t s, int16x4_t t)
-{
- return __builtin_loongson_pmaxsh (s, t);
-}
-
-/* Maximum of unsigned bytes. */
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-pmaxub (uint8x8_t s, uint8x8_t t)
-{
- return __builtin_loongson_pmaxub (s, t);
-}
-
-/* Minimum of signed halfwords. */
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-pminsh (int16x4_t s, int16x4_t t)
-{
- return __builtin_loongson_pminsh (s, t);
-}
-
-/* Minimum of unsigned bytes. */
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-pminub (uint8x8_t s, uint8x8_t t)
-{
- return __builtin_loongson_pminub (s, t);
-}
-
-/* Move byte mask. */
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-pmovmskb_u (uint8x8_t s)
-{
- return __builtin_loongson_pmovmskb_u (s);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-pmovmskb_s (int8x8_t s)
-{
- return __builtin_loongson_pmovmskb_s (s);
-}
-
-/* Multiply unsigned integers and store high result. */
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-pmulhuh (uint16x4_t s, uint16x4_t t)
-{
- return __builtin_loongson_pmulhuh (s, t);
-}
-
-/* Multiply signed integers and store high result. */
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-pmulhh (int16x4_t s, int16x4_t t)
-{
- return __builtin_loongson_pmulhh (s, t);
-}
-
-/* Multiply signed integers and store low result. */
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-pmullh (int16x4_t s, int16x4_t t)
-{
- return __builtin_loongson_pmullh (s, t);
-}
-
-/* Multiply unsigned word integers. */
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
-pmuluw (uint32x2_t s, uint32x2_t t)
-{
- return __builtin_loongson_pmuluw (s, t);
-}
-
-/* Absolute difference. */
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-pasubub (uint8x8_t s, uint8x8_t t)
-{
- return __builtin_loongson_pasubub (s, t);
-}
-
-/* Sum of unsigned byte integers. */
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-biadd (uint8x8_t s)
-{
- return __builtin_loongson_biadd (s);
-}
-
-/* Sum of absolute differences.
- Note that this intrinsic expands into two machine instructions:
- PASUBUB followed by BIADD. */
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-psadbh (uint8x8_t s, uint8x8_t t)
-{
- return __builtin_loongson_psadbh (s, t);
-}
-
-/* Shuffle halfwords. */
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-pshufh_u (uint16x4_t dest, uint16x4_t s, uint8_t order)
-{
- return __builtin_loongson_pshufh_u (s, order);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-pshufh_s (int16x4_t dest, int16x4_t s, uint8_t order)
-{
- return __builtin_loongson_pshufh_s (s, order);
-}
-
-/* Shift left logical. */
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-psllh_u (uint16x4_t s, uint8_t amount)
-{
- return __builtin_loongson_psllh_u (s, amount);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-psllh_s (int16x4_t s, uint8_t amount)
-{
- return __builtin_loongson_psllh_s (s, amount);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-psllw_u (uint32x2_t s, uint8_t amount)
-{
- return __builtin_loongson_psllw_u (s, amount);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-psllw_s (int32x2_t s, uint8_t amount)
-{
- return __builtin_loongson_psllw_s (s, amount);
-}
-
-/* Shift right logical. */
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-psrlh_u (uint16x4_t s, uint8_t amount)
-{
- return __builtin_loongson_psrlh_u (s, amount);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-psrlh_s (int16x4_t s, uint8_t amount)
-{
- return __builtin_loongson_psrlh_s (s, amount);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-psrlw_u (uint32x2_t s, uint8_t amount)
-{
- return __builtin_loongson_psrlw_u (s, amount);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-psrlw_s (int32x2_t s, uint8_t amount)
-{
- return __builtin_loongson_psrlw_s (s, amount);
-}
-
-/* Shift right arithmetic. */
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-psrah_u (uint16x4_t s, uint8_t amount)
-{
- return __builtin_loongson_psrah_u (s, amount);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-psrah_s (int16x4_t s, uint8_t amount)
-{
- return __builtin_loongson_psrah_s (s, amount);
-}
-
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-psraw_u (uint32x2_t s, uint8_t amount)
-{
- return __builtin_loongson_psraw_u (s, amount);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-psraw_s (int32x2_t s, uint8_t amount)
-{
- return __builtin_loongson_psraw_s (s, amount);
-}
-
-/* Vector subtraction, treating overflow by wraparound. */
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-psubw_u (uint32x2_t s, uint32x2_t t)
-{
- return __builtin_loongson_psubw_u (s, t);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-psubh_u (uint16x4_t s, uint16x4_t t)
-{
- return __builtin_loongson_psubh_u (s, t);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-psubb_u (uint8x8_t s, uint8x8_t t)
-{
- return __builtin_loongson_psubb_u (s, t);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-psubw_s (int32x2_t s, int32x2_t t)
-{
- return __builtin_loongson_psubw_s (s, t);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-psubh_s (int16x4_t s, int16x4_t t)
-{
- return __builtin_loongson_psubh_s (s, t);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-psubb_s (int8x8_t s, int8x8_t t)
-{
- return __builtin_loongson_psubb_s (s, t);
-}
-
-/* Subtraction of doubleword integers, treating overflow by wraparound. */
-__extension__ static __inline uint64_t __attribute__ ((__always_inline__))
-psubd_u (uint64_t s, uint64_t t)
-{
- return __builtin_loongson_psubd_u (s, t);
-}
-
-__extension__ static __inline int64_t __attribute__ ((__always_inline__))
-psubd_s (int64_t s, int64_t t)
-{
- return __builtin_loongson_psubd_s (s, t);
-}
-
-/* Vector subtraction, treating overflow by signed saturation. */
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-psubsh (int16x4_t s, int16x4_t t)
-{
- return __builtin_loongson_psubsh (s, t);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-psubsb (int8x8_t s, int8x8_t t)
-{
- return __builtin_loongson_psubsb (s, t);
-}
-
-/* Vector subtraction, treating overflow by unsigned saturation. */
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-psubush (uint16x4_t s, uint16x4_t t)
-{
- return __builtin_loongson_psubush (s, t);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-psubusb (uint8x8_t s, uint8x8_t t)
-{
- return __builtin_loongson_psubusb (s, t);
-}
-
-/* Unpack high data. */
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-punpckhwd_u (uint32x2_t s, uint32x2_t t)
-{
- return __builtin_loongson_punpckhwd_u (s, t);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-punpckhhw_u (uint16x4_t s, uint16x4_t t)
-{
- return __builtin_loongson_punpckhhw_u (s, t);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-punpckhbh_u (uint8x8_t s, uint8x8_t t)
-{
- return __builtin_loongson_punpckhbh_u (s, t);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-punpckhwd_s (int32x2_t s, int32x2_t t)
-{
- return __builtin_loongson_punpckhwd_s (s, t);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-punpckhhw_s (int16x4_t s, int16x4_t t)
-{
- return __builtin_loongson_punpckhhw_s (s, t);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-punpckhbh_s (int8x8_t s, int8x8_t t)
-{
- return __builtin_loongson_punpckhbh_s (s, t);
-}
-
-/* Unpack low data. */
-__extension__ static __inline uint32x2_t __attribute__ ((__always_inline__))
-punpcklwd_u (uint32x2_t s, uint32x2_t t)
-{
- return __builtin_loongson_punpcklwd_u (s, t);
-}
-
-__extension__ static __inline uint16x4_t __attribute__ ((__always_inline__))
-punpcklhw_u (uint16x4_t s, uint16x4_t t)
-{
- return __builtin_loongson_punpcklhw_u (s, t);
-}
-
-__extension__ static __inline uint8x8_t __attribute__ ((__always_inline__))
-punpcklbh_u (uint8x8_t s, uint8x8_t t)
-{
- return __builtin_loongson_punpcklbh_u (s, t);
-}
-
-__extension__ static __inline int32x2_t __attribute__ ((__always_inline__))
-punpcklwd_s (int32x2_t s, int32x2_t t)
-{
- return __builtin_loongson_punpcklwd_s (s, t);
-}
-
-__extension__ static __inline int16x4_t __attribute__ ((__always_inline__))
-punpcklhw_s (int16x4_t s, int16x4_t t)
-{
- return __builtin_loongson_punpcklhw_s (s, t);
-}
-
-__extension__ static __inline int8x8_t __attribute__ ((__always_inline__))
-punpcklbh_s (int8x8_t s, int8x8_t t)
-{
- return __builtin_loongson_punpcklbh_s (s, t);
-}
-
-#ifdef __cplusplus
-}
-#endif
+#if !defined(_GCC_LOONGSON_MMIINTRIN_H)
+#warning \
+ loongson.h is deprecated and will be removed without further notice \
+ at a future date.  Please use loongson-mmiintrin.h instead.
+#include "loongson-mmiintrin.h"
#endif
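
As a minimal sketch of the migration this compatibility shim enables (illustrative only; the file name and build flags are assumptions, while int16x4_t and paddh_s come from the header itself):

/* add4.c -- include the new header name directly instead of loongson.h;
   build with e.g. -march=loongson2f -mhard-float -mloongson-mmi.  */
#include <loongson-mmiintrin.h>

int16x4_t
add4 (int16x4_t s, int16x4_t t)
{
  /* Expands to a single paddh instruction, exactly as with loongson.h.  */
  return paddh_s (s, t);
}
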
+++ /dev/null
-;; Machine description for Loongson-specific patterns, such as
-;; ST Microelectronics Loongson-2E/2F etc.
-;; Copyright (C) 2008-2018 Free Software Foundation, Inc.
-;; Contributed by CodeSourcery.
-;;
-;; This file is part of GCC.
-;;
-;; GCC is free software; you can redistribute it and/or modify
-;; it under the terms of the GNU General Public License as published by
-;; the Free Software Foundation; either version 3, or (at your option)
-;; any later version.
-
-;; GCC is distributed in the hope that it will be useful,
-;; but WITHOUT ANY WARRANTY; without even the implied warranty of
-;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-;; GNU General Public License for more details.
-
-;; You should have received a copy of the GNU General Public License
-;; along with GCC; see the file COPYING3. If not see
-;; <http://www.gnu.org/licenses/>.
-
-(define_c_enum "unspec" [
- UNSPEC_LOONGSON_PAVG
- UNSPEC_LOONGSON_PCMPEQ
- UNSPEC_LOONGSON_PCMPGT
- UNSPEC_LOONGSON_PEXTR
- UNSPEC_LOONGSON_PINSRH
- UNSPEC_LOONGSON_VINIT
- UNSPEC_LOONGSON_PMADD
- UNSPEC_LOONGSON_PMOVMSK
- UNSPEC_LOONGSON_PMULHU
- UNSPEC_LOONGSON_PMULH
- UNSPEC_LOONGSON_PMULU
- UNSPEC_LOONGSON_PASUBUB
- UNSPEC_LOONGSON_BIADD
- UNSPEC_LOONGSON_PSADBH
- UNSPEC_LOONGSON_PSHUFH
- UNSPEC_LOONGSON_PUNPCKH
- UNSPEC_LOONGSON_PUNPCKL
- UNSPEC_LOONGSON_PADDD
- UNSPEC_LOONGSON_PSUBD
- UNSPEC_LOONGSON_DSLL
- UNSPEC_LOONGSON_DSRL
-])
-
-;; Mode iterators and attributes.
-
-;; 64-bit vectors of bytes.
-(define_mode_iterator VB [V8QI])
-
-;; 64-bit vectors of halfwords.
-(define_mode_iterator VH [V4HI])
-
-;; 64-bit vectors of words.
-(define_mode_iterator VW [V2SI])
-
-;; 64-bit vectors of halfwords and bytes.
-(define_mode_iterator VHB [V4HI V8QI])
-
-;; 64-bit vectors of words and halfwords.
-(define_mode_iterator VWH [V2SI V4HI])
-
-;; 64-bit vectors of words and bytes
-(define_mode_iterator VWB [V2SI V8QI])
-
-;; 64-bit vectors of words, halfwords and bytes.
-(define_mode_iterator VWHB [V2SI V4HI V8QI])
-
-;; 64-bit vectors of words, halfwords and bytes; and DImode.
-(define_mode_iterator VWHBDI [V2SI V4HI V8QI DI])
-
-;; The Loongson instruction suffixes corresponding to the modes in the
-;; VWHBDI iterator.
-(define_mode_attr V_suffix [(V2SI "w") (V4HI "h") (V8QI "b") (DI "d")])
-
-;; Given a vector type T, the mode of a vector half the size of T
-;; and with the same number of elements.
-(define_mode_attr V_squash [(V2SI "V2HI") (V4HI "V4QI")])
-
-;; Given a vector type T, the mode of a vector the same size as T
-;; but with half as many elements.
-(define_mode_attr V_stretch_half [(V2SI "DI") (V4HI "V2SI") (V8QI "V4HI")])
-
-;; The Loongson instruction suffixes corresponding to the transformation
-;; expressed by V_stretch_half.
-(define_mode_attr V_stretch_half_suffix [(V2SI "wd") (V4HI "hw") (V8QI "bh")])
-
-;; Given a vector type T, the mode of a vector the same size as T
-;; but with twice as many elements.
-(define_mode_attr V_squash_double [(V2SI "V4HI") (V4HI "V8QI")])
-
-;; Given a vector type T, the inner mode.
-(define_mode_attr V_inner [(V8QI "QI") (V4HI "HI") (V2SI "SI")])
-
-;; The Loongson instruction suffixes corresponding to the conversions
-;; specified by V_half_width.
-(define_mode_attr V_squash_double_suffix [(V2SI "wh") (V4HI "hb")])
-
-;; Move patterns.
-
-;; Expander to legitimize moves involving values of vector modes.
-(define_expand "mov<mode>"
- [(set (match_operand:VWHB 0)
- (match_operand:VWHB 1))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
-{
- if (mips_legitimize_move (<MODE>mode, operands[0], operands[1]))
- DONE;
-})
-
-;; Handle legitimized moves between values of vector modes.
-(define_insn "mov<mode>_internal"
- [(set (match_operand:VWHB 0 "nonimmediate_operand" "=m,f,d,f, d, m, d")
- (match_operand:VWHB 1 "move_operand" "f,m,f,dYG,dYG,dYG,m"))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- { return mips_output_move (operands[0], operands[1]); }
- [(set_attr "move_type" "fpstore,fpload,mfc,mtc,move,store,load")
- (set_attr "mode" "DI")])
-
-;; Initialization of a vector.
-
-(define_expand "vec_init<mode><unitmode>"
- [(set (match_operand:VWHB 0 "register_operand")
- (match_operand 1 ""))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
-{
- mips_expand_vector_init (operands[0], operands[1]);
- DONE;
-})
-
-;; Helper for vec_init. Initialize element 0 of the output from the input.
-;; All other elements are undefined.
-(define_insn "loongson_vec_init1_<mode>"
- [(set (match_operand:VHB 0 "register_operand" "=f")
- (unspec:VHB [(truncate:<V_inner>
- (match_operand:DI 1 "reg_or_0_operand" "Jd"))]
- UNSPEC_LOONGSON_VINIT))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "dmtc1\t%z1,%0"
- [(set_attr "move_type" "mtc")
- (set_attr "mode" "DI")])
-
-;; Helper for vec_initv2si.
-(define_insn "*vec_concatv2si"
- [(set (match_operand:V2SI 0 "register_operand" "=f")
- (vec_concat:V2SI
- (match_operand:SI 1 "register_operand" "f")
- (match_operand:SI 2 "register_operand" "f")))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "punpcklwd\t%0,%1,%2"
- [(set_attr "type" "fcvt")])
-
-;; Instruction patterns for SIMD instructions.
-
-;; Pack with signed saturation.
-(define_insn "vec_pack_ssat_<mode>"
- [(set (match_operand:<V_squash_double> 0 "register_operand" "=f")
- (vec_concat:<V_squash_double>
- (ss_truncate:<V_squash>
- (match_operand:VWH 1 "register_operand" "f"))
- (ss_truncate:<V_squash>
- (match_operand:VWH 2 "register_operand" "f"))))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "packss<V_squash_double_suffix>\t%0,%1,%2"
- [(set_attr "type" "fmul")])
-
-;; Pack with unsigned saturation.
-(define_insn "vec_pack_usat_<mode>"
- [(set (match_operand:<V_squash_double> 0 "register_operand" "=f")
- (vec_concat:<V_squash_double>
- (us_truncate:<V_squash>
- (match_operand:VH 1 "register_operand" "f"))
- (us_truncate:<V_squash>
- (match_operand:VH 2 "register_operand" "f"))))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "packus<V_squash_double_suffix>\t%0,%1,%2"
- [(set_attr "type" "fmul")])
-
-;; Addition, treating overflow by wraparound.
-(define_insn "add<mode>3"
- [(set (match_operand:VWHB 0 "register_operand" "=f")
- (plus:VWHB (match_operand:VWHB 1 "register_operand" "f")
- (match_operand:VWHB 2 "register_operand" "f")))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "padd<V_suffix>\t%0,%1,%2"
- [(set_attr "type" "fadd")])
-
-;; Addition of doubleword integers stored in FP registers.
-;; Overflow is treated by wraparound.
-;; We use 'unspec' instead of 'plus' here to avoid clash with
-;; mips.md::add<mode>3. If 'plus' was used, then such instruction
-;; would be recognized as adddi3 and reload would make it use
-;; GPRs instead of FPRs.
-(define_insn "loongson_paddd"
- [(set (match_operand:DI 0 "register_operand" "=f")
- (unspec:DI [(match_operand:DI 1 "register_operand" "f")
- (match_operand:DI 2 "register_operand" "f")]
- UNSPEC_LOONGSON_PADDD))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "paddd\t%0,%1,%2"
- [(set_attr "type" "fadd")])
-
-;; Addition, treating overflow by signed saturation.
-(define_insn "ssadd<mode>3"
- [(set (match_operand:VHB 0 "register_operand" "=f")
- (ss_plus:VHB (match_operand:VHB 1 "register_operand" "f")
- (match_operand:VHB 2 "register_operand" "f")))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "padds<V_suffix>\t%0,%1,%2"
- [(set_attr "type" "fadd")])
-
-;; Addition, treating overflow by unsigned saturation.
-(define_insn "usadd<mode>3"
- [(set (match_operand:VHB 0 "register_operand" "=f")
- (us_plus:VHB (match_operand:VHB 1 "register_operand" "f")
- (match_operand:VHB 2 "register_operand" "f")))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "paddus<V_suffix>\t%0,%1,%2"
- [(set_attr "type" "fadd")])
-
-;; Logical AND NOT.
-(define_insn "loongson_pandn_<V_suffix>"
- [(set (match_operand:VWHBDI 0 "register_operand" "=f")
- (and:VWHBDI
- (not:VWHBDI (match_operand:VWHBDI 1 "register_operand" "f"))
- (match_operand:VWHBDI 2 "register_operand" "f")))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "pandn\t%0,%1,%2"
- [(set_attr "type" "fmul")])
-
-;; Logical AND.
-(define_insn "and<mode>3"
- [(set (match_operand:VWHB 0 "register_operand" "=f")
- (and:VWHB (match_operand:VWHB 1 "register_operand" "f")
- (match_operand:VWHB 2 "register_operand" "f")))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "and\t%0,%1,%2"
- [(set_attr "type" "fmul")])
-
-;; Logical OR.
-(define_insn "ior<mode>3"
- [(set (match_operand:VWHB 0 "register_operand" "=f")
- (ior:VWHB (match_operand:VWHB 1 "register_operand" "f")
- (match_operand:VWHB 2 "register_operand" "f")))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "or\t%0,%1,%2"
- [(set_attr "type" "fcvt")])
-
-;; Logical XOR.
-(define_insn "xor<mode>3"
- [(set (match_operand:VWHB 0 "register_operand" "=f")
- (xor:VWHB (match_operand:VWHB 1 "register_operand" "f")
- (match_operand:VWHB 2 "register_operand" "f")))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "xor\t%0,%1,%2"
- [(set_attr "type" "fmul")])
-
-;; Logical NOR.
-(define_insn "*loongson_nor"
- [(set (match_operand:VWHB 0 "register_operand" "=f")
- (and:VWHB
- (not:VWHB (match_operand:VWHB 1 "register_operand" "f"))
- (not:VWHB (match_operand:VWHB 2 "register_operand" "f"))))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "nor\t%0,%1,%2"
- [(set_attr "type" "fmul")])
-
-;; Logical NOT.
-(define_insn "one_cmpl<mode>2"
- [(set (match_operand:VWHB 0 "register_operand" "=f")
- (not:VWHB (match_operand:VWHB 1 "register_operand" "f")))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "nor\t%0,%1,%1"
- [(set_attr "type" "fmul")])
-
-;; Average.
-(define_insn "loongson_pavg<V_suffix>"
- [(set (match_operand:VHB 0 "register_operand" "=f")
- (unspec:VHB [(match_operand:VHB 1 "register_operand" "f")
- (match_operand:VHB 2 "register_operand" "f")]
- UNSPEC_LOONGSON_PAVG))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "pavg<V_suffix>\t%0,%1,%2"
- [(set_attr "type" "fadd")])
-
-;; Equality test.
-(define_insn "loongson_pcmpeq<V_suffix>"
- [(set (match_operand:VWHB 0 "register_operand" "=f")
- (unspec:VWHB [(match_operand:VWHB 1 "register_operand" "f")
- (match_operand:VWHB 2 "register_operand" "f")]
- UNSPEC_LOONGSON_PCMPEQ))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "pcmpeq<V_suffix>\t%0,%1,%2"
- [(set_attr "type" "fadd")])
-
-;; Greater-than test.
-(define_insn "loongson_pcmpgt<V_suffix>"
- [(set (match_operand:VWHB 0 "register_operand" "=f")
- (unspec:VWHB [(match_operand:VWHB 1 "register_operand" "f")
- (match_operand:VWHB 2 "register_operand" "f")]
- UNSPEC_LOONGSON_PCMPGT))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "pcmpgt<V_suffix>\t%0,%1,%2"
- [(set_attr "type" "fadd")])
-
-;; Extract halfword.
-(define_insn "loongson_pextrh"
- [(set (match_operand:V4HI 0 "register_operand" "=f")
- (unspec:V4HI [(match_operand:V4HI 1 "register_operand" "f")
- (match_operand:SI 2 "register_operand" "f")]
- UNSPEC_LOONGSON_PEXTR))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "pextrh\t%0,%1,%2"
- [(set_attr "type" "fcvt")])
-
-;; Insert halfword.
-(define_insn "loongson_pinsrh_0"
- [(set (match_operand:V4HI 0 "register_operand" "=f")
- (vec_select:V4HI
- (vec_concat:V8HI
- (match_operand:V4HI 1 "register_operand" "f")
- (match_operand:V4HI 2 "register_operand" "f"))
- (parallel [(const_int 4) (const_int 1)
- (const_int 2) (const_int 3)])))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "pinsrh_0\t%0,%1,%2"
- [(set_attr "type" "fdiv")])
-
-(define_insn "loongson_pinsrh_1"
- [(set (match_operand:V4HI 0 "register_operand" "=f")
- (vec_select:V4HI
- (vec_concat:V8HI
- (match_operand:V4HI 1 "register_operand" "f")
- (match_operand:V4HI 2 "register_operand" "f"))
- (parallel [(const_int 0) (const_int 4)
- (const_int 2) (const_int 3)])))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "pinsrh_1\t%0,%1,%2"
- [(set_attr "type" "fdiv")])
-
-(define_insn "loongson_pinsrh_2"
- [(set (match_operand:V4HI 0 "register_operand" "=f")
- (vec_select:V4HI
- (vec_concat:V8HI
- (match_operand:V4HI 1 "register_operand" "f")
- (match_operand:V4HI 2 "register_operand" "f"))
- (parallel [(const_int 0) (const_int 1)
- (const_int 4) (const_int 3)])))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "pinsrh_2\t%0,%1,%2"
- [(set_attr "type" "fdiv")])
-
-(define_insn "loongson_pinsrh_3"
- [(set (match_operand:V4HI 0 "register_operand" "=f")
- (vec_select:V4HI
- (vec_concat:V8HI
- (match_operand:V4HI 1 "register_operand" "f")
- (match_operand:V4HI 2 "register_operand" "f"))
- (parallel [(const_int 0) (const_int 1)
- (const_int 2) (const_int 4)])))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "pinsrh_3\t%0,%1,%2"
- [(set_attr "type" "fdiv")])
-
-(define_insn "*vec_setv4hi"
- [(set (match_operand:V4HI 0 "register_operand" "=f")
- (unspec:V4HI [(match_operand:V4HI 1 "register_operand" "f")
- (match_operand:SI 2 "register_operand" "f")
- (match_operand:SI 3 "const_0_to_3_operand" "")]
- UNSPEC_LOONGSON_PINSRH))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "pinsrh_%3\t%0,%1,%2"
- [(set_attr "type" "fdiv")])
-
-(define_expand "vec_setv4hi"
- [(set (match_operand:V4HI 0 "register_operand" "=f")
- (unspec:V4HI [(match_operand:V4HI 1 "register_operand" "f")
- (match_operand:HI 2 "register_operand" "f")
- (match_operand:SI 3 "const_0_to_3_operand" "")]
- UNSPEC_LOONGSON_PINSRH))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
-{
- rtx ext = gen_reg_rtx (SImode);
- emit_move_insn (ext, gen_lowpart (SImode, operands[2]));
- operands[2] = ext;
-})
-
-;; Multiply and add packed integers.
-(define_insn "loongson_pmaddhw"
- [(set (match_operand:V2SI 0 "register_operand" "=f")
- (unspec:V2SI [(match_operand:V4HI 1 "register_operand" "f")
- (match_operand:V4HI 2 "register_operand" "f")]
- UNSPEC_LOONGSON_PMADD))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "pmaddhw\t%0,%1,%2"
- [(set_attr "type" "fmul")])
-
-(define_expand "sdot_prodv4hi"
- [(match_operand:V2SI 0 "register_operand" "")
- (match_operand:V4HI 1 "register_operand" "")
- (match_operand:V4HI 2 "register_operand" "")
- (match_operand:V2SI 3 "register_operand" "")]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
-{
- rtx t = gen_reg_rtx (V2SImode);
- emit_insn (gen_loongson_pmaddhw (t, operands[1], operands[2]));
- emit_insn (gen_addv2si3 (operands[0], t, operands[3]));
- DONE;
-})
-
-;; Maximum of signed halfwords.
-(define_insn "smaxv4hi3"
- [(set (match_operand:V4HI 0 "register_operand" "=f")
- (smax:V4HI (match_operand:V4HI 1 "register_operand" "f")
- (match_operand:V4HI 2 "register_operand" "f")))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "pmaxsh\t%0,%1,%2"
- [(set_attr "type" "fadd")])
-
-(define_expand "smax<mode>3"
- [(match_operand:VWB 0 "register_operand" "")
- (match_operand:VWB 1 "register_operand" "")
- (match_operand:VWB 2 "register_operand" "")]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
-{
- mips_expand_vec_minmax (operands[0], operands[1], operands[2],
- gen_loongson_pcmpgt<V_suffix>, false);
- DONE;
-})
-
-;; Maximum of unsigned bytes.
-(define_insn "umaxv8qi3"
- [(set (match_operand:V8QI 0 "register_operand" "=f")
- (umax:V8QI (match_operand:V8QI 1 "register_operand" "f")
- (match_operand:V8QI 2 "register_operand" "f")))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "pmaxub\t%0,%1,%2"
- [(set_attr "type" "fadd")])
-
-;; Minimum of signed halfwords.
-(define_insn "sminv4hi3"
- [(set (match_operand:V4HI 0 "register_operand" "=f")
- (smin:V4HI (match_operand:V4HI 1 "register_operand" "f")
- (match_operand:V4HI 2 "register_operand" "f")))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "pminsh\t%0,%1,%2"
- [(set_attr "type" "fadd")])
-
-(define_expand "smin<mode>3"
- [(match_operand:VWB 0 "register_operand" "")
- (match_operand:VWB 1 "register_operand" "")
- (match_operand:VWB 2 "register_operand" "")]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
-{
- mips_expand_vec_minmax (operands[0], operands[1], operands[2],
- gen_loongson_pcmpgt<V_suffix>, true);
- DONE;
-})
-
-;; Minimum of unsigned bytes.
-(define_insn "uminv8qi3"
- [(set (match_operand:V8QI 0 "register_operand" "=f")
- (umin:V8QI (match_operand:V8QI 1 "register_operand" "f")
- (match_operand:V8QI 2 "register_operand" "f")))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "pminub\t%0,%1,%2"
- [(set_attr "type" "fadd")])
-
-;; Move byte mask.
-(define_insn "loongson_pmovmsk<V_suffix>"
- [(set (match_operand:VB 0 "register_operand" "=f")
- (unspec:VB [(match_operand:VB 1 "register_operand" "f")]
- UNSPEC_LOONGSON_PMOVMSK))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "pmovmsk<V_suffix>\t%0,%1"
- [(set_attr "type" "fabs")])
-
-;; Multiply unsigned integers and store high result.
-(define_insn "umul<mode>3_highpart"
- [(set (match_operand:VH 0 "register_operand" "=f")
- (unspec:VH [(match_operand:VH 1 "register_operand" "f")
- (match_operand:VH 2 "register_operand" "f")]
- UNSPEC_LOONGSON_PMULHU))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "pmulhu<V_suffix>\t%0,%1,%2"
- [(set_attr "type" "fmul")])
-
-;; Multiply signed integers and store high result.
-(define_insn "smul<mode>3_highpart"
- [(set (match_operand:VH 0 "register_operand" "=f")
- (unspec:VH [(match_operand:VH 1 "register_operand" "f")
- (match_operand:VH 2 "register_operand" "f")]
- UNSPEC_LOONGSON_PMULH))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "pmulh<V_suffix>\t%0,%1,%2"
- [(set_attr "type" "fmul")])
-
-;; Multiply signed integers and store low result.
-(define_insn "mul<mode>3"
- [(set (match_operand:VH 0 "register_operand" "=f")
- (mult:VH (match_operand:VH 1 "register_operand" "f")
- (match_operand:VH 2 "register_operand" "f")))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "pmull<V_suffix>\t%0,%1,%2"
- [(set_attr "type" "fmul")])
-
-;; Multiply unsigned word integers.
-(define_insn "loongson_pmulu<V_suffix>"
- [(set (match_operand:DI 0 "register_operand" "=f")
- (unspec:DI [(match_operand:VW 1 "register_operand" "f")
- (match_operand:VW 2 "register_operand" "f")]
- UNSPEC_LOONGSON_PMULU))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "pmulu<V_suffix>\t%0,%1,%2"
- [(set_attr "type" "fmul")])
-
-;; Absolute difference.
-(define_insn "loongson_pasubub"
- [(set (match_operand:VB 0 "register_operand" "=f")
- (unspec:VB [(match_operand:VB 1 "register_operand" "f")
- (match_operand:VB 2 "register_operand" "f")]
- UNSPEC_LOONGSON_PASUBUB))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "pasubub\t%0,%1,%2"
- [(set_attr "type" "fadd")])
-
-;; Sum of unsigned byte integers.
-(define_insn "loongson_biadd"
- [(set (match_operand:<V_stretch_half> 0 "register_operand" "=f")
- (unspec:<V_stretch_half> [(match_operand:VB 1 "register_operand" "f")]
- UNSPEC_LOONGSON_BIADD))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "biadd\t%0,%1"
- [(set_attr "type" "fabs")])
-
-(define_insn "reduc_uplus_v8qi"
- [(set (match_operand:V8QI 0 "register_operand" "=f")
- (unspec:V8QI [(match_operand:V8QI 1 "register_operand" "f")]
- UNSPEC_LOONGSON_BIADD))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "biadd\t%0,%1"
- [(set_attr "type" "fabs")])
-
-;; Sum of absolute differences.
-(define_insn "loongson_psadbh"
- [(set (match_operand:<V_stretch_half> 0 "register_operand" "=f")
- (unspec:<V_stretch_half> [(match_operand:VB 1 "register_operand" "f")
- (match_operand:VB 2 "register_operand" "f")]
- UNSPEC_LOONGSON_PSADBH))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "pasubub\t%0,%1,%2;biadd\t%0,%0"
- [(set_attr "type" "fadd")])
-
-;; Shuffle halfwords.
-(define_insn "loongson_pshufh"
- [(set (match_operand:VH 0 "register_operand" "=f")
- (unspec:VH [(match_operand:VH 1 "register_operand" "f")
- (match_operand:SI 2 "register_operand" "f")]
- UNSPEC_LOONGSON_PSHUFH))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "pshufh\t%0,%1,%2"
- [(set_attr "type" "fmul")])
-
-;; Shift left logical.
-(define_insn "ashl<mode>3"
- [(set (match_operand:VWH 0 "register_operand" "=f")
- (ashift:VWH (match_operand:VWH 1 "register_operand" "f")
- (match_operand:SI 2 "register_operand" "f")))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "psll<V_suffix>\t%0,%1,%2"
- [(set_attr "type" "fcvt")])
-
-;; Shift right arithmetic.
-(define_insn "ashr<mode>3"
- [(set (match_operand:VWH 0 "register_operand" "=f")
- (ashiftrt:VWH (match_operand:VWH 1 "register_operand" "f")
- (match_operand:SI 2 "register_operand" "f")))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "psra<V_suffix>\t%0,%1,%2"
- [(set_attr "type" "fcvt")])
-
-;; Shift right logical.
-(define_insn "lshr<mode>3"
- [(set (match_operand:VWH 0 "register_operand" "=f")
- (lshiftrt:VWH (match_operand:VWH 1 "register_operand" "f")
- (match_operand:SI 2 "register_operand" "f")))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "psrl<V_suffix>\t%0,%1,%2"
- [(set_attr "type" "fcvt")])
-
-;; Subtraction, treating overflow by wraparound.
-(define_insn "sub<mode>3"
- [(set (match_operand:VWHB 0 "register_operand" "=f")
- (minus:VWHB (match_operand:VWHB 1 "register_operand" "f")
- (match_operand:VWHB 2 "register_operand" "f")))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "psub<V_suffix>\t%0,%1,%2"
- [(set_attr "type" "fadd")])
-
-;; Subtraction of doubleword integers stored in FP registers.
-;; Overflow is treated by wraparound.
-;; See loongson_paddd for the reason we use 'unspec' rather than
-;; 'minus' here.
-(define_insn "loongson_psubd"
- [(set (match_operand:DI 0 "register_operand" "=f")
- (unspec:DI [(match_operand:DI 1 "register_operand" "f")
- (match_operand:DI 2 "register_operand" "f")]
- UNSPEC_LOONGSON_PSUBD))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "psubd\t%0,%1,%2"
- [(set_attr "type" "fadd")])
-
-;; Subtraction, treating overflow by signed saturation.
-(define_insn "sssub<mode>3"
- [(set (match_operand:VHB 0 "register_operand" "=f")
- (ss_minus:VHB (match_operand:VHB 1 "register_operand" "f")
- (match_operand:VHB 2 "register_operand" "f")))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "psubs<V_suffix>\t%0,%1,%2"
- [(set_attr "type" "fadd")])
-
-;; Subtraction, treating overflow by unsigned saturation.
-(define_insn "ussub<mode>3"
- [(set (match_operand:VHB 0 "register_operand" "=f")
- (us_minus:VHB (match_operand:VHB 1 "register_operand" "f")
- (match_operand:VHB 2 "register_operand" "f")))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "psubus<V_suffix>\t%0,%1,%2"
- [(set_attr "type" "fadd")])
-
-;; Unpack high data. Recall that Loongson only runs in little-endian.
-(define_insn "loongson_punpckhbh"
- [(set (match_operand:V8QI 0 "register_operand" "=f")
- (vec_select:V8QI
- (vec_concat:V16QI
- (match_operand:V8QI 1 "register_operand" "f")
- (match_operand:V8QI 2 "register_operand" "f"))
- (parallel [(const_int 4) (const_int 12)
- (const_int 5) (const_int 13)
- (const_int 6) (const_int 14)
- (const_int 7) (const_int 15)])))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "punpckhbh\t%0,%1,%2"
- [(set_attr "type" "fdiv")])
-
-(define_insn "loongson_punpckhhw"
- [(set (match_operand:V4HI 0 "register_operand" "=f")
- (vec_select:V4HI
- (vec_concat:V8HI
- (match_operand:V4HI 1 "register_operand" "f")
- (match_operand:V4HI 2 "register_operand" "f"))
- (parallel [(const_int 2) (const_int 6)
- (const_int 3) (const_int 7)])))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "punpckhhw\t%0,%1,%2"
- [(set_attr "type" "fdiv")])
-
-(define_insn "loongson_punpckhhw_qi"
- [(set (match_operand:V8QI 0 "register_operand" "=f")
- (vec_select:V8QI
- (vec_concat:V16QI
- (match_operand:V8QI 1 "register_operand" "f")
- (match_operand:V8QI 2 "register_operand" "f"))
- (parallel [(const_int 4) (const_int 5)
- (const_int 12) (const_int 13)
- (const_int 6) (const_int 7)
- (const_int 14) (const_int 15)])))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "punpckhhw\t%0,%1,%2"
- [(set_attr "type" "fdiv")])
-
-(define_insn "loongson_punpckhwd"
- [(set (match_operand:V2SI 0 "register_operand" "=f")
- (vec_select:V2SI
- (vec_concat:V4SI
- (match_operand:V2SI 1 "register_operand" "f")
- (match_operand:V2SI 2 "register_operand" "f"))
- (parallel [(const_int 1) (const_int 3)])))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "punpckhwd\t%0,%1,%2"
- [(set_attr "type" "fcvt")])
-
-(define_insn "loongson_punpckhwd_qi"
- [(set (match_operand:V8QI 0 "register_operand" "=f")
- (vec_select:V8QI
- (vec_concat:V16QI
- (match_operand:V8QI 1 "register_operand" "f")
- (match_operand:V8QI 2 "register_operand" "f"))
- (parallel [(const_int 4) (const_int 5)
- (const_int 6) (const_int 7)
- (const_int 12) (const_int 13)
- (const_int 14) (const_int 15)])))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "punpckhwd\t%0,%1,%2"
- [(set_attr "type" "fcvt")])
-
-(define_insn "loongson_punpckhwd_hi"
- [(set (match_operand:V4HI 0 "register_operand" "=f")
- (vec_select:V4HI
- (vec_concat:V8HI
- (match_operand:V4HI 1 "register_operand" "f")
- (match_operand:V4HI 2 "register_operand" "f"))
- (parallel [(const_int 2) (const_int 3)
- (const_int 6) (const_int 7)])))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "punpckhwd\t%0,%1,%2"
- [(set_attr "type" "fcvt")])
-
-;; Unpack low data.
-(define_insn "loongson_punpcklbh"
- [(set (match_operand:V8QI 0 "register_operand" "=f")
- (vec_select:V8QI
- (vec_concat:V16QI
- (match_operand:V8QI 1 "register_operand" "f")
- (match_operand:V8QI 2 "register_operand" "f"))
- (parallel [(const_int 0) (const_int 8)
- (const_int 1) (const_int 9)
- (const_int 2) (const_int 10)
- (const_int 3) (const_int 11)])))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "punpcklbh\t%0,%1,%2"
- [(set_attr "type" "fdiv")])
-
-(define_insn "loongson_punpcklhw"
- [(set (match_operand:V4HI 0 "register_operand" "=f")
- (vec_select:V4HI
- (vec_concat:V8HI
- (match_operand:V4HI 1 "register_operand" "f")
- (match_operand:V4HI 2 "register_operand" "f"))
- (parallel [(const_int 0) (const_int 4)
- (const_int 1) (const_int 5)])))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "punpcklhw\t%0,%1,%2"
- [(set_attr "type" "fdiv")])
-
-(define_insn "*loongson_punpcklhw_qi"
- [(set (match_operand:V8QI 0 "register_operand" "=f")
- (vec_select:V8QI
- (vec_concat:V16QI
- (match_operand:V8QI 1 "register_operand" "f")
- (match_operand:V8QI 2 "register_operand" "f"))
- (parallel [(const_int 0) (const_int 1)
- (const_int 8) (const_int 9)
- (const_int 2) (const_int 3)
- (const_int 10) (const_int 11)])))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "punpcklhw\t%0,%1,%2"
- [(set_attr "type" "fdiv")])
-
-(define_insn "loongson_punpcklwd"
- [(set (match_operand:V2SI 0 "register_operand" "=f")
- (vec_select:V2SI
- (vec_concat:V4SI
- (match_operand:V2SI 1 "register_operand" "f")
- (match_operand:V2SI 2 "register_operand" "f"))
- (parallel [(const_int 0) (const_int 2)])))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "punpcklwd\t%0,%1,%2"
- [(set_attr "type" "fcvt")])
-
-(define_insn "*loongson_punpcklwd_qi"
- [(set (match_operand:V8QI 0 "register_operand" "=f")
- (vec_select:V8QI
- (vec_concat:V16QI
- (match_operand:V8QI 1 "register_operand" "f")
- (match_operand:V8QI 2 "register_operand" "f"))
- (parallel [(const_int 0) (const_int 1)
- (const_int 2) (const_int 3)
- (const_int 8) (const_int 9)
- (const_int 10) (const_int 11)])))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "punpcklwd\t%0,%1,%2"
- [(set_attr "type" "fcvt")])
-
-(define_insn "*loongson_punpcklwd_hi"
- [(set (match_operand:V4HI 0 "register_operand" "=f")
- (vec_select:V4HI
- (vec_concat:V8HI
- (match_operand:V4HI 1 "register_operand" "f")
- (match_operand:V4HI 2 "register_operand" "f"))
- (parallel [(const_int 0) (const_int 1)
- (const_int 4) (const_int 5)])))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "punpcklwd\t%0,%1,%2"
- [(set_attr "type" "fcvt")])
-
-(define_expand "vec_unpacks_lo_<mode>"
- [(match_operand:<V_stretch_half> 0 "register_operand" "")
- (match_operand:VHB 1 "register_operand" "")]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
-{
- mips_expand_vec_unpack (operands, false, false);
- DONE;
-})
-
-(define_expand "vec_unpacks_hi_<mode>"
- [(match_operand:<V_stretch_half> 0 "register_operand" "")
- (match_operand:VHB 1 "register_operand" "")]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
-{
- mips_expand_vec_unpack (operands, false, true);
- DONE;
-})
-
-(define_expand "vec_unpacku_lo_<mode>"
- [(match_operand:<V_stretch_half> 0 "register_operand" "")
- (match_operand:VHB 1 "register_operand" "")]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
-{
- mips_expand_vec_unpack (operands, true, false);
- DONE;
-})
-
-(define_expand "vec_unpacku_hi_<mode>"
- [(match_operand:<V_stretch_half> 0 "register_operand" "")
- (match_operand:VHB 1 "register_operand" "")]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
-{
- mips_expand_vec_unpack (operands, true, true);
- DONE;
-})
-
-;; Whole vector shifts, used for reduction epilogues.
-(define_insn "vec_shl_<mode>"
- [(set (match_operand:VWHBDI 0 "register_operand" "=f")
- (unspec:VWHBDI [(match_operand:VWHBDI 1 "register_operand" "f")
- (match_operand:SI 2 "register_operand" "f")]
- UNSPEC_LOONGSON_DSLL))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "dsll\t%0,%1,%2"
- [(set_attr "type" "fcvt")])
-
-(define_insn "vec_shr_<mode>"
- [(set (match_operand:VWHBDI 0 "register_operand" "=f")
- (unspec:VWHBDI [(match_operand:VWHBDI 1 "register_operand" "f")
- (match_operand:SI 2 "register_operand" "f")]
- UNSPEC_LOONGSON_DSRL))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "dsrl\t%0,%1,%2"
- [(set_attr "type" "fcvt")])
-
-(define_insn "vec_loongson_extract_lo_<mode>"
- [(set (match_operand:<V_inner> 0 "register_operand" "=r")
- (vec_select:<V_inner>
- (match_operand:VWHB 1 "register_operand" "f")
- (parallel [(const_int 0)])))]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
- "mfc1\t%0,%1"
- [(set_attr "type" "mfc")])
-
-(define_expand "reduc_plus_scal_<mode>"
- [(match_operand:<V_inner> 0 "register_operand" "")
- (match_operand:VWHB 1 "register_operand" "")]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
-{
- rtx tmp = gen_reg_rtx (GET_MODE (operands[1]));
- mips_expand_vec_reduc (tmp, operands[1], gen_add<mode>3);
- emit_insn (gen_vec_loongson_extract_lo_<mode> (operands[0], tmp));
- DONE;
-})
-
-(define_expand "reduc_smax_scal_<mode>"
- [(match_operand:<V_inner> 0 "register_operand" "")
- (match_operand:VWHB 1 "register_operand" "")]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
-{
- rtx tmp = gen_reg_rtx (GET_MODE (operands[1]));
- mips_expand_vec_reduc (tmp, operands[1], gen_smax<mode>3);
- emit_insn (gen_vec_loongson_extract_lo_<mode> (operands[0], tmp));
- DONE;
-})
-
-(define_expand "reduc_smin_scal_<mode>"
- [(match_operand:<V_inner> 0 "register_operand" "")
- (match_operand:VWHB 1 "register_operand" "")]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
-{
- rtx tmp = gen_reg_rtx (GET_MODE (operands[1]));
- mips_expand_vec_reduc (tmp, operands[1], gen_smin<mode>3);
- emit_insn (gen_vec_loongson_extract_lo_<mode> (operands[0], tmp));
- DONE;
-})
-
-(define_expand "reduc_umax_scal_<mode>"
- [(match_operand:<V_inner> 0 "register_operand" "")
- (match_operand:VB 1 "register_operand" "")]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
-{
- rtx tmp = gen_reg_rtx (GET_MODE (operands[1]));
- mips_expand_vec_reduc (tmp, operands[1], gen_umax<mode>3);
- emit_insn (gen_vec_loongson_extract_lo_<mode> (operands[0], tmp));
- DONE;
-})
-
-(define_expand "reduc_umin_scal_<mode>"
- [(match_operand:<V_inner> 0 "register_operand" "")
- (match_operand:VB 1 "register_operand" "")]
- "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS"
-{
- rtx tmp = gen_reg_rtx (GET_MODE (operands[1]));
- mips_expand_vec_reduc (tmp, operands[1], gen_umin<mode>3);
- emit_insn (gen_vec_loongson_extract_lo_<mode> (operands[0], tmp));
- DONE;
-})
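
A hedged sketch of what the relocated patterns still provide at the source level; whether the vectorizer actually selects the MMI add<mode>3 ("paddh") pattern kept in loongson-mmi.md depends on its cost model and on flags such as -O2 -ftree-vectorize -mloongson-mmi -mhard-float:

#include <stdint.h>

void
add_halfwords (int16_t *restrict a, const int16_t *restrict b, int n)
{
  /* A plain halfword loop the vectorizer can map onto V4HI paddh.  */
  for (int i = 0; i < n; i++)
    a[i] = (int16_t) (a[i] + b[i]);
}
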
if (mode == CCFmode)
return !(TARGET_FLOATXX && (regno & 1) != 0);
- /* Allow 64-bit vector modes for Loongson-2E/2F. */
- if (TARGET_LOONGSON_VECTORS
+ /* Allow 64-bit vector modes for Loongson MultiMedia extensions
+ Instructions (MMI). */
+ if (TARGET_LOONGSON_MMI
&& (mode == V2SImode
|| mode == V4HImode
|| mode == V8QImode
case E_V2SImode:
case E_V4HImode:
case E_V8QImode:
- return TARGET_LOONGSON_VECTORS;
+ return TARGET_LOONGSON_MMI;
default:
return MSA_SUPPORTED_MODE_P (mode);
AVAIL_NON_MIPS16 (dsp_32, !TARGET_64BIT && TARGET_DSP)
AVAIL_NON_MIPS16 (dsp_64, TARGET_64BIT && TARGET_DSP)
AVAIL_NON_MIPS16 (dspr2_32, !TARGET_64BIT && TARGET_DSPR2)
-AVAIL_NON_MIPS16 (loongson, TARGET_LOONGSON_VECTORS)
+AVAIL_NON_MIPS16 (loongson, TARGET_LOONGSON_MMI)
AVAIL_NON_MIPS16 (cache, TARGET_CACHE_BUILTIN)
AVAIL_NON_MIPS16 (msa, TARGET_MSA)
TARGET_DSPR2 = false;
}
+ /* Make sure that when TARGET_LOONGSON_MMI is true, TARGET_HARD_FLOAT_ABI
+   is true.  In o32, pairs of floating-point registers provide 64-bit
+ values. */
+ if (TARGET_LOONGSON_MMI && !TARGET_HARD_FLOAT_ABI)
+ error ("%<-mloongson-mmi%> must be used with %<-mhard-float%>");
+
/* .eh_frame addresses should be the same width as a C pointer.
Most MIPS ABIs support only one pointer size, so the assembler
will usually know exactly how big an .eh_frame address is.
/* Implement TARGET_SHIFT_TRUNCATION_MASK. We want to keep the default
behavior of TARGET_SHIFT_TRUNCATION_MASK for non-vector modes even
- when TARGET_LOONGSON_VECTORS is true. */
+ when TARGET_LOONGSON_MMI is true. */
static unsigned HOST_WIDE_INT
mips_shift_truncation_mask (machine_mode mode)
{
- if (TARGET_LOONGSON_VECTORS && VECTOR_MODE_P (mode))
+ if (TARGET_LOONGSON_MMI && VECTOR_MODE_P (mode))
return 0;
return GET_MODE_BITSIZE (mode) - 1;
unsigned i, odd, nelt = d->nelt;
rtx t0, t1, t2, t3;
- if (!(TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS))
+ if (!(TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI))
return false;
/* Even-odd for V2SI/V2SFmode is matched by interleave directly. */
if (nelt < 4)
unsigned i, mask;
rtx rmask;
- if (!(TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS))
+ if (!(TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI))
return false;
if (d->vmode != V4HImode)
return false;
unsigned i, elt;
rtx t0, t1;
- if (!(TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS))
+ if (!(TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI))
return false;
/* Note that we've already matched V2SI via punpck and V4HI via pshufh. */
if (d->vmode != V8QImode)
}
/* Loongson is the only cpu with vectors with more elements. */
- gcc_assert (TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS);
+ gcc_assert (TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI);
/* If all values are identical, broadcast the value. */
if (all_same)
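
A minimal reproducer for the new check in mips_option_override (the driver name below is illustrative; the diagnostic is the one added above):

/* t.c -- any translation unit will do.  Building it with, for example,
     mips64el-linux-gnu-gcc -mloongson-mmi -msoft-float -c t.c
   is now rejected with
     error: '-mloongson-mmi' must be used with '-mhard-float'
   because the 64-bit MMI vectors live in floating-point registers.  */
int mmi_needs_hard_float;
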
#define TUNE_I6400 (mips_tune == PROCESSOR_I6400)
#define TUNE_P6600 (mips_tune == PROCESSOR_P6600)
-/* Whether vector modes and intrinsics for ST Microelectronics
- Loongson-2E/2F processors should be enabled. In o32 pairs of
- floating-point registers provide 64-bit values. */
-#define TARGET_LOONGSON_VECTORS (TARGET_HARD_FLOAT_ABI \
- && (TARGET_LOONGSON_2EF \
- || TARGET_LOONGSON_3A))
-
/* True if the pre-reload scheduler should try to create chains of
multiply-add or multiply-subtract instructions. For example,
suppose we have:
if (TARGET_ABICALLS) \
builtin_define ("__mips_abicalls"); \
\
- /* Whether Loongson vector modes are enabled. */ \
- if (TARGET_LOONGSON_VECTORS) \
- builtin_define ("__mips_loongson_vector_rev"); \
+      /* Whether Loongson MMI vector modes are enabled.  */ \
+ if (TARGET_LOONGSON_MMI) \
+ { \
+ builtin_define ("__mips_loongson_vector_rev"); \
+ builtin_define ("__mips_loongson_mmi"); \
+ } \
\
/* Historical Octeon macro. */ \
if (TARGET_OCTEON) \
/* A spec that infers the:
-mnan=2008 setting from a -mips argument,
- -mdsp setting from a -march argument. */
-#define BASE_DRIVER_SELF_SPECS \
- MIPS_ISA_NAN2008_SPEC, \
+   -mdsp setting from a -march argument,
+ -mloongson-mmi setting from a -march argument. */
+#define BASE_DRIVER_SELF_SPECS \
+ MIPS_ISA_NAN2008_SPEC, \
+ MIPS_ASE_DSP_SPEC, \
+ MIPS_ASE_LOONGSON_MMI_SPEC
+
+#define MIPS_ASE_DSP_SPEC \
"%{!mno-dsp: \
%{march=24ke*|march=34kc*|march=34kf*|march=34kx*|march=1004k* \
|march=interaptiv: -mdsp} \
%{march=74k*|march=m14ke*: %{!mno-dspr2: -mdspr2 -mdsp}}}"
+#define MIPS_ASE_LOONGSON_MMI_SPEC \
+ "%{!mno-loongson-mmi: \
+ %{march=loongson2e|march=loongson2f|march=loongson3a: -mloongson-mmi}}"
+
#define DRIVER_SELF_SPECS \
MIPS_ISA_LEVEL_SPEC, \
BASE_DRIVER_SELF_SPECS
%{mcrc} %{mno-crc} \
%{mginv} %{mno-ginv} \
%{mmsa} %{mno-msa} \
+%{mloongson-mmi} %{mno-loongson-mmi} \
%{msmartmips} %{mno-smartmips} \
%{mmt} %{mno-mt} \
%{mfix-rm7000} %{mno-fix-rm7000} \
#define SLOW_BYTE_ACCESS (!TARGET_MIPS16)
/* Standard MIPS integer shifts truncate the shift amount to the
- width of the shifted operand. However, Loongson vector shifts
+ width of the shifted operand. However, Loongson MMI shifts
do not truncate the shift amount at all. */
-#define SHIFT_COUNT_TRUNCATED (!TARGET_LOONGSON_VECTORS)
+#define SHIFT_COUNT_TRUNCATED (!TARGET_LOONGSON_MMI)
/* Specify the machine mode that pointers have.
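
User code can now test the new predefine rather than only __mips_loongson_vector_rev; a small illustrative guard (the #error wording is this sketch's own):

#ifdef __mips_loongson_mmi
# include <loongson-mmiintrin.h>
#else
# error "MMI not enabled: pass -mloongson-mmi or -march=loongson2e/2f/3a"
#endif
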
(define_mode_iterator MOVE64
[DI DF
(V2SF "TARGET_HARD_FLOAT && TARGET_PAIRED_SINGLE_FLOAT")
- (V2SI "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS")
- (V4HI "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS")
- (V8QI "TARGET_HARD_FLOAT && TARGET_LOONGSON_VECTORS")])
+ (V2SI "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI")
+ (V4HI "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI")
+ (V8QI "TARGET_HARD_FLOAT && TARGET_LOONGSON_MMI")])
;; 128-bit modes for which we provide move patterns on 64-bit targets.
(define_mode_iterator MOVE128 [TI TF])
[(DF "!TARGET_64BIT && TARGET_DOUBLE_FLOAT")
(DI "!TARGET_64BIT && TARGET_DOUBLE_FLOAT")
(V2SF "!TARGET_64BIT && TARGET_PAIRED_SINGLE_FLOAT")
- (V2SI "!TARGET_64BIT && TARGET_LOONGSON_VECTORS")
- (V4HI "!TARGET_64BIT && TARGET_LOONGSON_VECTORS")
- (V8QI "!TARGET_64BIT && TARGET_LOONGSON_VECTORS")
+ (V2SI "!TARGET_64BIT && TARGET_LOONGSON_MMI")
+ (V4HI "!TARGET_64BIT && TARGET_LOONGSON_MMI")
+ (V8QI "!TARGET_64BIT && TARGET_LOONGSON_MMI")
(TF "TARGET_64BIT && TARGET_FLOAT64")])
;; In GPR templates, a string like "<d>subu" will expand to "subu" in the
; microMIPS patterns.
(include "micromips.md")
-; ST-Microelectronics Loongson-2E/2F-specific patterns.
-(include "loongson.md")
+; Loongson MultiMedia extensions Instructions (MMI) patterns.
+(include "loongson-mmi.md")
; The MIPS MSA Instructions.
(include "mips-msa.md")
EnumValue
Enum(mips_cb_setting) String(always) Value(MIPS_CB_ALWAYS)
+
+mloongson-mmi
+Target Report Mask(LOONGSON_MMI)
+Use Loongson MultiMedia extensions Instructions (MMI).
-mginv -mno-ginv @gol
-mmicromips -mno-micromips @gol
-mmsa -mno-msa @gol
+-mloongson-mmi -mno-loongson-mmi @gol
-mfpu=@var{fpu-type} @gol
-msmartmips -mno-smartmips @gol
-mpaired-single -mno-paired-single -mdmx -mno-mdmx @gol
@opindex mno-ginv
Use (do not use) the MIPS Global INValidate (GINV) instructions.
+@item -mloongson-mmi
+@itemx -mno-loongson-mmi
+@opindex mloongson-mmi
+@opindex mno-loongson-mmi
+Use (do not use) the MIPS Loongson MultiMedia extensions Instructions (MMI).
+
@item -mlong64
@opindex mlong64
Force @code{long} types to be 64 bits wide. See @option{-mlong32} for
+2018-11-07 Chenghua Xu <paul.hua.gm@gmail.com>
+
+ * gcc.target/mips/loongson-shift-count-truncated-1.c
+ (dg-options): Run with the -mloongson-mmi option.
+ Include loongson-mmiintrin.h instead of loongson.h.
+ * gcc.target/mips/loongson-simd.c: Likewise.
+ * gcc.target/mips/mips.exp (mips_option_groups): Add
+ -mloongson-mmi option.
+ (mips-dg-options): Add mips_option_dependency options "-mips16" vs
+ "-mno-loongson-mmi", "-mmicromips" vs "-mno-loongson-mmi",
+ "-msoft-float" vs "-mno-loongson-mmi".
+ (mips-dg-init): Add -mloongson-mmi option.
+ * lib/target-supports.exp: Rename check_mips_loongson_hw_available
+ to check_mips_loongson_mmi_hw_available.
+ Rename check_effective_target_mips_loongson_runtime to
+ check_effective_target_mips_loongson_mmi_runtime.
+ (check_effective_target_vect_int): Use mips_loongson_mmi instead
+ of mips_loongson when checking et-is-effective-target.
+ (add_options_for_mips_loongson_mmi): New proc.
+ Rename check_effective_target_mips_loongson to
+ check_effective_target_mips_loongson_mmi.
+ (check_effective_target_vect_shift,
+ check_effective_target_whole_vector_shift,
+ check_effective_target_vect_no_int_min_max,
+ check_effective_target_vect_no_align,
+ check_effective_target_vect_short_mult,
+ check_vect_support_and_set_flags): Use mips_loongson_mmi instead
+ of mips_loongson when checking et-is-effective-target.
+
2018-11-07 Richard Biener <rguenther@suse.de>
PR lto/87906
/* loongson.h does not handle or check for MIPS16ness. There doesn't
seem any good reason for it to, given that the Loongson processors
do not support MIPS16. */
-/* { dg-options "isa=loongson -mhard-float -mno-mips16 (REQUIRES_STDLIB)" } */
+/* { dg-options "-mloongson-mmi -mhard-float -mno-mips16 (REQUIRES_STDLIB)" } */
/* See PR 52155. */
-/* { dg-options "isa=loongson -mhard-float -mno-mips16 -mlong64" { mips*-*-elf* && ilp32 } } */
+/* { dg-options "-mloongson-mmi -mhard-float -mno-mips16 -mlong64" { mips*-*-elf* && ilp32 } } */
-#include "loongson.h"
+#include "loongson-mmiintrin.h"
#include <assert.h>
typedef union { int32x2_t v; int32_t a[2]; } int32x2_encap_t;
because inclusion of some system headers e.g. stdint.h will fail due to not
finding stubs-o32_hard.h. */
/* { dg-require-effective-target mips_nanlegacy } */
-/* { dg-options "isa=loongson -mhard-float -mno-micromips -mno-mips16 -flax-vector-conversions (REQUIRES_STDLIB)" } */
+/* { dg-options "-mloongson-mmi -mhard-float -mno-micromips -mno-mips16 -flax-vector-conversions (REQUIRES_STDLIB)" } */
-#include "loongson.h"
+#include "loongson-mmiintrin.h"
#include <stdio.h>
#include <stdint.h>
#include <assert.h>
mcount-ra-address
odd-spreg
msa
+ loongson-mmi
} {
lappend mips_option_groups $option "-m(no-|)$option"
}
"-mno-msa"
#endif
+ #ifdef __mips_loongson_mmi
+ "-mloongson-mmi"
+ #else
+ "-mno-loongson-mmi"
+ #endif
+
0
};
} 0]
mips_option_dependency options "-mno-plt" "addressing=unknown"
mips_option_dependency options "-mabicalls" "-G0"
mips_option_dependency options "-mno-gpopt" "-mexplicit-relocs"
+ mips_option_dependency options "-mips16" "-mno-loongson-mmi"
+ mips_option_dependency options "-mmicromips" "-mno-loongson-mmi"
+ mips_option_dependency options "-msoft-float" "-mno-loongson-mmi"
# Work out information about the current ABI.
set abi_test_option_p [mips_test_option_p options abi]
# Return 1 if the target supports executing Loongson vector instructions,
# 0 otherwise. Cache the result.
-proc check_mips_loongson_hw_available { } {
- return [check_cached_effective_target mips_loongson_hw_available {
+proc check_mips_loongson_mmi_hw_available { } {
+ return [check_cached_effective_target mips_loongson_mmi_hw_available {
# If this is not the right target then we can skip the test.
if { !([istarget mips*-*-*]) } {
expr 0
} else {
- check_runtime_nocache mips_loongson_hw_available {
- #include <loongson.h>
+ check_runtime_nocache mips_loongson_mmi_hw_available {
+ #include <loongson-mmiintrin.h>
int main()
{
asm volatile ("paddw $f2,$f4,$f6");
return 0;
}
- } ""
+ } "-mloongson-mmi"
}
}]
}
# Return 1 if the target supports running Loongson executables, 0 otherwise.
-proc check_effective_target_mips_loongson_runtime { } {
- if { [check_effective_target_mips_loongson]
- && [check_mips_loongson_hw_available] } {
+proc check_effective_target_mips_loongson_mmi_runtime { } {
+ if { [check_effective_target_mips_loongson_mmi]
+ && [check_mips_loongson_mmi_hw_available] } {
return 1
}
return 0
|| [istarget aarch64*-*-*]
|| [is-effective-target arm_neon]
|| ([istarget mips*-*-*]
- && ([et-is-effective-target mips_loongson]
+ && ([et-is-effective-target mips_loongson_mmi]
|| [et-is-effective-target mips_msa]))
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx])
return "$flags -mmsa"
}
+# Add the options needed for the MIPS Loongson MMI Architecture.
+
+proc add_options_for_mips_loongson_mmi { flags } {
+ if { ! [check_effective_target_mips_loongson_mmi] } {
+ return "$flags"
+ }
+ return "$flags -mloongson-mmi"
+}
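A hypothetical test skeleton (not in the patch) showing how the new proc is consumed: in the GCC testsuite, dg-add-options mips_loongson_mmi resolves to add_options_for_mips_loongson_mmi above, and dg-require-effective-target mips_loongson_mmi uses the effective-target check that follows.

/* Hypothetical test file; the directive keywords map onto the procs in
   this file (dg-add-options -> add_options_for_*,
   dg-require-effective-target -> check_effective_target_*).  */
/* { dg-do compile } */
/* { dg-require-effective-target mips_loongson_mmi } */
/* { dg-add-options mips_loongson_mmi } */

#include <loongson-mmiintrin.h>

int32x2_t
add2 (int32x2_t a, int32x2_t b)
{
  return paddw_s (a, b);        /* intrinsic assumed carried over from loongson.h */
}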
+
+
# Return 1 if this is a Loongson-2E or -2F target using an ABI that supports
# the Loongson vector modes.
-proc check_effective_target_mips_loongson { } {
+proc check_effective_target_mips_loongson_mmi { } {
return [check_no_compiler_messages loongson assembly {
+ #if !defined(__mips_loongson_mmi)
+ #error !__mips_loongson_mmi
+ #endif
#if !defined(__mips_loongson_vector_rev)
#error !__mips_loongson_vector_rev
#endif
|| [is-effective-target arm_neon]
|| ([istarget mips*-*-*]
&& ([et-is-effective-target mips_msa]
- || [et-is-effective-target mips_loongson]))
+ || [et-is-effective-target mips_loongson_mmi]))
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx]) }}]
}
|| ([is-effective-target arm_neon]
&& [check_effective_target_arm_little_endian])
|| ([istarget mips*-*-*]
- && [et-is-effective-target mips_loongson])
+ && [et-is-effective-target mips_loongson_mmi])
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx]) } {
set answer 1
|| [istarget spu-*-*]
|| [istarget alpha*-*-*]
|| ([istarget mips*-*-*]
- && [et-is-effective-target mips_loongson]) }}]
+ && [et-is-effective-target mips_loongson_mmi]) }}]
}
# Return 1 if the target plus current options does not support a vector
|| [check_effective_target_arm_vect_no_misalign]
|| ([istarget powerpc*-*-*] && [check_p8vector_hw_available])
|| ([istarget mips*-*-*]
- && [et-is-effective-target mips_loongson]) }}]
+ && [et-is-effective-target mips_loongson_mmi]) }}]
}
# Return 1 if the target supports a vector misalign access, 0 otherwise.
|| [check_effective_target_arm32]
|| ([istarget mips*-*-*]
&& ([et-is-effective-target mips_msa]
- || [et-is-effective-target mips_loongson]))
+ || [et-is-effective-target mips_loongson_mmi]))
|| ([istarget s390*-*-*]
&& [check_effective_target_s390_vx]) }}]
}
if { [check_effective_target_mpaired_single] } {
lappend EFFECTIVE_TARGETS mpaired_single
}
- if { [check_effective_target_mips_loongson] } {
- lappend EFFECTIVE_TARGETS mips_loongson
+ if { [check_effective_target_mips_loongson_mmi] } {
+ lappend EFFECTIVE_TARGETS mips_loongson_mmi
}
if { [check_effective_target_mips_msa] } {
lappend EFFECTIVE_TARGETS mips_msa