+2011-07-15 Bernd Schmidt <bernds@codesourcery.com>
+
+ * gcc_update: Add C6X generated files.
+ * config-list.mk: Add c6x-elf and c6x-uclinux.
+
2011-07-01 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
* config-list.mk (LIST): Append OPT-enable-obsolete to
arm-linux-androideabi arm-uclinux_eabi arm-ecos-elf arm-eabi \
arm-symbianelf arm-rtems arm-elf arm-wince-pe avr-rtems avr-elf \
bfin-elf bfin-uclinux bfin-linux-uclibc bfin-rtems bfin-openbsd \
- cris-elf cris-linux crisv32-elf crisv32-linux fido-elf \
+ c6x-elf c6x-uclinux cris-elf cris-linux crisv32-elf crisv32-linux fido-elf \
fr30-elf frv-elf frv-linux h8300-elf h8300-rtems hppa-linux-gnu \
hppa-linux-gnuOPT-enable-sjlj-exceptions=yes hppa64-linux-gnu \
hppa2.0-hpux10.1 hppa64-hpux11.3 \
gcc/config/arm/arm-tune.md: gcc/config/arm/arm-cores.def gcc/config/arm/gentune.sh
gcc/config/arm/arm-tables.opt: gcc/config/arm/arm-arches.def gcc/config/arm/arm-cores.def gcc/config/arm/arm-fpus.def gcc/config/arm/genopt.sh
gcc/config/avr/avr-tables.opt: gcc/config/avr/avr-mcus.def gcc/config/avr/genopt.sh
+gcc/config/c6x/c6x-tables.opt: gcc/config/c6x/c6x-isas.def gcc/config/c6x/genopt.sh
+gcc/config/c6x/c6x-sched.md: gcc/config/c6x/c6x-sched.md.in gcc/config/c6x/gensched.sh
+gcc/config/c6x/c6x-mult.md: gcc/config/c6x/c6x-mult.md.in gcc/config/c6x/genmult.sh
gcc/config/m68k/m68k-tables.opt: gcc/config/m68k/m68k-devices.def gcc/config/m68k/m68k-isas.def gcc/config/m68k/m68k-microarchs.def gcc/config/m68k/genopt.sh
gcc/config/mips/mips-tables.opt: gcc/config/mips/mips-cpus.def gcc/config/mips/genopt.sh
gcc/config/rs6000/rs6000-tables.opt: gcc/config/rs6000/rs6000-cpus.def gcc/config/rs6000/genopt.sh
+2011-07-15 Bernd Schmidt <bernds@codesourcery.com>
+
+ * doc/invoke.texi (C6X Options): New section.
+ * doc/md.texi (TI C6X family): New section.
+ * config.gcc: Handle tic6x, in particular tic6x-*-elf and
+ tic6x-*-uclinux.
+ * longlong.h (add_ssaaaa, __umulsidi3, umul_ppmm,
+ count_leading_zeros, count_trailing_zeros, UMUL_TIME, UDIV_TIME):
+ Provide C6X definitions.
+ * config/c6x/c6x.md: New file.
+ * config/c6x/constraints.md: New file.
+ * config/c6x/predicates.md: New file.
+ * config/c6x/c6x-sched.md.in: New file.
+ * config/c6x/c6x-sched.md: New file.
+ * config/c6x/gensched.sh: New file.
+ * config/c6x/c6x-mult.md.in: New file.
+ * config/c6x/genmult.sh: New file.
+ * config/c6x/c6x-mult.md: New file.
+ * config/c6x/sync.md: New file.
+ * config/c6x/c6x-protos.h: New file.
+ * config/c6x/sfp-machine.h: New file.
+ * config/c6x/c6x.c: New file.
+ * config/c6x/c6x.h: New file.
+ * config/c6x/crti.s: New file.
+ * config/c6x/crtn.s: New file.
+ * config/c6x/lib1funcs.asm: New file.
+ * config/c6x/c6x-modes.def: New file.
+ * config/c6x/genopt.sh: New file.
+ * config/c6x/c6x.opt: New file.
+ * config/c6x/c6x-tables.opt: New file.
+ * config/c6x/c6x-opts.h: New file.
+ * config/c6x/c6x-isas.def: New file.
+ * config/c6x/elf.h: New file.
+ * config/c6x/elf-common.h: New file.
+ * config/c6x/uclinux-elf.h: New file.
+ * config/c6x/t-c6x: New file.
+ * config/c6x/t-c6x-elf: New file.
+ * config/c6x/t-c6x-uclinux: New file.
+ * config/c6x/t-c6x-softfp: New file.
+ * config/c6x/gtd.c: New file.
+ * config/c6x/gtf.c: New file.
+ * config/c6x/ltd.c: New file.
+ * config/c6x/ltf.c: New file.
+ * config/c6x/ged.c: New file.
+ * config/c6x/gef.c: New file.
+ * config/c6x/led.c: New file.
+ * config/c6x/lef.c: New file.
+ * config/c6x/eqd.c: New file.
+ * config/c6x/eqf.c: New file.
+ * config/c6x/libgcc-c6xeabi.ver: New file.
+
2011-07-14 Andrew Pinski <pinskia@gmail.com>
PR tree-opt/49309
v850*-*-*)
cpu_type=v850
;;
+tic6x-*-*)
+ cpu_type=c6x
+ # Installed intrinsics header; the file in the tree is
+ # gcc/config/c6x/c6x_intrin.h (there is no "c6x_intrinsics.h").
+ extra_headers="c6x_intrin.h"
+ extra_options="${extra_options} c6x/c6x-tables.opt"
+ ;;
xtensa*-*-*)
extra_options="${extra_options} fused-madd.opt"
;;
c_target_objs="${c_target_objs} spu-c.o"
cxx_target_objs="${cxx_target_objs} spu-c.o"
;;
-
+tic6x-*-elf)
+ tm_file="elfos.h ${tm_file} c6x/elf-common.h c6x/elf.h"
+ tm_file="${tm_file} dbxelf.h tm-dwarf2.h newlib-stdint.h"
+ libgcc_tm_file="${libgcc_tm_file} c6x/c6x-abi.h"
+ tmake_file="c6x/t-c6x c6x/t-c6x-elf"
+ tmake_file="${tmake_file} c6x/t-c6x-softfp soft-fp/t-softfp"
+ use_collect2=no
+ ;;
+tic6x-*-uclinux)
+ tm_file="elfos.h ${tm_file} gnu-user.h linux.h c6x/elf-common.h c6x/uclinux-elf.h"
+ tm_file="${tm_file} dbxelf.h tm-dwarf2.h glibc-stdint.h"
+ tm_file="${tm_file} ./sysroot-suffix.h"
+ libgcc_tm_file="${libgcc_tm_file} c6x/c6x-abi.h"
+ tmake_file="t-slibgcc-elf-ver t-sysroot-suffix"
+ tmake_file="${tmake_file} c6x/t-c6x c6x/t-c6x-elf c6x/t-c6x-uclinux"
+ tmake_file="${tmake_file} c6x/t-c6x-softfp soft-fp/t-softfp"
+ use_collect2=no
+ ;;
v850*-*-*)
case ${target} in
v850e2v3-*-*)
done
;;
+ tic6x-*-*)
+ supported_defaults="arch"
+
+ case ${with_arch} in
+ "" | c62x | c64x | c64x+ | c67x | c67x+ | c674x)
+ # OK
+ ;;
+ *)
+ echo "Unknown arch used in --with-arch=$with_arch." 1>&2
+ exit 1
+ ;;
+ esac
+ ;;
+
v850*-*-*)
supported_defaults=cpu
case ${with_cpu} in
--- /dev/null
+/* C6X ISA names.
+ Copyright (C) 2011
+ Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* Define ISAs for the -march option, used both in c6x.c and to
+ generate c6x-tables.opt. Before including this file, define a
+ macro:
+
+ C6X_ISA (NAME, ENUM_VALUE, FLAGS)
+
+ where NAME is the name for use with -march=, ENUM_VALUE is an enum
+ corresponding to this arch, and FLAGS is a combination of flags
+ that together specify the available instructions. */
+
+C6X_ISA("c62x", C6X_CPU_C62X, C6X_INSNS_C62X)
+C6X_ISA("c64x", C6X_CPU_C64X, C6X_INSNS_C62X | C6X_INSNS_C64X)
+C6X_ISA("c64x+", C6X_CPU_C64XP, C6X_INSNS_C62X | C6X_INSNS_C64X | C6X_INSNS_C64XP)
+C6X_ISA("c67x", C6X_CPU_C67X, C6X_INSNS_C62X | C6X_INSNS_C67X)
+C6X_ISA("c67x+", C6X_CPU_C67XP, C6X_INSNS_C62X | C6X_INSNS_C67X | C6X_INSNS_C67XP)
+C6X_ISA("c674x", C6X_CPU_C674X,
+ (C6X_INSNS_C62X | C6X_INSNS_C64X | C6X_INSNS_C64XP | C6X_INSNS_C67X
+ | C6X_INSNS_C67XP | C6X_INSNS_C674X))
--- /dev/null
+/* Definitions of target machine for GNU compiler, for TI C6x.
+ Copyright (C) 2010, 2011 Free Software Foundation, Inc.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+VECTOR_MODES (INT, 4); /* V4QI V2HI */
+VECTOR_MODES (INT, 8); /* V8QI V4HI V2SI */
+
+VECTOR_MODE (FRACT, SQ, 2); /* V2SQ. */
+VECTOR_MODE (FRACT, HQ, 2); /* V2HQ. */
--- /dev/null
+;; -*- buffer-read-only: t -*-
+;; Generated automatically from c6x-mult.md.in by genmult.sh
+;; Multiplication patterns for TI C6X.
+;; This file is processed by genmult.sh to produce two variants of each
+;; pattern, a normal one and a real_mult variant for modulo scheduling.
+;; Copyright (C) 2010, 2011 Free Software Foundation, Inc.
+;; Contributed by Bernd Schmidt <bernds@codesourcery.com>
+;; Contributed by CodeSourcery.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; -------------------------------------------------------------------------
+;; Miscellaneous insns that execute on the M units
+;; -------------------------------------------------------------------------
+
+(define_insn "rotlsi3"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,a,b")
+ (rotate:SI (match_operand:SI 1 "register_operand" "a,b,?b,?a")
+ (match_operand:SI 2 "reg_or_ucst5_operand" "aIu5,bIu5,aIu5,bIu5")))]
+ "TARGET_INSNS_64"
+ "%|%.\\trotl\\t%$\\t%1, %2, %0"
+ [(set_attr "units" "m")
+ (set_attr "type" "mpy2")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "bitrevsi2"
+ [(set (match_operand:SI 0 "register_operand" "=a,a,b,b")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "a,?b,b,?a")]
+ UNSPEC_BITREV))]
+ "TARGET_INSNS_64"
+ "%|%.\\tbitr\\t%$\\t%1, %0"
+ [(set_attr "units" "m")
+ (set_attr "type" "mpy2")
+ (set_attr "cross" "n,y,n,y")])
+
+;; Vector average.
+
+(define_insn "avgv2hi3"
+ [(set (match_operand:V2HI 0 "register_operand" "=a,b,a,b")
+ (unspec:V2HI [(match_operand:V2HI 1 "register_operand" "a,b,?b,?a")
+ (match_operand:V2HI 2 "register_operand" "a,b,a,b")] UNSPEC_AVG))]
+ "TARGET_INSNS_64"
+ "%|%.\\tavg2\\t%$\\t%1, %2, %0"
+ [(set_attr "units" "m")
+ (set_attr "type" "mpy2")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "uavgv4qi3"
+ [(set (match_operand:V4QI 0 "register_operand" "=a,b,a,b")
+ (unspec:V4QI [(match_operand:V4QI 1 "register_operand" "a,b,?b,?a")
+ (match_operand:V4QI 2 "register_operand" "a,b,a,b")] UNSPEC_AVG))]
+ "TARGET_INSNS_64"
+ "%|%.\\tavgu4\\t%$\\t%1, %2, %0"
+ [(set_attr "units" "m")
+ (set_attr "type" "mpy2")
+ (set_attr "cross" "n,n,y,y")])
+
+;; -------------------------------------------------------------------------
+;; Multiplication
+;; -------------------------------------------------------------------------
+
+(define_insn "mulhi3"
+ [(set (match_operand:HI 0 "register_operand" "=a,b,a,b")
+ (mult:HI (match_operand:HI 1 "register_operand" "a,b,?b,?a")
+ (match_operand:HI 2 "reg_or_scst5_operand" "aIs5,bIs5,aIs5,bIs5")))]
+ ""
+ "%|%.\\tmpy\\t%$\\t%2, %1, %0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "mulhisi3_const"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,ab")
+ (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "register_operand" "a,b,?ab"))
+ (match_operand:HI 2 "scst5_operand" "Is5,Is5,Is5")))]
+ ""
+ "%|%.\\tmpy\\t%$\\t%2, %1, %0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y")])
+
+(define_insn "*mulhisi3_insn"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,a,b")
+ (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "register_operand" "%a,b,?a,?b"))
+ (sign_extend:SI
+ (match_operand:HI 2 "reg_or_scst5_operand" "a,b,b,a"))))]
+ ""
+ "%|%.\\tmpy\\t%$\\t%1, %2, %0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "mulhisi3_lh"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,a,b")
+ (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "register_operand" "a,b,?a,?b"))
+ (ashiftrt:SI
+ (match_operand:SI 2 "register_operand" "a,b,b,a")
+ (const_int 16))))]
+ ""
+ "%|%.\\tmpylh\\t%$\\t%1, %2, %0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "mulhisi3_hl"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,a,b")
+ (mult:SI (ashiftrt:SI
+ (match_operand:SI 1 "register_operand" "a,b,?a,?b")
+ (const_int 16))
+ (sign_extend:SI
+ (match_operand:HI 2 "register_operand" "a,b,b,a"))))]
+ ""
+ "%|%.\\tmpyhl\\t%$\\t%1, %2, %0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "mulhisi3_hh"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,a,b")
+ (mult:SI (ashiftrt:SI
+ (match_operand:SI 1 "register_operand" "%a,b,?a,?b")
+ (const_int 16))
+ (ashiftrt:SI
+ (match_operand:SI 2 "register_operand" "a,b,b,a")
+ (const_int 16))))]
+ ""
+ "%|%.\\tmpyh\\t%$\\t%1, %2, %0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "umulhisi3"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,a,b")
+ (mult:SI (zero_extend:SI
+ (match_operand:HI 1 "register_operand" "%a,b,?a,?b"))
+ (zero_extend:SI
+ (match_operand:HI 2 "register_operand" "a,b,b,a"))))]
+ ""
+ "%|%.\\tmpyu\\t%$\\t%1, %2, %0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "umulhisi3_lh"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,a,b")
+ (mult:SI (zero_extend:SI
+ (match_operand:HI 1 "register_operand" "a,b,?a,?b"))
+ (lshiftrt:SI
+ (match_operand:SI 2 "register_operand" "a,b,b,a")
+ (const_int 16))))]
+ ""
+ "%|%.\\tmpylhu\\t%$\\t%1, %2, %0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "umulhisi3_hl"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,a,b")
+ (mult:SI (lshiftrt:SI
+ (match_operand:SI 1 "register_operand" "a,b,?a,?b")
+ (const_int 16))
+ (zero_extend:SI
+ (match_operand:HI 2 "register_operand" "a,b,b,a"))))]
+ ""
+ "%|%.\\tmpyhlu\\t%$\\t%1, %2, %0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "umulhisi3_hh"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,a,b")
+ (mult:SI (lshiftrt:SI
+ (match_operand:SI 1 "register_operand" "%a,b,?a,?b")
+ (const_int 16))
+ (lshiftrt:SI
+ (match_operand:SI 2 "register_operand" "a,b,b,a")
+ (const_int 16))))]
+ ""
+ "%|%.\\tmpyhu\\t%$\\t%1, %2, %0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "usmulhisi3_const"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,ab")
+ (mult:SI (zero_extend:SI
+ (match_operand:HI 1 "register_operand" "a,b,?ab"))
+ (match_operand:SI 2 "scst5_operand" "Is5,Is5,Is5")))]
+ ""
+ "%|%.\\tmpysu\\t%$\\t%2, %1, %0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y")])
+
+(define_insn "*usmulhisi3_insn"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,a,b")
+ (mult:SI (zero_extend:SI
+ (match_operand:HI 1 "register_operand" "a,b,?a,?b"))
+ (sign_extend:SI
+ (match_operand:HI 2 "reg_or_scst5_operand" "aIs5,bIs5,bIs5,aIs5"))))]
+ ""
+ "%|%.\\tmpyus\\t%$\\t%1, %2, %0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "usmulhisi3_lh"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,a,b")
+ (mult:SI (zero_extend:SI
+ (match_operand:HI 1 "register_operand" "a,b,?a,?b"))
+ (ashiftrt:SI
+ (match_operand:SI 2 "register_operand" "a,b,b,a")
+ (const_int 16))))]
+ ""
+ "%|%.\\tmpyluhs\\t%$\\t%1, %2, %0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "usmulhisi3_hl"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,a,b")
+ (mult:SI (lshiftrt:SI
+ (match_operand:SI 1 "register_operand" "a,b,?a,?b")
+ (const_int 16))
+ (sign_extend:SI
+ (match_operand:HI 2 "register_operand" "a,b,b,a"))))]
+ ""
+ "%|%.\\tmpyhuls\\t%$\\t%1, %2, %0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "usmulhisi3_hh"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,a,b")
+ (mult:SI (lshiftrt:SI
+ (match_operand:SI 1 "register_operand" "a,b,?a,?b")
+ (const_int 16))
+ (ashiftrt:SI
+ (match_operand:SI 2 "register_operand" "a,b,b,a")
+ (const_int 16))))]
+ ""
+ "%|%.\\tmpyhus\\t%$\\t%1, %2, %0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "mulsi3_insn"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,a,b")
+ (mult:SI (match_operand:SI 1 "register_operand" "%a,b,?a,?b")
+ (match_operand:SI 2 "register_operand" "a,b,b,a")))]
+ "TARGET_MPY32"
+ "%|%.\\tmpy32\\t%$\\t%1, %2, %0"
+ [(set_attr "type" "mpy4")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "<u>mulsidi3"
+ [(set (match_operand:DI 0 "register_operand" "=a,b,a,b")
+ (mult:DI (any_ext:DI
+ (match_operand:SI 1 "register_operand" "%a,b,?a,?b"))
+ (any_ext:DI
+ (match_operand:SI 2 "register_operand" "a,b,b,a"))))]
+ "TARGET_MPY32"
+ "%|%.\\tmpy32<u>\\t%$\\t%1, %2, %0"
+ [(set_attr "type" "mpy4")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "usmulsidi3"
+ [(set (match_operand:DI 0 "register_operand" "=a,b,a,b")
+ (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "register_operand" "a,b,?a,?b"))
+ (sign_extend:DI
+ (match_operand:SI 2 "register_operand" "a,b,b,a"))))]
+ "TARGET_MPY32"
+ "%|%.\\tmpy32us\\t%$\\t%1, %2, %0"
+ [(set_attr "type" "mpy4")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; Widening vector multiply and dot product
+
+(define_insn "mulv2hiv2si3"
+ [(set (match_operand:V2SI 0 "register_operand" "=a,b,a,b")
+ (mult:V2SI
+ (sign_extend:V2SI (match_operand:V2HI 1 "register_operand" "a,b,a,b"))
+ (sign_extend:V2SI (match_operand:V2HI 2 "register_operand" "a,b,?b,?a"))))]
+ "TARGET_INSNS_64"
+ "%|%.\\tmpy2\\t%$\\t%1, %2, %0"
+ [(set_attr "type" "mpy4")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "umulv4qiv4hi3"
+ [(set (match_operand:V4HI 0 "register_operand" "=a,b,a,b")
+ (mult:V4HI
+ (zero_extend:V4HI (match_operand:V4QI 1 "register_operand" "a,b,a,b"))
+ (zero_extend:V4HI (match_operand:V4QI 2 "register_operand" "a,b,?b,?a"))))]
+ "TARGET_INSNS_64"
+ "%|%.\\tmpyu4\\t%$\\t%1, %2, %0"
+ [(set_attr "type" "mpy4")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "usmulv4qiv4hi3"
+ [(set (match_operand:V4HI 0 "register_operand" "=a,b,a,b")
+ (mult:V4HI
+ (zero_extend:V4HI (match_operand:V4QI 1 "register_operand" "a,b,?b,?a"))
+ (sign_extend:V4HI (match_operand:V4QI 2 "register_operand" "a,b,a,b"))))]
+ "TARGET_INSNS_64"
+ "%|%.\\tmpyus4\\t%$\\t%1, %2, %0"
+ [(set_attr "type" "mpy4")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "dotv2hi"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,a,b")
+ (plus:SI
+ (mult:SI
+ (sign_extend:SI
+ (vec_select:HI
+ (match_operand:V2HI 1 "register_operand" "a,b,a,b")
+ (parallel [(const_int 0)])))
+ (sign_extend:SI
+ (vec_select:HI
+ (match_operand:V2HI 2 "register_operand" "a,b,?b,?a")
+ (parallel [(const_int 0)]))))
+ (mult:SI
+ (sign_extend:SI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 1)])))
+ (sign_extend:SI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 1)]))))))]
+ "TARGET_INSNS_64"
+ "%|%.\\tdotp2\\t%$\\t%1, %2, %0"
+ [(set_attr "type" "mpy4")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; Fractional multiply
+
+(define_insn "mulv2hqv2sq3"
+ [(set (match_operand:V2SQ 0 "register_operand" "=a,b,a,b")
+ (ss_mult:V2SQ
+ (fract_convert:V2SQ
+ (match_operand:V2HQ 1 "register_operand" "%a,b,?a,?b"))
+ (fract_convert:V2SQ
+ (match_operand:V2HQ 2 "register_operand" "a,b,b,a"))))]
+ ""
+ "%|%.\\tsmpy2\\t%$\\t%1, %2, %0"
+ [(set_attr "type" "mpy4")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "mulhqsq3"
+ [(set (match_operand:SQ 0 "register_operand" "=a,b,a,b")
+ (ss_mult:SQ
+ (fract_convert:SQ
+ (match_operand:HQ 1 "register_operand" "%a,b,?a,?b"))
+ (fract_convert:SQ
+ (match_operand:HQ 2 "register_operand" "a,b,b,a"))))]
+ ""
+ "%|%.\\tsmpy\\t%$\\t%1, %2, %0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "mulhqsq3_lh"
+ [(set (match_operand:SQ 0 "register_operand" "=a,b,a,b")
+ (ss_mult:SQ
+ (fract_convert:SQ
+ (match_operand:HQ 1 "register_operand" "a,b,?a,?b"))
+ (fract_convert:SQ
+ (truncate:HQ (match_operand:SQ 2 "register_operand" "a,b,b,a")))))]
+ ""
+ "%|%.\\tsmpylh\\t%$\\t%1, %2, %0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; Saturating fractional multiply, high half of operand 1 by operand 2
+;; (SMPYHL).  As in mulhisi3_hl above, src1 must live in the same
+;; register file as the destination/unit; only src2 may come over the
+;; cross path, which is what the "cross" attr "n,n,y,y" describes.
+;; NOTE(review): this fix belongs in c6x-mult.md.in as well (this file
+;; is generated from it by genmult.sh) — apply there and regenerate.
+(define_insn "mulhqsq3_hl"
+  [(set (match_operand:SQ 0 "register_operand" "=a,b,a,b")
+	(ss_mult:SQ
+	 (fract_convert:SQ
+	  (truncate:HQ (match_operand:SQ 1 "register_operand" "a,b,?a,?b")))
+	 (fract_convert:SQ
+	  (match_operand:HQ 2 "register_operand" "a,b,b,a"))))]
+  ""
+  "%|%.\\tsmpyhl\\t%$\\t%1, %2, %0"
+  [(set_attr "type" "mpy2")
+   (set_attr "units" "m")
+   (set_attr "cross" "n,n,y,y")])
+
+;; Saturating fractional multiply of the two high halves (SMPYH).
+;; Commutative, hence '%' on operand 1; operand 1 stays on the unit
+;; side and only operand 2 may use the cross path, matching
+;; mulhisi3_hh above and the "cross" attr "n,n,y,y".
+;; NOTE(review): this fix belongs in c6x-mult.md.in as well (this file
+;; is generated from it by genmult.sh) — apply there and regenerate.
+(define_insn "mulhqsq3_hh"
+  [(set (match_operand:SQ 0 "register_operand" "=a,b,a,b")
+	(ss_mult:SQ
+	 (fract_convert:SQ
+	  (truncate:HQ (match_operand:SQ 1 "register_operand" "%a,b,?a,?b")))
+	 (fract_convert:SQ
+	  (truncate:HQ (match_operand:SQ 2 "register_operand" "a,b,b,a")))))]
+  ""
+  "%|%.\\tsmpyh\\t%$\\t%1, %2, %0"
+  [(set_attr "type" "mpy2")
+   (set_attr "units" "m")
+   (set_attr "cross" "n,n,y,y")])
+;; Multiplication patterns for TI C6X.
+;; This file is processed by genmult.sh to produce two variants of each
+;; pattern, a normal one and a real_mult variant for modulo scheduling.
+;; Copyright (C) 2010, 2011 Free Software Foundation, Inc.
+;; Contributed by Bernd Schmidt <bernds@codesourcery.com>
+;; Contributed by CodeSourcery.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; -------------------------------------------------------------------------
+;; Miscellaneous insns that execute on the M units
+;; -------------------------------------------------------------------------
+
+(define_insn "rotlsi3_real"
+ [(unspec [(match_operand:SI 0 "const_int_operand" "=JA,JB,JA,JB")
+ (rotate:SI (match_operand:SI 1 "register_operand" "a,b,?b,?a")
+ (match_operand:SI 2 "reg_or_ucst5_operand" "aIu5,bIu5,aIu5,bIu5"))] UNSPEC_REAL_MULT)]
+ "TARGET_INSNS_64"
+ "%|%.\\trotl\\t%$\\t%1, %2, %k0"
+ [(set_attr "units" "m")
+ (set_attr "type" "mpy2")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "bitrevsi2_real"
+ [(unspec [(match_operand:SI 0 "const_int_operand" "=JA,JA,JB,JB")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "a,?b,b,?a")]
+ UNSPEC_BITREV)] UNSPEC_REAL_MULT)]
+ "TARGET_INSNS_64"
+ "%|%.\\tbitr\\t%$\\t%1, %k0"
+ [(set_attr "units" "m")
+ (set_attr "type" "mpy2")
+ (set_attr "cross" "n,y,n,y")])
+
+;; Vector average.
+
+(define_insn "avgv2hi3_real"
+ [(unspec [(match_operand:SI 0 "const_int_operand" "=JA,JB,JA,JB")
+ (unspec:V2HI [(match_operand:V2HI 1 "register_operand" "a,b,?b,?a")
+ (match_operand:V2HI 2 "register_operand" "a,b,a,b")] UNSPEC_AVG)] UNSPEC_REAL_MULT)]
+ "TARGET_INSNS_64"
+ "%|%.\\tavg2\\t%$\\t%1, %2, %k0"
+ [(set_attr "units" "m")
+ (set_attr "type" "mpy2")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "uavgv4qi3_real"
+ [(unspec [(match_operand:SI 0 "const_int_operand" "=JA,JB,JA,JB")
+ (unspec:V4QI [(match_operand:V4QI 1 "register_operand" "a,b,?b,?a")
+ (match_operand:V4QI 2 "register_operand" "a,b,a,b")] UNSPEC_AVG)] UNSPEC_REAL_MULT)]
+ "TARGET_INSNS_64"
+ "%|%.\\tavgu4\\t%$\\t%1, %2, %k0"
+ [(set_attr "units" "m")
+ (set_attr "type" "mpy2")
+ (set_attr "cross" "n,n,y,y")])
+
+;; -------------------------------------------------------------------------
+;; Multiplication
+;; -------------------------------------------------------------------------
+
+(define_insn "mulhi3_real"
+ [(unspec [(match_operand:HI 0 "const_int_operand" "=JA,JB,JA,JB")
+ (mult:HI (match_operand:HI 1 "register_operand" "a,b,?b,?a")
+ (match_operand:HI 2 "reg_or_scst5_operand" "aIs5,bIs5,aIs5,bIs5"))] UNSPEC_REAL_MULT)]
+ ""
+ "%|%.\\tmpy\\t%$\\t%2, %1, %k0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "mulhisi3_const_real"
+ [(unspec [(match_operand:SI 0 "const_int_operand" "=JA,JB,JAJB")
+ (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "register_operand" "a,b,?ab"))
+ (match_operand:HI 2 "scst5_operand" "Is5,Is5,Is5"))] UNSPEC_REAL_MULT)]
+ ""
+ "%|%.\\tmpy\\t%$\\t%2, %1, %k0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y")])
+
+(define_insn "*mulhisi3_insn_real"
+ [(unspec [(match_operand:SI 0 "const_int_operand" "=JA,JB,JA,JB")
+ (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "register_operand" "%a,b,?a,?b"))
+ (sign_extend:SI
+ (match_operand:HI 2 "reg_or_scst5_operand" "a,b,b,a")))] UNSPEC_REAL_MULT)]
+ ""
+ "%|%.\\tmpy\\t%$\\t%1, %2, %k0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "mulhisi3_lh_real"
+ [(unspec [(match_operand:SI 0 "const_int_operand" "=JA,JB,JA,JB")
+ (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "register_operand" "a,b,?a,?b"))
+ (ashiftrt:SI
+ (match_operand:SI 2 "register_operand" "a,b,b,a")
+ (const_int 16)))] UNSPEC_REAL_MULT)]
+ ""
+ "%|%.\\tmpylh\\t%$\\t%1, %2, %k0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "mulhisi3_hl_real"
+ [(unspec [(match_operand:SI 0 "const_int_operand" "=JA,JB,JA,JB")
+ (mult:SI (ashiftrt:SI
+ (match_operand:SI 1 "register_operand" "a,b,?a,?b")
+ (const_int 16))
+ (sign_extend:SI
+ (match_operand:HI 2 "register_operand" "a,b,b,a")))] UNSPEC_REAL_MULT)]
+ ""
+ "%|%.\\tmpyhl\\t%$\\t%1, %2, %k0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "mulhisi3_hh_real"
+ [(unspec [(match_operand:SI 0 "const_int_operand" "=JA,JB,JA,JB")
+ (mult:SI (ashiftrt:SI
+ (match_operand:SI 1 "register_operand" "%a,b,?a,?b")
+ (const_int 16))
+ (ashiftrt:SI
+ (match_operand:SI 2 "register_operand" "a,b,b,a")
+ (const_int 16)))] UNSPEC_REAL_MULT)]
+ ""
+ "%|%.\\tmpyh\\t%$\\t%1, %2, %k0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "umulhisi3_real"
+ [(unspec [(match_operand:SI 0 "const_int_operand" "=JA,JB,JA,JB")
+ (mult:SI (zero_extend:SI
+ (match_operand:HI 1 "register_operand" "%a,b,?a,?b"))
+ (zero_extend:SI
+ (match_operand:HI 2 "register_operand" "a,b,b,a")))] UNSPEC_REAL_MULT)]
+ ""
+ "%|%.\\tmpyu\\t%$\\t%1, %2, %k0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "umulhisi3_lh_real"
+ [(unspec [(match_operand:SI 0 "const_int_operand" "=JA,JB,JA,JB")
+ (mult:SI (zero_extend:SI
+ (match_operand:HI 1 "register_operand" "a,b,?a,?b"))
+ (lshiftrt:SI
+ (match_operand:SI 2 "register_operand" "a,b,b,a")
+ (const_int 16)))] UNSPEC_REAL_MULT)]
+ ""
+ "%|%.\\tmpylhu\\t%$\\t%1, %2, %k0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "umulhisi3_hl_real"
+ [(unspec [(match_operand:SI 0 "const_int_operand" "=JA,JB,JA,JB")
+ (mult:SI (lshiftrt:SI
+ (match_operand:SI 1 "register_operand" "a,b,?a,?b")
+ (const_int 16))
+ (zero_extend:SI
+ (match_operand:HI 2 "register_operand" "a,b,b,a")))] UNSPEC_REAL_MULT)]
+ ""
+ "%|%.\\tmpyhlu\\t%$\\t%1, %2, %k0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "umulhisi3_hh_real"
+ [(unspec [(match_operand:SI 0 "const_int_operand" "=JA,JB,JA,JB")
+ (mult:SI (lshiftrt:SI
+ (match_operand:SI 1 "register_operand" "%a,b,?a,?b")
+ (const_int 16))
+ (lshiftrt:SI
+ (match_operand:SI 2 "register_operand" "a,b,b,a")
+ (const_int 16)))] UNSPEC_REAL_MULT)]
+ ""
+ "%|%.\\tmpyhu\\t%$\\t%1, %2, %k0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "usmulhisi3_const_real"
+ [(unspec [(match_operand:SI 0 "const_int_operand" "=JA,JB,JAJB")
+ (mult:SI (zero_extend:SI
+ (match_operand:HI 1 "register_operand" "a,b,?ab"))
+ (match_operand:SI 2 "scst5_operand" "Is5,Is5,Is5"))] UNSPEC_REAL_MULT)]
+ ""
+ "%|%.\\tmpysu\\t%$\\t%2, %1, %k0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y")])
+
+(define_insn "*usmulhisi3_insn_real"
+ [(unspec [(match_operand:SI 0 "const_int_operand" "=JA,JB,JA,JB")
+ (mult:SI (zero_extend:SI
+ (match_operand:HI 1 "register_operand" "a,b,?a,?b"))
+ (sign_extend:SI
+ (match_operand:HI 2 "reg_or_scst5_operand" "aIs5,bIs5,bIs5,aIs5")))] UNSPEC_REAL_MULT)]
+ ""
+ "%|%.\\tmpyus\\t%$\\t%1, %2, %k0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "usmulhisi3_lh_real"
+ [(unspec [(match_operand:SI 0 "const_int_operand" "=JA,JB,JA,JB")
+ (mult:SI (zero_extend:SI
+ (match_operand:HI 1 "register_operand" "a,b,?a,?b"))
+ (ashiftrt:SI
+ (match_operand:SI 2 "register_operand" "a,b,b,a")
+ (const_int 16)))] UNSPEC_REAL_MULT)]
+ ""
+ "%|%.\\tmpyluhs\\t%$\\t%1, %2, %k0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "usmulhisi3_hl_real"
+ [(unspec [(match_operand:SI 0 "const_int_operand" "=JA,JB,JA,JB")
+ (mult:SI (lshiftrt:SI
+ (match_operand:SI 1 "register_operand" "a,b,?a,?b")
+ (const_int 16))
+ (sign_extend:SI
+ (match_operand:HI 2 "register_operand" "a,b,b,a")))] UNSPEC_REAL_MULT)]
+ ""
+ "%|%.\\tmpyhuls\\t%$\\t%1, %2, %k0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "usmulhisi3_hh_real"
+ [(unspec [(match_operand:SI 0 "const_int_operand" "=JA,JB,JA,JB")
+ (mult:SI (lshiftrt:SI
+ (match_operand:SI 1 "register_operand" "a,b,?a,?b")
+ (const_int 16))
+ (ashiftrt:SI
+ (match_operand:SI 2 "register_operand" "a,b,b,a")
+ (const_int 16)))] UNSPEC_REAL_MULT)]
+ ""
+ "%|%.\\tmpyhus\\t%$\\t%1, %2, %k0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "mulsi3_insn_real"
+ [(unspec [(match_operand:SI 0 "const_int_operand" "=JA,JB,JA,JB")
+ (mult:SI (match_operand:SI 1 "register_operand" "%a,b,?a,?b")
+ (match_operand:SI 2 "register_operand" "a,b,b,a"))] UNSPEC_REAL_MULT)]
+ "TARGET_MPY32"
+ "%|%.\\tmpy32\\t%$\\t%1, %2, %k0"
+ [(set_attr "type" "mpy4")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "<u>mulsidi3_real"
+ [(unspec [(match_operand:DI 0 "const_int_operand" "=JA,JB,JA,JB")
+ (mult:DI (any_ext:DI
+ (match_operand:SI 1 "register_operand" "%a,b,?a,?b"))
+ (any_ext:DI
+ (match_operand:SI 2 "register_operand" "a,b,b,a")))] UNSPEC_REAL_MULT)]
+ "TARGET_MPY32"
+ "%|%.\\tmpy32<u>\\t%$\\t%1, %2, %K0"
+ [(set_attr "type" "mpy4")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "usmulsidi3_real"
+ [(unspec [(match_operand:DI 0 "const_int_operand" "=JA,JB,JA,JB")
+ (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "register_operand" "a,b,?a,?b"))
+ (sign_extend:DI
+ (match_operand:SI 2 "register_operand" "a,b,b,a")))] UNSPEC_REAL_MULT)]
+ "TARGET_MPY32"
+ "%|%.\\tmpy32us\\t%$\\t%1, %2, %K0"
+ [(set_attr "type" "mpy4")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; Widening vector multiply and dot product
+;; ("real" unspec variants for modulo scheduling; operand 0 is a
+;; const_int destination specifier with JA/JB constraints.)
+
+;; MPY2: signed V2HI x V2HI -> V2SI widening elementwise multiply.
+(define_insn "mulv2hiv2si3_real"
+ [(unspec [(match_operand:V2SI 0 "const_int_operand" "=JA,JB,JA,JB")
+ (mult:V2SI
+ (sign_extend:V2SI (match_operand:V2HI 1 "register_operand" "a,b,a,b"))
+ (sign_extend:V2SI (match_operand:V2HI 2 "register_operand" "a,b,?b,?a")))] UNSPEC_REAL_MULT)]
+ "TARGET_INSNS_64"
+ "%|%.\\tmpy2\\t%$\\t%1, %2, %k0"
+ [(set_attr "type" "mpy4")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; MPYU4: unsigned V4QI x V4QI -> V4HI widening elementwise multiply.
+(define_insn "umulv4qiv4hi3_real"
+ [(unspec [(match_operand:V4HI 0 "const_int_operand" "=JA,JB,JA,JB")
+ (mult:V4HI
+ (zero_extend:V4HI (match_operand:V4QI 1 "register_operand" "a,b,a,b"))
+ (zero_extend:V4HI (match_operand:V4QI 2 "register_operand" "a,b,?b,?a")))] UNSPEC_REAL_MULT)]
+ "TARGET_INSNS_64"
+ "%|%.\\tmpyu4\\t%$\\t%1, %2, %k0"
+ [(set_attr "type" "mpy4")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; MPYUS4: unsigned (operand 1) x signed (operand 2) V4QI -> V4HI.
+(define_insn "usmulv4qiv4hi3_real"
+ [(unspec [(match_operand:V4HI 0 "const_int_operand" "=JA,JB,JA,JB")
+ (mult:V4HI
+ (zero_extend:V4HI (match_operand:V4QI 1 "register_operand" "a,b,?b,?a"))
+ (sign_extend:V4HI (match_operand:V4QI 2 "register_operand" "a,b,a,b")))] UNSPEC_REAL_MULT)]
+ "TARGET_INSNS_64"
+ "%|%.\\tmpyus4\\t%$\\t%1, %2, %k0"
+ [(set_attr "type" "mpy4")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; DOTP2: dot product of two V2HI vectors -> SI; the RTL spells out the
+;; sum of the two signed 16x16 element products (elements 0 and 1).
+(define_insn "dotv2hi_real"
+ [(unspec [(match_operand:SI 0 "const_int_operand" "=JA,JB,JA,JB")
+ (plus:SI
+ (mult:SI
+ (sign_extend:SI
+ (vec_select:HI
+ (match_operand:V2HI 1 "register_operand" "a,b,a,b")
+ (parallel [(const_int 0)])))
+ (sign_extend:SI
+ (vec_select:HI
+ (match_operand:V2HI 2 "register_operand" "a,b,?b,?a")
+ (parallel [(const_int 0)]))))
+ (mult:SI
+ (sign_extend:SI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 1)])))
+ (sign_extend:SI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 1)])))))] UNSPEC_REAL_MULT)]
+ "TARGET_INSNS_64"
+ "%|%.\\tdotp2\\t%$\\t%1, %2, %k0"
+ [(set_attr "type" "mpy4")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; Fractional multiply
+;; ("real" unspec variants; saturating fixed-point multiplies using
+;; ss_mult/fract_convert on the HQ/SQ fractional modes.)
+
+;; SMPY2: saturating fractional V2HQ x V2HQ -> V2SQ; commutative.
+(define_insn "mulv2hqv2sq3_real"
+ [(unspec [(match_operand:SI 0 "const_int_operand" "=JA,JB,JA,JB")
+ (ss_mult:V2SQ
+ (fract_convert:V2SQ
+ (match_operand:V2HQ 1 "register_operand" "%a,b,?a,?b"))
+ (fract_convert:V2SQ
+ (match_operand:V2HQ 2 "register_operand" "a,b,b,a")))] UNSPEC_REAL_MULT)]
+ ""
+ "%|%.\\tsmpy2\\t%$\\t%1, %2, %k0"
+ [(set_attr "type" "mpy4")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; SMPY: saturating fractional HQ x HQ -> SQ; commutative.
+(define_insn "mulhqsq3_real"
+ [(unspec [(match_operand:SI 0 "const_int_operand" "=JA,JB,JA,JB")
+ (ss_mult:SQ
+ (fract_convert:SQ
+ (match_operand:HQ 1 "register_operand" "%a,b,?a,?b"))
+ (fract_convert:SQ
+ (match_operand:HQ 2 "register_operand" "a,b,b,a")))] UNSPEC_REAL_MULT)]
+ ""
+ "%|%.\\tsmpy\\t%$\\t%1, %2, %k0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; SMPYLH: HQ operand 1 x high half (truncate of SQ) of operand 2.
+(define_insn "mulhqsq3_lh_real"
+ [(unspec [(match_operand:SI 0 "const_int_operand" "=JA,JB,JA,JB")
+ (ss_mult:SQ
+ (fract_convert:SQ
+ (match_operand:HQ 1 "register_operand" "a,b,?a,?b"))
+ (fract_convert:SQ
+ (truncate:HQ (match_operand:SQ 2 "register_operand" "a,b,b,a"))))] UNSPEC_REAL_MULT)]
+ ""
+ "%|%.\\tsmpylh\\t%$\\t%1, %2, %k0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; SMPYHL: high half of operand 1 x HQ operand 2.
+(define_insn "mulhqsq3_hl_real"
+ [(unspec [(match_operand:SI 0 "const_int_operand" "=JA,JB,JA,JB")
+ (ss_mult:SQ
+ (fract_convert:SQ
+ (truncate:HQ (match_operand:SQ 1 "register_operand" "a,b,b,a")))
+ (fract_convert:SQ
+ (match_operand:HQ 2 "register_operand" "a,b,b,a")))] UNSPEC_REAL_MULT)]
+ ""
+ "%|%.\\tsmpyhl\\t%$\\t%1, %2, %k0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; SMPYH: high halves of both SQ operands.
+(define_insn "mulhqsq3_hh_real"
+ [(unspec [(match_operand:SI 0 "const_int_operand" "=JA,JB,JA,JB")
+ (ss_mult:SQ
+ (fract_convert:SQ
+ (truncate:HQ (match_operand:SQ 1 "register_operand" "a,b,b,a")))
+ (fract_convert:SQ
+ (truncate:HQ (match_operand:SQ 2 "register_operand" "a,b,b,a"))))] UNSPEC_REAL_MULT)]
+ ""
+ "%|%.\\tsmpyh\\t%$\\t%1, %2, %k0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
--- /dev/null
+;; Multiplication patterns for TI C6X.
+;; This file is processed by genmult.sh to produce two variants of each
+;; pattern, a normal one and a real_mult variant for modulo scheduling.
+;; Copyright (C) 2010, 2011 Free Software Foundation, Inc.
+;; Contributed by Bernd Schmidt <bernds@codesourcery.com>
+;; Contributed by CodeSourcery.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; -------------------------------------------------------------------------
+;; Miscellaneous insns that execute on the M units
+;; -------------------------------------------------------------------------
+
+;; Template patterns (input to genmult.sh): _VARIANT_, _SET_, _OBRK_,
+;; _CBRK_, _DESTOPERAND_, _A_, _B_, _MODk_ and the _M* mode markers are
+;; placeholders, expanded once into the normal form and once into the
+;; "real" (modulo-scheduling) form of each pattern.
+
+;; ROTL: rotate left by a register or 5-bit unsigned constant (Iu5).
+(define_insn "rotlsi3_VARIANT_"
+ [(_SET_ _OBRK_(match_operand:SI 0 "_DESTOPERAND_" "=_A_,_B_,_A_,_B_")
+ (rotate:SI (match_operand:SI 1 "register_operand" "a,b,?b,?a")
+ (match_operand:SI 2 "reg_or_ucst5_operand" "aIu5,bIu5,aIu5,bIu5"))_CBRK_)]
+ "TARGET_INSNS_64"
+ "%|%.\\trotl\\t%$\\t%1, %2, %_MODk_0"
+ [(set_attr "units" "m")
+ (set_attr "type" "mpy2")
+ (set_attr "cross" "n,n,y,y")])
+
+;; BITR: reverse the bits of a 32-bit value (UNSPEC_BITREV).
+(define_insn "bitrevsi2_VARIANT_"
+ [(_SET_ _OBRK_(match_operand:SI 0 "_DESTOPERAND_" "=_A_,_A_,_B_,_B_")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "a,?b,b,?a")]
+ UNSPEC_BITREV)_CBRK_)]
+ "TARGET_INSNS_64"
+ "%|%.\\tbitr\\t%$\\t%1, %_MODk_0"
+ [(set_attr "units" "m")
+ (set_attr "type" "mpy2")
+ (set_attr "cross" "n,y,n,y")])
+
+;; Vector average.
+
+;; AVG2: average of V2HI elements (semantics opaque to the compiler,
+;; hence UNSPEC_AVG).
+(define_insn "avgv2hi3_VARIANT_"
+ [(_SET_ _OBRK_(match_operand:_MV2HI 0 "_DESTOPERAND_" "=_A_,_B_,_A_,_B_")
+ (unspec:V2HI [(match_operand:V2HI 1 "register_operand" "a,b,?b,?a")
+ (match_operand:V2HI 2 "register_operand" "a,b,a,b")] UNSPEC_AVG)_CBRK_)]
+ "TARGET_INSNS_64"
+ "%|%.\\tavg2\\t%$\\t%1, %2, %_MODk_0"
+ [(set_attr "units" "m")
+ (set_attr "type" "mpy2")
+ (set_attr "cross" "n,n,y,y")])
+
+;; AVGU4: average of unsigned V4QI elements.
+(define_insn "uavgv4qi3_VARIANT_"
+ [(_SET_ _OBRK_(match_operand:_MV4QI 0 "_DESTOPERAND_" "=_A_,_B_,_A_,_B_")
+ (unspec:V4QI [(match_operand:V4QI 1 "register_operand" "a,b,?b,?a")
+ (match_operand:V4QI 2 "register_operand" "a,b,a,b")] UNSPEC_AVG)_CBRK_)]
+ "TARGET_INSNS_64"
+ "%|%.\\tavgu4\\t%$\\t%1, %2, %_MODk_0"
+ [(set_attr "units" "m")
+ (set_attr "type" "mpy2")
+ (set_attr "cross" "n,n,y,y")])
+
+;; -------------------------------------------------------------------------
+;; Multiplication
+;; -------------------------------------------------------------------------
+
+;; MPY: 16x16 signed multiply, HImode result.  Operand 2 may be a
+;; 5-bit signed constant (Is5); the template prints "%2, %1" so a
+;; constant appears as the first assembler source operand.
+(define_insn "mulhi3_VARIANT_"
+ [(_SET_ _OBRK_(match_operand:HI 0 "_DESTOPERAND_" "=_A_,_B_,_A_,_B_")
+ (mult:HI (match_operand:HI 1 "register_operand" "a,b,?b,?a")
+ (match_operand:HI 2 "reg_or_scst5_operand" "aIs5,bIs5,aIs5,bIs5"))_CBRK_)]
+ ""
+ "%|%.\\tmpy\\t%$\\t%2, %1, %_MODk_0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; HI x 5-bit signed constant -> SI.  Only three alternatives: with a
+;; constant multiplier the register may come from either file (_A__B_,
+;; "?ab").
+(define_insn "mulhisi3_const_VARIANT_"
+ [(_SET_ _OBRK_(match_operand:SI 0 "_DESTOPERAND_" "=_A_,_B_,_A__B_")
+ (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "register_operand" "a,b,?ab"))
+ (match_operand:HI 2 "scst5_operand" "Is5,Is5,Is5"))_CBRK_)]
+ ""
+ "%|%.\\tmpy\\t%$\\t%2, %1, %_MODk_0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y")])
+
+;; MPY: signed 16x16 -> 32 multiply of the low halves; commutative
+;; ("%" on operand 1).
+(define_insn "*mulhisi3_insn_VARIANT_"
+ [(_SET_ _OBRK_(match_operand:SI 0 "_DESTOPERAND_" "=_A_,_B_,_A_,_B_")
+ (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "register_operand" "%a,b,?a,?b"))
+ (sign_extend:SI
+ (match_operand:HI 2 "reg_or_scst5_operand" "a,b,b,a")))_CBRK_)]
+ ""
+ "%|%.\\tmpy\\t%$\\t%1, %2, %_MODk_0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; MPYLH: low half of operand 1 x high half (arithmetic >> 16) of
+;; operand 2.
+(define_insn "mulhisi3_lh_VARIANT_"
+ [(_SET_ _OBRK_(match_operand:SI 0 "_DESTOPERAND_" "=_A_,_B_,_A_,_B_")
+ (mult:SI (sign_extend:SI
+ (match_operand:HI 1 "register_operand" "a,b,?a,?b"))
+ (ashiftrt:SI
+ (match_operand:SI 2 "register_operand" "a,b,b,a")
+ (const_int 16)))_CBRK_)]
+ ""
+ "%|%.\\tmpylh\\t%$\\t%1, %2, %_MODk_0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; MPYHL: high half of operand 1 x low half of operand 2.
+(define_insn "mulhisi3_hl_VARIANT_"
+ [(_SET_ _OBRK_(match_operand:SI 0 "_DESTOPERAND_" "=_A_,_B_,_A_,_B_")
+ (mult:SI (ashiftrt:SI
+ (match_operand:SI 1 "register_operand" "a,b,?a,?b")
+ (const_int 16))
+ (sign_extend:SI
+ (match_operand:HI 2 "register_operand" "a,b,b,a")))_CBRK_)]
+ ""
+ "%|%.\\tmpyhl\\t%$\\t%1, %2, %_MODk_0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; MPYH: high halves of both operands; commutative.
+(define_insn "mulhisi3_hh_VARIANT_"
+ [(_SET_ _OBRK_(match_operand:SI 0 "_DESTOPERAND_" "=_A_,_B_,_A_,_B_")
+ (mult:SI (ashiftrt:SI
+ (match_operand:SI 1 "register_operand" "%a,b,?a,?b")
+ (const_int 16))
+ (ashiftrt:SI
+ (match_operand:SI 2 "register_operand" "a,b,b,a")
+ (const_int 16)))_CBRK_)]
+ ""
+ "%|%.\\tmpyh\\t%$\\t%1, %2, %_MODk_0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; MPYU: unsigned 16x16 -> 32 multiply of the low halves; commutative.
+(define_insn "umulhisi3_VARIANT_"
+ [(_SET_ _OBRK_(match_operand:SI 0 "_DESTOPERAND_" "=_A_,_B_,_A_,_B_")
+ (mult:SI (zero_extend:SI
+ (match_operand:HI 1 "register_operand" "%a,b,?a,?b"))
+ (zero_extend:SI
+ (match_operand:HI 2 "register_operand" "a,b,b,a")))_CBRK_)]
+ ""
+ "%|%.\\tmpyu\\t%$\\t%1, %2, %_MODk_0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; MPYLHU: unsigned low half of operand 1 x unsigned high half
+;; (logical >> 16) of operand 2.
+(define_insn "umulhisi3_lh_VARIANT_"
+ [(_SET_ _OBRK_(match_operand:SI 0 "_DESTOPERAND_" "=_A_,_B_,_A_,_B_")
+ (mult:SI (zero_extend:SI
+ (match_operand:HI 1 "register_operand" "a,b,?a,?b"))
+ (lshiftrt:SI
+ (match_operand:SI 2 "register_operand" "a,b,b,a")
+ (const_int 16)))_CBRK_)]
+ ""
+ "%|%.\\tmpylhu\\t%$\\t%1, %2, %_MODk_0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; MPYHLU: unsigned high half of operand 1 x unsigned low half of
+;; operand 2.
+(define_insn "umulhisi3_hl_VARIANT_"
+ [(_SET_ _OBRK_(match_operand:SI 0 "_DESTOPERAND_" "=_A_,_B_,_A_,_B_")
+ (mult:SI (lshiftrt:SI
+ (match_operand:SI 1 "register_operand" "a,b,?a,?b")
+ (const_int 16))
+ (zero_extend:SI
+ (match_operand:HI 2 "register_operand" "a,b,b,a")))_CBRK_)]
+ ""
+ "%|%.\\tmpyhlu\\t%$\\t%1, %2, %_MODk_0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; MPYHU: unsigned high halves of both operands; commutative.
+(define_insn "umulhisi3_hh_VARIANT_"
+ [(_SET_ _OBRK_(match_operand:SI 0 "_DESTOPERAND_" "=_A_,_B_,_A_,_B_")
+ (mult:SI (lshiftrt:SI
+ (match_operand:SI 1 "register_operand" "%a,b,?a,?b")
+ (const_int 16))
+ (lshiftrt:SI
+ (match_operand:SI 2 "register_operand" "a,b,b,a")
+ (const_int 16)))_CBRK_)]
+ ""
+ "%|%.\\tmpyhu\\t%$\\t%1, %2, %_MODk_0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; MPYSU with a constant: unsigned HI register x 5-bit signed constant.
+;; The template prints "%2, %1" so the signed constant becomes the
+;; first (signed) source of MPYSU.
+(define_insn "usmulhisi3_const_VARIANT_"
+ [(_SET_ _OBRK_(match_operand:SI 0 "_DESTOPERAND_" "=_A_,_B_,_A__B_")
+ (mult:SI (zero_extend:SI
+ (match_operand:HI 1 "register_operand" "a,b,?ab"))
+ (match_operand:SI 2 "scst5_operand" "Is5,Is5,Is5"))_CBRK_)]
+ ""
+ "%|%.\\tmpysu\\t%$\\t%2, %1, %_MODk_0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y")])
+
+;; MPYUS: unsigned low half of operand 1 x signed low half of
+;; operand 2.
+(define_insn "*usmulhisi3_insn_VARIANT_"
+ [(_SET_ _OBRK_(match_operand:SI 0 "_DESTOPERAND_" "=_A_,_B_,_A_,_B_")
+ (mult:SI (zero_extend:SI
+ (match_operand:HI 1 "register_operand" "a,b,?a,?b"))
+ (sign_extend:SI
+ (match_operand:HI 2 "reg_or_scst5_operand" "aIs5,bIs5,bIs5,aIs5")))_CBRK_)]
+ ""
+ "%|%.\\tmpyus\\t%$\\t%1, %2, %_MODk_0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; MPYLUHS: unsigned low half of operand 1 x signed high half
+;; (arithmetic >> 16) of operand 2.
+(define_insn "usmulhisi3_lh_VARIANT_"
+ [(_SET_ _OBRK_(match_operand:SI 0 "_DESTOPERAND_" "=_A_,_B_,_A_,_B_")
+ (mult:SI (zero_extend:SI
+ (match_operand:HI 1 "register_operand" "a,b,?a,?b"))
+ (ashiftrt:SI
+ (match_operand:SI 2 "register_operand" "a,b,b,a")
+ (const_int 16)))_CBRK_)]
+ ""
+ "%|%.\\tmpyluhs\\t%$\\t%1, %2, %_MODk_0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; MPYHULS: unsigned high half of operand 1 x signed low half of
+;; operand 2.
+(define_insn "usmulhisi3_hl_VARIANT_"
+ [(_SET_ _OBRK_(match_operand:SI 0 "_DESTOPERAND_" "=_A_,_B_,_A_,_B_")
+ (mult:SI (lshiftrt:SI
+ (match_operand:SI 1 "register_operand" "a,b,?a,?b")
+ (const_int 16))
+ (sign_extend:SI
+ (match_operand:HI 2 "register_operand" "a,b,b,a")))_CBRK_)]
+ ""
+ "%|%.\\tmpyhuls\\t%$\\t%1, %2, %_MODk_0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; MPYHUS: unsigned high half of operand 1 x signed high half of
+;; operand 2.
+(define_insn "usmulhisi3_hh_VARIANT_"
+ [(_SET_ _OBRK_(match_operand:SI 0 "_DESTOPERAND_" "=_A_,_B_,_A_,_B_")
+ (mult:SI (lshiftrt:SI
+ (match_operand:SI 1 "register_operand" "a,b,?a,?b")
+ (const_int 16))
+ (ashiftrt:SI
+ (match_operand:SI 2 "register_operand" "a,b,b,a")
+ (const_int 16)))_CBRK_)]
+ ""
+ "%|%.\\tmpyhus\\t%$\\t%1, %2, %_MODk_0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; MPY32: signed 32x32 -> 32 multiply; commutative.  Requires C64x+
+;; (TARGET_MPY32).
+(define_insn "mulsi3_insn_VARIANT_"
+ [(_SET_ _OBRK_(match_operand:SI 0 "_DESTOPERAND_" "=_A_,_B_,_A_,_B_")
+ (mult:SI (match_operand:SI 1 "register_operand" "%a,b,?a,?b")
+ (match_operand:SI 2 "register_operand" "a,b,b,a"))_CBRK_)]
+ "TARGET_MPY32"
+ "%|%.\\tmpy32\\t%$\\t%1, %2, %_MODk_0"
+ [(set_attr "type" "mpy4")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; Widening 32x32 -> 64 multiply; <u>/any_ext iterator yields the
+;; signed (mpy32) and unsigned (mpy32u) forms.  Note %_MODK_0 (capital
+;; K) for the DImode destination, vs. %_MODk_0 for SImode above.
+(define_insn "<u>mulsidi3_VARIANT_"
+ [(_SET_ _OBRK_(match_operand:DI 0 "_DESTOPERAND_" "=_A_,_B_,_A_,_B_")
+ (mult:DI (any_ext:DI
+ (match_operand:SI 1 "register_operand" "%a,b,?a,?b"))
+ (any_ext:DI
+ (match_operand:SI 2 "register_operand" "a,b,b,a")))_CBRK_)]
+ "TARGET_MPY32"
+ "%|%.\\tmpy32<u>\\t%$\\t%1, %2, %_MODK_0"
+ [(set_attr "type" "mpy4")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; MPY32US: unsigned (operand 1) x signed (operand 2) 32x32 -> 64;
+;; not commutative.
+(define_insn "usmulsidi3_VARIANT_"
+ [(_SET_ _OBRK_(match_operand:DI 0 "_DESTOPERAND_" "=_A_,_B_,_A_,_B_")
+ (mult:DI (zero_extend:DI
+ (match_operand:SI 1 "register_operand" "a,b,?a,?b"))
+ (sign_extend:DI
+ (match_operand:SI 2 "register_operand" "a,b,b,a")))_CBRK_)]
+ "TARGET_MPY32"
+ "%|%.\\tmpy32us\\t%$\\t%1, %2, %_MODK_0"
+ [(set_attr "type" "mpy4")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; Widening vector multiply and dot product
+
+;; MPY2: signed V2HI x V2HI -> V2SI widening elementwise multiply.
+(define_insn "mulv2hiv2si3_VARIANT_"
+ [(_SET_ _OBRK_(match_operand:V2SI 0 "_DESTOPERAND_" "=_A_,_B_,_A_,_B_")
+ (mult:V2SI
+ (sign_extend:V2SI (match_operand:V2HI 1 "register_operand" "a,b,a,b"))
+ (sign_extend:V2SI (match_operand:V2HI 2 "register_operand" "a,b,?b,?a")))_CBRK_)]
+ "TARGET_INSNS_64"
+ "%|%.\\tmpy2\\t%$\\t%1, %2, %_MODk_0"
+ [(set_attr "type" "mpy4")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; MPYU4: unsigned V4QI x V4QI -> V4HI widening elementwise multiply.
+(define_insn "umulv4qiv4hi3_VARIANT_"
+ [(_SET_ _OBRK_(match_operand:V4HI 0 "_DESTOPERAND_" "=_A_,_B_,_A_,_B_")
+ (mult:V4HI
+ (zero_extend:V4HI (match_operand:V4QI 1 "register_operand" "a,b,a,b"))
+ (zero_extend:V4HI (match_operand:V4QI 2 "register_operand" "a,b,?b,?a")))_CBRK_)]
+ "TARGET_INSNS_64"
+ "%|%.\\tmpyu4\\t%$\\t%1, %2, %_MODk_0"
+ [(set_attr "type" "mpy4")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; MPYUS4: unsigned (operand 1) x signed (operand 2) V4QI -> V4HI.
+(define_insn "usmulv4qiv4hi3_VARIANT_"
+ [(_SET_ _OBRK_(match_operand:V4HI 0 "_DESTOPERAND_" "=_A_,_B_,_A_,_B_")
+ (mult:V4HI
+ (zero_extend:V4HI (match_operand:V4QI 1 "register_operand" "a,b,?b,?a"))
+ (sign_extend:V4HI (match_operand:V4QI 2 "register_operand" "a,b,a,b")))_CBRK_)]
+ "TARGET_INSNS_64"
+ "%|%.\\tmpyus4\\t%$\\t%1, %2, %_MODk_0"
+ [(set_attr "type" "mpy4")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; DOTP2: dot product of two V2HI vectors -> SI; RTL spells out the sum
+;; of the two signed 16x16 element products.
+(define_insn "dotv2hi_VARIANT_"
+ [(_SET_ _OBRK_(match_operand:SI 0 "_DESTOPERAND_" "=_A_,_B_,_A_,_B_")
+ (plus:SI
+ (mult:SI
+ (sign_extend:SI
+ (vec_select:HI
+ (match_operand:V2HI 1 "register_operand" "a,b,a,b")
+ (parallel [(const_int 0)])))
+ (sign_extend:SI
+ (vec_select:HI
+ (match_operand:V2HI 2 "register_operand" "a,b,?b,?a")
+ (parallel [(const_int 0)]))))
+ (mult:SI
+ (sign_extend:SI
+ (vec_select:HI (match_dup 1) (parallel [(const_int 1)])))
+ (sign_extend:SI
+ (vec_select:HI (match_dup 2) (parallel [(const_int 1)])))))_CBRK_)]
+ "TARGET_INSNS_64"
+ "%|%.\\tdotp2\\t%$\\t%1, %2, %_MODk_0"
+ [(set_attr "type" "mpy4")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; Fractional multiply
+;; (saturating fixed-point multiplies; _MV2SQ/_MSQ are mode
+;; placeholders expanded by genmult.sh.)
+
+;; SMPY2: saturating fractional V2HQ x V2HQ -> V2SQ; commutative.
+(define_insn "mulv2hqv2sq3_VARIANT_"
+ [(_SET_ _OBRK_(match_operand:_MV2SQ 0 "_DESTOPERAND_" "=_A_,_B_,_A_,_B_")
+ (ss_mult:V2SQ
+ (fract_convert:V2SQ
+ (match_operand:V2HQ 1 "register_operand" "%a,b,?a,?b"))
+ (fract_convert:V2SQ
+ (match_operand:V2HQ 2 "register_operand" "a,b,b,a")))_CBRK_)]
+ ""
+ "%|%.\\tsmpy2\\t%$\\t%1, %2, %_MODk_0"
+ [(set_attr "type" "mpy4")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; SMPY: saturating fractional HQ x HQ -> SQ; commutative.
+(define_insn "mulhqsq3_VARIANT_"
+ [(_SET_ _OBRK_(match_operand:_MSQ 0 "_DESTOPERAND_" "=_A_,_B_,_A_,_B_")
+ (ss_mult:SQ
+ (fract_convert:SQ
+ (match_operand:HQ 1 "register_operand" "%a,b,?a,?b"))
+ (fract_convert:SQ
+ (match_operand:HQ 2 "register_operand" "a,b,b,a")))_CBRK_)]
+ ""
+ "%|%.\\tsmpy\\t%$\\t%1, %2, %_MODk_0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; SMPYLH: HQ operand 1 x high half (truncate of SQ) of operand 2.
+(define_insn "mulhqsq3_lh_VARIANT_"
+ [(_SET_ _OBRK_(match_operand:_MSQ 0 "_DESTOPERAND_" "=_A_,_B_,_A_,_B_")
+ (ss_mult:SQ
+ (fract_convert:SQ
+ (match_operand:HQ 1 "register_operand" "a,b,?a,?b"))
+ (fract_convert:SQ
+ (truncate:HQ (match_operand:SQ 2 "register_operand" "a,b,b,a"))))_CBRK_)]
+ ""
+ "%|%.\\tsmpylh\\t%$\\t%1, %2, %_MODk_0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; SMPYHL: high half of operand 1 x HQ operand 2.
+(define_insn "mulhqsq3_hl_VARIANT_"
+ [(_SET_ _OBRK_(match_operand:_MSQ 0 "_DESTOPERAND_" "=_A_,_B_,_A_,_B_")
+ (ss_mult:SQ
+ (fract_convert:SQ
+ (truncate:HQ (match_operand:SQ 1 "register_operand" "a,b,b,a")))
+ (fract_convert:SQ
+ (match_operand:HQ 2 "register_operand" "a,b,b,a")))_CBRK_)]
+ ""
+ "%|%.\\tsmpyhl\\t%$\\t%1, %2, %_MODk_0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; SMPYH: high halves of both SQ operands.
+(define_insn "mulhqsq3_hh_VARIANT_"
+ [(_SET_ _OBRK_(match_operand:_MSQ 0 "_DESTOPERAND_" "=_A_,_B_,_A_,_B_")
+ (ss_mult:SQ
+ (fract_convert:SQ
+ (truncate:HQ (match_operand:SQ 1 "register_operand" "a,b,b,a")))
+ (fract_convert:SQ
+ (truncate:HQ (match_operand:SQ 2 "register_operand" "a,b,b,a"))))_CBRK_)]
+ ""
+ "%|%.\\tsmpyh\\t%$\\t%1, %2, %_MODk_0"
+ [(set_attr "type" "mpy2")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
--- /dev/null
+/* Definitions for option handling for TI C6X.
+ Copyright (C) 2011
+ Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+#ifndef C6X_OPTS_H
+#define C6X_OPTS_H
+
+/* An enumeration of all supported target devices. */
+/* Populated from c6x-isas.def via the C6X_ISA X-macro; each entry
+   contributes its ENUM_VALUE, with unk_isa as the trailing "unknown"
+   sentinel.  */
+typedef enum c6x_cpu_type
+{
+#define C6X_ISA(NAME,ENUM_VALUE,FLAGS) \
+ ENUM_VALUE,
+#include "c6x-isas.def"
+#undef C6X_ISA
+ unk_isa
+} c6x_cpu_t;
+
+/* Small-data section handling modes: none, the default set, or place
+   everything eligible in small data (presumably selected by an
+   -msdata= option -- confirm against c6x.opt).  */
+enum c6x_sdata { C6X_SDATA_NONE, C6X_SDATA_DEFAULT, C6X_SDATA_ALL };
+
+#endif /* C6X_OPTS_H */
--- /dev/null
+/* Prototypes for exported functions defined in c6x.c.
+ Copyright (C) 2010, 2011
+ Free Software Foundation, Inc.
+ Contributed by CodeSourcery.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_C6X_PROTOS_H
+#define GCC_C6X_PROTOS_H
+
+/* Functions defined in c6x.c. */
+
+/* Declarations below need RTL types, so they are only visible to files
+   that include rtl.h first.  */
+#ifdef RTX_CODE
+/* Argument passing and register/block padding conventions.  */
+extern void c6x_init_cumulative_args (CUMULATIVE_ARGS *, const_tree, rtx, int);
+extern bool c6x_block_reg_pad_upward (enum machine_mode, const_tree, bool);
+
+/* Address legitimacy, memory operands, and move expansion.  */
+extern bool c6x_legitimate_address_p_1 (enum machine_mode, rtx, bool, bool);
+extern bool c6x_mem_operand (rtx, enum reg_class, bool);
+extern bool expand_move (rtx *, enum machine_mode);
+
+/* Calls, comparisons and block-move expansion.  */
+extern bool c6x_long_call_p (rtx);
+extern void c6x_expand_call (rtx, rtx, bool);
+extern rtx c6x_expand_compare (rtx, enum machine_mode);
+extern bool c6x_force_op_for_comparison_p (enum rtx_code, rtx);
+extern bool c6x_expand_movmem (rtx, rtx, rtx, rtx, rtx, rtx);
+
+/* Miscellaneous RTL helpers.  */
+extern rtx c6x_subword (rtx, bool);
+extern void split_di (rtx *, int, rtx *, rtx *);
+extern bool c6x_valid_mask_p (HOST_WIDE_INT);
+
+extern char c6x_get_unit_specifier (rtx);
+
+extern void c6x_final_prescan_insn(rtx insn, rtx *opvec, int noperands);
+
+/* Prologue/epilogue generation and frame layout.  */
+extern int c6x_nsaved_regs (void);
+extern HOST_WIDE_INT c6x_initial_elimination_offset (int, int);
+extern void c6x_expand_prologue (void);
+extern void c6x_expand_epilogue (bool);
+
+extern rtx c6x_return_addr_rtx (int);
+
+extern void c6x_set_return_address (rtx, rtx);
+#endif
+
+/* Option handling (no RTL types required).  */
+extern void c6x_override_options (void);
+extern void c6x_optimization_options (int, int);
+
+/* Assembly-output helpers.  */
+extern void c6x_output_file_unwind (FILE *);
+
+extern void c6x_function_end (FILE *, const char *);
+
+#endif /* GCC_C6X_PROTOS_H */
--- /dev/null
+;; -*- buffer-read-only: t -*-
+;; Generated automatically from c6x-sched.md.in by gensched.sh
+
+;; Definitions for side 1, cross n
+
+;; Scheduling description for TI C6X.
+;; Copyright (C) 2010, 2011 Free Software Foundation, Inc.
+;; Contributed by Bernd Schmidt <bernds@codesourcery.com>
+;; Contributed by CodeSourcery.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; This section was produced by gensched.sh from c6x-sched.md.in,
+;; which is processed multiple times: a side placeholder is replaced
+;; with "1", the register-file letter with "a", and the cross/unit
+;; placeholders with "n" and the matching reservations.
+
+;; Side-1 D-unit memory and single-cycle reservations (no cross path).
+;; Loads have a 5-cycle latency (4 delay slots), stores complete in 1.
+;; t1/t2 are presumably the per-side data-transfer paths -- the
+;; non-aligned (loadn/storen) forms reserve both; confirm against the
+;; automaton definitions in c6x.md.
+(define_insn_reservation "load_d1n" 5
+ (and (eq_attr "type" "load")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "d_addr")
+ (eq_attr "addr_regfile" "a"))))
+ "d1+t1")
+
+(define_insn_reservation "store_d1n" 1
+ (and (eq_attr "type" "store")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "d_addr")
+ (eq_attr "addr_regfile" "a"))))
+ "d1+t1")
+
+(define_insn_reservation "loadn_d1n" 5
+ (and (eq_attr "type" "loadn")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "d_addr")
+ (eq_attr "addr_regfile" "a"))))
+ "d1+t1+t2")
+
+(define_insn_reservation "storen_d1n" 1
+ (and (eq_attr "type" "storen")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "d_addr")
+ (eq_attr "addr_regfile" "a"))))
+ "d1+t1+t2")
+
+(define_insn_reservation "single_d1n" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "d")
+ (eq_attr "dest_regfile" "a"))))
+ "d1")
+
+;; Side-1 L-unit reservations (no cross path).  l1w is presumably the
+;; L1 result write port, reserved in the cycle(s) a result is written;
+;; multi-cycle FP ops (fp4/intdp/adddp) occupy the unit early and the
+;; write port late.
+(define_insn_reservation "single_l1n" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "l")
+ (eq_attr "dest_regfile" "a"))))
+ "l1+l1w")
+
+(define_insn_reservation "fp4_l1n" 4
+ (and (eq_attr "type" "fp4")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "l")
+ (eq_attr "dest_regfile" "a"))))
+ "l1,nothing*2,l1w")
+
+(define_insn_reservation "intdp_l1n" 5
+ (and (eq_attr "type" "intdp")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "l")
+ (eq_attr "dest_regfile" "a"))))
+ "l1,nothing*2,l1w*2")
+
+(define_insn_reservation "adddp_l1n" 7
+ (and (eq_attr "type" "adddp")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "l")
+ (eq_attr "dest_regfile" "a"))))
+ "(l1)*2,nothing*3,l1w*2")
+
+;; Side-1 S-unit and branch reservations (no cross path).  Branches and
+;; calls have a 6-cycle latency (5 delay slots) and also reserve the
+;; branch resources (br0/br1).  The two call reservations differ by
+;; TARGET_INSNS_64: with the C64x insns available the return address is
+;; set up with ADDKPC in one delay slot, otherwise with a two-insn MVK
+;; sequence occupying two S2 slots.
+(define_insn_reservation "branch_s1n" 6
+ (and (eq_attr "type" "branch")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "a"))))
+ "(s1+s1w)+br1")
+
+(define_insn_reservation "call_addkpc_s1n" 6
+ (and (eq_attr "type" "call")
+ (and (ne (symbol_ref "TARGET_INSNS_64") (const_int 0))
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "a")))))
+ "(s1+s1w)+br1,s2+br0+br1")
+
+(define_insn_reservation "call_mvk_s1n" 6
+ (and (eq_attr "type" "call")
+ (and (eq (symbol_ref "TARGET_INSNS_64") (const_int 0))
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "a")))))
+ "(s1+s1w)+br1,s2,s2")
+
+(define_insn_reservation "single_s1n" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "a"))))
+ "(s1+s1w)")
+
+(define_insn_reservation "cmpdp_s1n" 2
+ (and (eq_attr "type" "cmpdp")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "a"))))
+ "s1,(s1)+s1w")
+
+(define_insn_reservation "dp2_s1n" 2
+ (and (eq_attr "type" "dp2")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "a"))))
+ "s1+s1w,s1w")
+
+(define_insn_reservation "fp4_s1n" 4
+ (and (eq_attr "type" "fp4")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "a"))))
+ "s1,nothing*2,s1w")
+
+(define_insn_reservation "mvilc4_s1n" 4
+ (and (eq_attr "type" "mvilc")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "a"))))
+ "(s1+s1w)")
+
+;; Side-1 reservations for insns that may execute on any of several
+;; units (dl = D or L, ds = D or S, ls = L or S, dls = any of the
+;; three); expressed as "|" alternatives so the scheduler may pick a
+;; free unit.
+(define_insn_reservation "single_dl1n" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "dl")
+ (eq_attr "dest_regfile" "a"))))
+ "(d1|(l1+l1w))")
+
+(define_insn_reservation "single_ds1n" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "ds")
+ (eq_attr "dest_regfile" "a"))))
+ "(d1|(s1+s1w))")
+
+(define_insn_reservation "single_ls1n" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "ls")
+ (eq_attr "dest_regfile" "a"))))
+ "((l1+l1w)|(s1+s1w))")
+
+(define_insn_reservation "dp2_l1n" 2
+ (and (eq_attr "type" "dp2")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "l")
+ (eq_attr "dest_regfile" "a"))))
+ "l1+l1w,l1w")
+
+(define_insn_reservation "fp4_ls1n" 4
+ (and (eq_attr "type" "fp4")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "ls")
+ (eq_attr "dest_regfile" "a"))))
+ "(s1,nothing*2,s1w)|(l1,nothing*2,l1w)")
+
+(define_insn_reservation "adddp_ls1n" 7
+ (and (eq_attr "type" "adddp")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "ls")
+ (eq_attr "dest_regfile" "a"))))
+ "((s1)*2,nothing*3,s1w*2)|((l1)*2,nothing*3,l1w*2)")
+
+(define_insn_reservation "single_dls1n" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "dls")
+ (eq_attr "dest_regfile" "a"))))
+ "(d1|(l1+l1w)|(s1+s1w))")
+
+;; Side-1 M-unit (multiplier) reservations (no cross path).  Latencies
+;; match the type names: mpy2 = 2 cycles, mpy4 = 4, mpyspdp = 7,
+;; mpydp = 10; m1w is presumably the M1 result write port.
+(define_insn_reservation "mpy2_m1n" 2
+ (and (eq_attr "type" "mpy2")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "m")
+ (eq_attr "dest_regfile" "a"))))
+ "m1,m1w")
+
+(define_insn_reservation "mpy4_m1n" 4
+ (and (eq_attr "type" "mpy4")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "m")
+ (eq_attr "dest_regfile" "a"))))
+ "m1,nothing,nothing,m1w")
+
+(define_insn_reservation "mpydp_m1n" 10
+ (and (eq_attr "type" "mpydp")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "m")
+ (eq_attr "dest_regfile" "a"))))
+ "(m1)*4,nothing*4,m1w*2")
+
+(define_insn_reservation "mpyspdp_m1n" 7
+ (and (eq_attr "type" "mpyspdp")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "m")
+ (eq_attr "dest_regfile" "a"))))
+ "(m1)*2,nothing*3,m1w*2")
+
+(define_insn_reservation "mpysp2dp_m1n" 5
+ (and (eq_attr "type" "mpysp2dp")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "m")
+ (eq_attr "dest_regfile" "a"))))
+ "m1,nothing*2,m1w*2")
+
+;; Definitions for side 2, cross n
+
+;; Scheduling description for TI C6X.
+;; Copyright (C) 2010, 2011 Free Software Foundation, Inc.
+;; Contributed by Bernd Schmidt <bernds@codesourcery.com>
+;; Contributed by CodeSourcery.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; This section was produced by gensched.sh from c6x-sched.md.in,
+;; which is processed multiple times: a side placeholder is replaced
+;; with "2", the register-file letter with "b", and the cross/unit
+;; placeholders with "n" and the matching reservations.
+
+;; Side-2 D-unit memory and single-cycle reservations (no cross path);
+;; exact mirror of the side-1 definitions above with d2/t2 and
+;; register file "b".
+(define_insn_reservation "load_d2n" 5
+ (and (eq_attr "type" "load")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "d_addr")
+ (eq_attr "addr_regfile" "b"))))
+ "d2+t2")
+
+(define_insn_reservation "store_d2n" 1
+ (and (eq_attr "type" "store")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "d_addr")
+ (eq_attr "addr_regfile" "b"))))
+ "d2+t2")
+
+(define_insn_reservation "loadn_d2n" 5
+ (and (eq_attr "type" "loadn")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "d_addr")
+ (eq_attr "addr_regfile" "b"))))
+ "d2+t1+t2")
+
+(define_insn_reservation "storen_d2n" 1
+ (and (eq_attr "type" "storen")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "d_addr")
+ (eq_attr "addr_regfile" "b"))))
+ "d2+t1+t2")
+
+(define_insn_reservation "single_d2n" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "d")
+ (eq_attr "dest_regfile" "b"))))
+ "d2")
+
+;; Side-2 L-unit reservations (no cross path); mirror of the side-1
+;; definitions with l2/l2w.
+(define_insn_reservation "single_l2n" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "l")
+ (eq_attr "dest_regfile" "b"))))
+ "l2+l2w")
+
+(define_insn_reservation "fp4_l2n" 4
+ (and (eq_attr "type" "fp4")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "l")
+ (eq_attr "dest_regfile" "b"))))
+ "l2,nothing*2,l2w")
+
+(define_insn_reservation "intdp_l2n" 5
+ (and (eq_attr "type" "intdp")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "l")
+ (eq_attr "dest_regfile" "b"))))
+ "l2,nothing*2,l2w*2")
+
+(define_insn_reservation "adddp_l2n" 7
+ (and (eq_attr "type" "adddp")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "l")
+ (eq_attr "dest_regfile" "b"))))
+ "(l2)*2,nothing*3,l2w*2")
+
+;; Side-2 S-unit and branch reservations (no cross path); mirror of the
+;; side-1 definitions.  As on side 1, the call reservations are split
+;; on TARGET_INSNS_64 (ADDKPC vs. MVK return-address sequence).
+(define_insn_reservation "branch_s2n" 6
+ (and (eq_attr "type" "branch")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "b"))))
+ "(s2+s2w)+br1")
+
+(define_insn_reservation "call_addkpc_s2n" 6
+ (and (eq_attr "type" "call")
+ (and (ne (symbol_ref "TARGET_INSNS_64") (const_int 0))
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "b")))))
+ "(s2+s2w)+br1,s2+br0+br1")
+
+(define_insn_reservation "call_mvk_s2n" 6
+ (and (eq_attr "type" "call")
+ (and (eq (symbol_ref "TARGET_INSNS_64") (const_int 0))
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "b")))))
+ "(s2+s2w)+br1,s2,s2")
+
+(define_insn_reservation "single_s2n" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "b"))))
+ "(s2+s2w)")
+
+(define_insn_reservation "cmpdp_s2n" 2
+ (and (eq_attr "type" "cmpdp")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "b"))))
+ "s2,(s2)+s2w")
+
+(define_insn_reservation "dp2_s2n" 2
+ (and (eq_attr "type" "dp2")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "b"))))
+ "s2+s2w,s2w")
+
+(define_insn_reservation "fp4_s2n" 4
+ (and (eq_attr "type" "fp4")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "b"))))
+ "s2,nothing*2,s2w")
+
+(define_insn_reservation "mvilc4_s2n" 4
+ (and (eq_attr "type" "mvilc")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "b"))))
+ "(s2+s2w)")
+
+(define_insn_reservation "single_dl2n" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "dl")
+ (eq_attr "dest_regfile" "b"))))
+ "(d2|(l2+l2w))")
+
+(define_insn_reservation "single_ds2n" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "ds")
+ (eq_attr "dest_regfile" "b"))))
+ "(d2|(s2+s2w))")
+
+(define_insn_reservation "single_ls2n" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "ls")
+ (eq_attr "dest_regfile" "b"))))
+ "((l2+l2w)|(s2+s2w))")
+
+(define_insn_reservation "dp2_l2n" 2
+ (and (eq_attr "type" "dp2")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "l")
+ (eq_attr "dest_regfile" "b"))))
+ "l2+l2w,l2w")
+
+(define_insn_reservation "fp4_ls2n" 4
+ (and (eq_attr "type" "fp4")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "ls")
+ (eq_attr "dest_regfile" "b"))))
+ "(s2,nothing*2,s2w)|(l2,nothing*2,l2w)")
+
+(define_insn_reservation "adddp_ls2n" 7
+ (and (eq_attr "type" "adddp")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "ls")
+ (eq_attr "dest_regfile" "b"))))
+ "((s2)*2,nothing*3,s2w*2)|((l2)*2,nothing*3,l2w*2)")
+
+(define_insn_reservation "single_dls2n" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "dls")
+ (eq_attr "dest_regfile" "b"))))
+ "(d2|(l2+l2w)|(s2+s2w))")
+
+(define_insn_reservation "mpy2_m2n" 2
+ (and (eq_attr "type" "mpy2")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "m")
+ (eq_attr "dest_regfile" "b"))))
+ "m2,m2w")
+
+(define_insn_reservation "mpy4_m2n" 4
+ (and (eq_attr "type" "mpy4")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "m")
+ (eq_attr "dest_regfile" "b"))))
+ "m2,nothing,nothing,m2w")
+
+(define_insn_reservation "mpydp_m2n" 10
+ (and (eq_attr "type" "mpydp")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "m")
+ (eq_attr "dest_regfile" "b"))))
+ "(m2)*4,nothing*4,m2w*2")
+
+(define_insn_reservation "mpyspdp_m2n" 7
+ (and (eq_attr "type" "mpyspdp")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "m")
+ (eq_attr "dest_regfile" "b"))))
+ "(m2)*2,nothing*3,m2w*2")
+
+(define_insn_reservation "mpysp2dp_m2n" 5
+ (and (eq_attr "type" "mpysp2dp")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "m")
+ (eq_attr "dest_regfile" "b"))))
+ "m2,nothing*2,m2w*2")
+
+;; Definitions for side 1, cross y
+
+;; Scheduling description for TI C6X.
+;; Copyright (C) 2010, 2011 Free Software Foundation, Inc.
+;; Contributed by Bernd Schmidt <bernds@codesourcery.com>
+;; Contributed by CodeSourcery.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Input file for gensched.sh. We process this file multiple times,
+;; replacing 1 with either 1 or 2 for each of the sides of the
+;; machine, and a correspondingly with "a" or "b". y and
+;; +x1 are replaced with yes/no and the appropriate reservation.
+
+;; NOTE(review): this section is gensched.sh output (side 1, cross "y"
+;; -- operand read over the X1 cross path) generated from
+;; c6x-sched.md.in -- do not hand-edit; regenerate instead.
+(define_insn_reservation "load_d1y" 5
+ (and (eq_attr "type" "load")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "d_addr")
+ (eq_attr "addr_regfile" "a"))))
+ "d1+t2")
+
+(define_insn_reservation "store_d1y" 1
+ (and (eq_attr "type" "store")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "d_addr")
+ (eq_attr "addr_regfile" "a"))))
+ "d1+t2")
+
+(define_insn_reservation "loadn_d1y" 5
+ (and (eq_attr "type" "loadn")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "d_addr")
+ (eq_attr "addr_regfile" "a"))))
+ "d1+t1+t2")
+
+(define_insn_reservation "storen_d1y" 1
+ (and (eq_attr "type" "storen")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "d_addr")
+ (eq_attr "addr_regfile" "a"))))
+ "d1+t1+t2")
+
+(define_insn_reservation "single_d1y" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "d")
+ (eq_attr "dest_regfile" "a"))))
+ "d1+x1")
+
+(define_insn_reservation "single_l1y" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "l")
+ (eq_attr "dest_regfile" "a"))))
+ "l1+l1w+x1")
+
+(define_insn_reservation "fp4_l1y" 4
+ (and (eq_attr "type" "fp4")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "l")
+ (eq_attr "dest_regfile" "a"))))
+ "l1+x1,nothing*2,l1w")
+
+(define_insn_reservation "intdp_l1y" 5
+ (and (eq_attr "type" "intdp")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "l")
+ (eq_attr "dest_regfile" "a"))))
+ "l1+x1,nothing*2,l1w*2")
+
+(define_insn_reservation "adddp_l1y" 7
+ (and (eq_attr "type" "adddp")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "l")
+ (eq_attr "dest_regfile" "a"))))
+ "(l1+x1)*2,nothing*3,l1w*2")
+
+(define_insn_reservation "branch_s1y" 6
+ (and (eq_attr "type" "branch")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "a"))))
+ "(s1+s1w)+x1+br1")
+
+(define_insn_reservation "call_addkpc_s1y" 6
+ (and (eq_attr "type" "call")
+ (and (ne (symbol_ref "TARGET_INSNS_64") (const_int 0))
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "a")))))
+ "(s1+s1w)+x1+br1,s2+br0+br1")
+
+(define_insn_reservation "call_mvk_s1y" 6
+ (and (eq_attr "type" "call")
+ (and (eq (symbol_ref "TARGET_INSNS_64") (const_int 0))
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "a")))))
+ "(s1+s1w)+x1+br1,s2,s2")
+
+(define_insn_reservation "single_s1y" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "a"))))
+ "(s1+s1w)+x1")
+
+(define_insn_reservation "cmpdp_s1y" 2
+ (and (eq_attr "type" "cmpdp")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "a"))))
+ "s1+x1,(s1+x1)+s1w")
+
+(define_insn_reservation "dp2_s1y" 2
+ (and (eq_attr "type" "dp2")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "a"))))
+ "s1+s1w+x1,s1w")
+
+(define_insn_reservation "fp4_s1y" 4
+ (and (eq_attr "type" "fp4")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "a"))))
+ "s1+x1,nothing*2,s1w")
+
+(define_insn_reservation "mvilc4_s1y" 4
+ (and (eq_attr "type" "mvilc")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "a"))))
+ "(s1+s1w)+x1")
+
+(define_insn_reservation "single_dl1y" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "dl")
+ (eq_attr "dest_regfile" "a"))))
+ "(d1|(l1+l1w))+x1")
+
+(define_insn_reservation "single_ds1y" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "ds")
+ (eq_attr "dest_regfile" "a"))))
+ "(d1|(s1+s1w))+x1")
+
+(define_insn_reservation "single_ls1y" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "ls")
+ (eq_attr "dest_regfile" "a"))))
+ "((l1+l1w)|(s1+s1w))+x1")
+
+(define_insn_reservation "dp2_l1y" 2
+ (and (eq_attr "type" "dp2")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "l")
+ (eq_attr "dest_regfile" "a"))))
+ "l1+l1w+x1,l1w")
+
+(define_insn_reservation "fp4_ls1y" 4
+ (and (eq_attr "type" "fp4")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "ls")
+ (eq_attr "dest_regfile" "a"))))
+ "(s1+x1,nothing*2,s1w)|(l1+x1,nothing*2,l1w)")
+
+(define_insn_reservation "adddp_ls1y" 7
+ (and (eq_attr "type" "adddp")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "ls")
+ (eq_attr "dest_regfile" "a"))))
+ "((s1+x1)*2,nothing*3,s1w*2)|((l1+x1)*2,nothing*3,l1w*2)")
+
+(define_insn_reservation "single_dls1y" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "dls")
+ (eq_attr "dest_regfile" "a"))))
+ "(d1|(l1+l1w)|(s1+s1w))+x1")
+
+(define_insn_reservation "mpy2_m1y" 2
+ (and (eq_attr "type" "mpy2")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "m")
+ (eq_attr "dest_regfile" "a"))))
+ "m1+x1,m1w")
+
+(define_insn_reservation "mpy4_m1y" 4
+ (and (eq_attr "type" "mpy4")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "m")
+ (eq_attr "dest_regfile" "a"))))
+ "m1+x1,nothing,nothing,m1w")
+
+(define_insn_reservation "mpydp_m1y" 10
+ (and (eq_attr "type" "mpydp")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "m")
+ (eq_attr "dest_regfile" "a"))))
+ "(m1+x1)*4,nothing*4,m1w*2")
+
+(define_insn_reservation "mpyspdp_m1y" 7
+ (and (eq_attr "type" "mpyspdp")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "m")
+ (eq_attr "dest_regfile" "a"))))
+ "(m1+x1)*2,nothing*3,m1w*2")
+
+(define_insn_reservation "mpysp2dp_m1y" 5
+ (and (eq_attr "type" "mpysp2dp")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "m")
+ (eq_attr "dest_regfile" "a"))))
+ "m1+x1,nothing*2,m1w*2")
+
+;; Definitions for side 2, cross y
+
+;; Scheduling description for TI C6X.
+;; Copyright (C) 2010, 2011 Free Software Foundation, Inc.
+;; Contributed by Bernd Schmidt <bernds@codesourcery.com>
+;; Contributed by CodeSourcery.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Input file for gensched.sh. We process this file multiple times,
+;; replacing 2 with either 1 or 2 for each of the sides of the
+;; machine, and b correspondingly with "a" or "b". y and
+;; +x2 are replaced with yes/no and the appropriate reservation.
+
+;; NOTE(review): this section is gensched.sh output (side 2, cross "y"
+;; -- operand read over the X2 cross path) generated from
+;; c6x-sched.md.in -- do not hand-edit; regenerate instead.
+(define_insn_reservation "load_d2y" 5
+ (and (eq_attr "type" "load")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "d_addr")
+ (eq_attr "addr_regfile" "b"))))
+ "d2+t1")
+
+(define_insn_reservation "store_d2y" 1
+ (and (eq_attr "type" "store")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "d_addr")
+ (eq_attr "addr_regfile" "b"))))
+ "d2+t1")
+
+(define_insn_reservation "loadn_d2y" 5
+ (and (eq_attr "type" "loadn")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "d_addr")
+ (eq_attr "addr_regfile" "b"))))
+ "d2+t1+t2")
+
+(define_insn_reservation "storen_d2y" 1
+ (and (eq_attr "type" "storen")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "d_addr")
+ (eq_attr "addr_regfile" "b"))))
+ "d2+t1+t2")
+
+(define_insn_reservation "single_d2y" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "d")
+ (eq_attr "dest_regfile" "b"))))
+ "d2+x2")
+
+(define_insn_reservation "single_l2y" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "l")
+ (eq_attr "dest_regfile" "b"))))
+ "l2+l2w+x2")
+
+(define_insn_reservation "fp4_l2y" 4
+ (and (eq_attr "type" "fp4")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "l")
+ (eq_attr "dest_regfile" "b"))))
+ "l2+x2,nothing*2,l2w")
+
+(define_insn_reservation "intdp_l2y" 5
+ (and (eq_attr "type" "intdp")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "l")
+ (eq_attr "dest_regfile" "b"))))
+ "l2+x2,nothing*2,l2w*2")
+
+(define_insn_reservation "adddp_l2y" 7
+ (and (eq_attr "type" "adddp")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "l")
+ (eq_attr "dest_regfile" "b"))))
+ "(l2+x2)*2,nothing*3,l2w*2")
+
+(define_insn_reservation "branch_s2y" 6
+ (and (eq_attr "type" "branch")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "b"))))
+ "(s2+s2w)+x2+br1")
+
+(define_insn_reservation "call_addkpc_s2y" 6
+ (and (eq_attr "type" "call")
+ (and (ne (symbol_ref "TARGET_INSNS_64") (const_int 0))
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "b")))))
+ "(s2+s2w)+x2+br1,s2+br0+br1")
+
+(define_insn_reservation "call_mvk_s2y" 6
+ (and (eq_attr "type" "call")
+ (and (eq (symbol_ref "TARGET_INSNS_64") (const_int 0))
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "b")))))
+ "(s2+s2w)+x2+br1,s2,s2")
+
+(define_insn_reservation "single_s2y" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "b"))))
+ "(s2+s2w)+x2")
+
+(define_insn_reservation "cmpdp_s2y" 2
+ (and (eq_attr "type" "cmpdp")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "b"))))
+ "s2+x2,(s2+x2)+s2w")
+
+(define_insn_reservation "dp2_s2y" 2
+ (and (eq_attr "type" "dp2")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "b"))))
+ "s2+s2w+x2,s2w")
+
+(define_insn_reservation "fp4_s2y" 4
+ (and (eq_attr "type" "fp4")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "b"))))
+ "s2+x2,nothing*2,s2w")
+
+(define_insn_reservation "mvilc4_s2y" 4
+ (and (eq_attr "type" "mvilc")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "b"))))
+ "(s2+s2w)+x2")
+
+(define_insn_reservation "single_dl2y" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "dl")
+ (eq_attr "dest_regfile" "b"))))
+ "(d2|(l2+l2w))+x2")
+
+(define_insn_reservation "single_ds2y" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "ds")
+ (eq_attr "dest_regfile" "b"))))
+ "(d2|(s2+s2w))+x2")
+
+(define_insn_reservation "single_ls2y" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "ls")
+ (eq_attr "dest_regfile" "b"))))
+ "((l2+l2w)|(s2+s2w))+x2")
+
+(define_insn_reservation "dp2_l2y" 2
+ (and (eq_attr "type" "dp2")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "l")
+ (eq_attr "dest_regfile" "b"))))
+ "l2+l2w+x2,l2w")
+
+(define_insn_reservation "fp4_ls2y" 4
+ (and (eq_attr "type" "fp4")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "ls")
+ (eq_attr "dest_regfile" "b"))))
+ "(s2+x2,nothing*2,s2w)|(l2+x2,nothing*2,l2w)")
+
+(define_insn_reservation "adddp_ls2y" 7
+ (and (eq_attr "type" "adddp")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "ls")
+ (eq_attr "dest_regfile" "b"))))
+ "((s2+x2)*2,nothing*3,s2w*2)|((l2+x2)*2,nothing*3,l2w*2)")
+
+(define_insn_reservation "single_dls2y" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "dls")
+ (eq_attr "dest_regfile" "b"))))
+ "(d2|(l2+l2w)|(s2+s2w))+x2")
+
+(define_insn_reservation "mpy2_m2y" 2
+ (and (eq_attr "type" "mpy2")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "m")
+ (eq_attr "dest_regfile" "b"))))
+ "m2+x2,m2w")
+
+(define_insn_reservation "mpy4_m2y" 4
+ (and (eq_attr "type" "mpy4")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "m")
+ (eq_attr "dest_regfile" "b"))))
+ "m2+x2,nothing,nothing,m2w")
+
+(define_insn_reservation "mpydp_m2y" 10
+ (and (eq_attr "type" "mpydp")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "m")
+ (eq_attr "dest_regfile" "b"))))
+ "(m2+x2)*4,nothing*4,m2w*2")
+
+(define_insn_reservation "mpyspdp_m2y" 7
+ (and (eq_attr "type" "mpyspdp")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "m")
+ (eq_attr "dest_regfile" "b"))))
+ "(m2+x2)*2,nothing*3,m2w*2")
+
+(define_insn_reservation "mpysp2dp_m2y" 5
+ (and (eq_attr "type" "mpysp2dp")
+ (and (eq_attr "cross" "y")
+ (and (eq_attr "units" "m")
+ (eq_attr "dest_regfile" "b"))))
+ "m2+x2,nothing*2,m2w*2")
--- /dev/null
+;; Scheduling description for TI C6X.
+;; Copyright (C) 2010, 2011 Free Software Foundation, Inc.
+;; Contributed by Bernd Schmidt <bernds@codesourcery.com>
+;; Contributed by CodeSourcery.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; Input file for gensched.sh. We process this file multiple times,
+;; replacing _N_ with either 1 or 2 for each of the sides of the
+;; machine, and _RF_ correspondingly with "a" or "b". _CROSS_ and
+;; _CUNIT_ are replaced with yes/no and the appropriate reservation.
+;; _NX_ expands to the number of the side whose T (data) path the
+;; memory access uses: the same side as _N_ for cross "n", the opposite
+;; side for cross "y" (compare "store_d2n" -> "d2+t2" with
+;; "store_d2y" -> "d2+t1" in the generated output).
+
+(define_insn_reservation "load_d_N__CROSS_" 5
+ (and (eq_attr "type" "load")
+ (and (eq_attr "cross" "_CROSS_")
+ (and (eq_attr "units" "d_addr")
+ (eq_attr "addr_regfile" "_RF_"))))
+ "d_N_+t_NX_")
+
+(define_insn_reservation "store_d_N__CROSS_" 1
+ (and (eq_attr "type" "store")
+ (and (eq_attr "cross" "_CROSS_")
+ (and (eq_attr "units" "d_addr")
+ (eq_attr "addr_regfile" "_RF_"))))
+ "d_N_+t_NX_")
+
+;; Non-aligned loads/stores occupy both T data paths regardless of side.
+(define_insn_reservation "loadn_d_N__CROSS_" 5
+ (and (eq_attr "type" "loadn")
+ (and (eq_attr "cross" "_CROSS_")
+ (and (eq_attr "units" "d_addr")
+ (eq_attr "addr_regfile" "_RF_"))))
+ "d_N_+t1+t2")
+
+(define_insn_reservation "storen_d_N__CROSS_" 1
+ (and (eq_attr "type" "storen")
+ (and (eq_attr "cross" "_CROSS_")
+ (and (eq_attr "units" "d_addr")
+ (eq_attr "addr_regfile" "_RF_"))))
+ "d_N_+t1+t2")
+
+(define_insn_reservation "single_d_N__CROSS_" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "_CROSS_")
+ (and (eq_attr "units" "d")
+ (eq_attr "dest_regfile" "_RF_"))))
+ "d_N__CUNIT_")
+
+(define_insn_reservation "single_l_N__CROSS_" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "_CROSS_")
+ (and (eq_attr "units" "l")
+ (eq_attr "dest_regfile" "_RF_"))))
+ "l_N_+l_N_w_CUNIT_")
+
+(define_insn_reservation "fp4_l_N__CROSS_" 4
+ (and (eq_attr "type" "fp4")
+ (and (eq_attr "cross" "_CROSS_")
+ (and (eq_attr "units" "l")
+ (eq_attr "dest_regfile" "_RF_"))))
+ "l_N__CUNIT_,nothing*2,l_N_w")
+
+(define_insn_reservation "intdp_l_N__CROSS_" 5
+ (and (eq_attr "type" "intdp")
+ (and (eq_attr "cross" "_CROSS_")
+ (and (eq_attr "units" "l")
+ (eq_attr "dest_regfile" "_RF_"))))
+ "l_N__CUNIT_,nothing*2,l_N_w*2")
+
+(define_insn_reservation "adddp_l_N__CROSS_" 7
+ (and (eq_attr "type" "adddp")
+ (and (eq_attr "cross" "_CROSS_")
+ (and (eq_attr "units" "l")
+ (eq_attr "dest_regfile" "_RF_"))))
+ "(l_N__CUNIT_)*2,nothing*3,l_N_w*2")
+
+(define_insn_reservation "branch_s_N__CROSS_" 6
+ (and (eq_attr "type" "branch")
+ (and (eq_attr "cross" "_CROSS_")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "_RF_"))))
+ "(s_N_+s_N_w)_CUNIT_+br1")
+
+;; Calls expand differently depending on whether the return address is
+;; built with ADDKPC (TARGET_INSNS_64) or a pair of MVK/MVKH insns.
+(define_insn_reservation "call_addkpc_s_N__CROSS_" 6
+ (and (eq_attr "type" "call")
+ (and (ne (symbol_ref "TARGET_INSNS_64") (const_int 0))
+ (and (eq_attr "cross" "_CROSS_")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "_RF_")))))
+ "(s_N_+s_N_w)_CUNIT_+br1,s2+br0+br1")
+
+(define_insn_reservation "call_mvk_s_N__CROSS_" 6
+ (and (eq_attr "type" "call")
+ (and (eq (symbol_ref "TARGET_INSNS_64") (const_int 0))
+ (and (eq_attr "cross" "_CROSS_")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "_RF_")))))
+ "(s_N_+s_N_w)_CUNIT_+br1,s2,s2")
+
+(define_insn_reservation "single_s_N__CROSS_" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "_CROSS_")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "_RF_"))))
+ "(s_N_+s_N_w)_CUNIT_")
+
+(define_insn_reservation "cmpdp_s_N__CROSS_" 2
+ (and (eq_attr "type" "cmpdp")
+ (and (eq_attr "cross" "_CROSS_")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "_RF_"))))
+ "s_N__CUNIT_,(s_N__CUNIT_)+s_N_w")
+
+(define_insn_reservation "dp2_s_N__CROSS_" 2
+ (and (eq_attr "type" "dp2")
+ (and (eq_attr "cross" "_CROSS_")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "_RF_"))))
+ "s_N_+s_N_w_CUNIT_,s_N_w")
+
+(define_insn_reservation "fp4_s_N__CROSS_" 4
+ (and (eq_attr "type" "fp4")
+ (and (eq_attr "cross" "_CROSS_")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "_RF_"))))
+ "s_N__CUNIT_,nothing*2,s_N_w")
+
+(define_insn_reservation "mvilc4_s_N__CROSS_" 4
+ (and (eq_attr "type" "mvilc")
+ (and (eq_attr "cross" "_CROSS_")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "_RF_"))))
+ "(s_N_+s_N_w)_CUNIT_")
+
+;; "dl", "ds", "ls" and "dls" describe insns that may issue on any one
+;; of the listed units; the alternatives appear as | choices below.
+(define_insn_reservation "single_dl_N__CROSS_" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "_CROSS_")
+ (and (eq_attr "units" "dl")
+ (eq_attr "dest_regfile" "_RF_"))))
+ "(d_N_|(l_N_+l_N_w))_CUNIT_")
+
+(define_insn_reservation "single_ds_N__CROSS_" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "_CROSS_")
+ (and (eq_attr "units" "ds")
+ (eq_attr "dest_regfile" "_RF_"))))
+ "(d_N_|(s_N_+s_N_w))_CUNIT_")
+
+(define_insn_reservation "single_ls_N__CROSS_" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "_CROSS_")
+ (and (eq_attr "units" "ls")
+ (eq_attr "dest_regfile" "_RF_"))))
+ "((l_N_+l_N_w)|(s_N_+s_N_w))_CUNIT_")
+
+(define_insn_reservation "dp2_l_N__CROSS_" 2
+ (and (eq_attr "type" "dp2")
+ (and (eq_attr "cross" "_CROSS_")
+ (and (eq_attr "units" "l")
+ (eq_attr "dest_regfile" "_RF_"))))
+ "l_N_+l_N_w_CUNIT_,l_N_w")
+
+(define_insn_reservation "fp4_ls_N__CROSS_" 4
+ (and (eq_attr "type" "fp4")
+ (and (eq_attr "cross" "_CROSS_")
+ (and (eq_attr "units" "ls")
+ (eq_attr "dest_regfile" "_RF_"))))
+ "(s_N__CUNIT_,nothing*2,s_N_w)|(l_N__CUNIT_,nothing*2,l_N_w)")
+
+(define_insn_reservation "adddp_ls_N__CROSS_" 7
+ (and (eq_attr "type" "adddp")
+ (and (eq_attr "cross" "_CROSS_")
+ (and (eq_attr "units" "ls")
+ (eq_attr "dest_regfile" "_RF_"))))
+ "((s_N__CUNIT_)*2,nothing*3,s_N_w*2)|((l_N__CUNIT_)*2,nothing*3,l_N_w*2)")
+
+(define_insn_reservation "single_dls_N__CROSS_" 1
+ (and (eq_attr "type" "single")
+ (and (eq_attr "cross" "_CROSS_")
+ (and (eq_attr "units" "dls")
+ (eq_attr "dest_regfile" "_RF_"))))
+ "(d_N_|(l_N_+l_N_w)|(s_N_+s_N_w))_CUNIT_")
+
+(define_insn_reservation "mpy2_m_N__CROSS_" 2
+ (and (eq_attr "type" "mpy2")
+ (and (eq_attr "cross" "_CROSS_")
+ (and (eq_attr "units" "m")
+ (eq_attr "dest_regfile" "_RF_"))))
+ "m_N__CUNIT_,m_N_w")
+
+(define_insn_reservation "mpy4_m_N__CROSS_" 4
+ (and (eq_attr "type" "mpy4")
+ (and (eq_attr "cross" "_CROSS_")
+ (and (eq_attr "units" "m")
+ (eq_attr "dest_regfile" "_RF_"))))
+ "m_N__CUNIT_,nothing,nothing,m_N_w")
+
+(define_insn_reservation "mpydp_m_N__CROSS_" 10
+ (and (eq_attr "type" "mpydp")
+ (and (eq_attr "cross" "_CROSS_")
+ (and (eq_attr "units" "m")
+ (eq_attr "dest_regfile" "_RF_"))))
+ "(m_N__CUNIT_)*4,nothing*4,m_N_w*2")
+
+(define_insn_reservation "mpyspdp_m_N__CROSS_" 7
+ (and (eq_attr "type" "mpyspdp")
+ (and (eq_attr "cross" "_CROSS_")
+ (and (eq_attr "units" "m")
+ (eq_attr "dest_regfile" "_RF_"))))
+ "(m_N__CUNIT_)*2,nothing*3,m_N_w*2")
+
+(define_insn_reservation "mpysp2dp_m_N__CROSS_" 5
+ (and (eq_attr "type" "mpysp2dp")
+ (and (eq_attr "cross" "_CROSS_")
+ (and (eq_attr "units" "m")
+ (eq_attr "dest_regfile" "_RF_"))))
+ "m_N__CUNIT_,nothing*2,m_N_w*2")
--- /dev/null
+; -*- buffer-read-only: t -*-
+; Generated automatically by genopt.sh from c6x-isas.def.
+;
+; Copyright (C) 2011 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+; NOTE(review): this file is genopt.sh output; the EnumValue records
+; below must stay in the same order as the entries in c6x-isas.def
+; (all_isas in c6x.c is indexed by these Value numbers).  Change
+; c6x-isas.def and regenerate rather than editing Value numbers here.
+Enum
+Name(c6x_isa) Type(int)
+Known C6X ISAs (for use with the -march= option):
+
+EnumValue
+Enum(c6x_isa) String(c62x) Value(0)
+
+EnumValue
+Enum(c6x_isa) String(c64x) Value(1)
+
+EnumValue
+Enum(c6x_isa) String(c64x+) Value(2)
+
+EnumValue
+Enum(c6x_isa) String(c67x) Value(3)
+
+EnumValue
+Enum(c6x_isa) String(c67x+) Value(4)
+
+EnumValue
+Enum(c6x_isa) String(c674x) Value(5)
+
--- /dev/null
+/* Target Code for TI C6X
+ Copyright (C) 2010, 2011 Free Software Foundation, Inc.
+ Contributed by Andrew Jenner <andrew@codesourcery.com>
+ Contributed by Bernd Schmidt <bernds@codesourcery.com>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "tree.h"
+#include "insn-flags.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "insn-codes.h"
+#include "expr.h"
+#include "regs.h"
+#include "optabs.h"
+#include "recog.h"
+#include "ggc.h"
+#include "sched-int.h"
+#include "timevar.h"
+#include "tm_p.h"
+#include "tm-preds.h"
+#include "tm-constrs.h"
+#include "df.h"
+#include "integrate.h"
+#include "diagnostic-core.h"
+#include "cgraph.h"
+#include "cfglayout.h"
+#include "langhooks.h"
+#include "target.h"
+#include "target-def.h"
+#include "sel-sched.h"
+#include "debug.h"
+#include "opts.h"
+
+/* Table of supported architecture variants.  */
+typedef struct
+{
+ const char *arch; /* ISA name as given to -march=.  */
+ enum c6x_cpu_type type; /* Representative CPU for this ISA.  */
+ unsigned short features; /* Insn feature bits ORed into c6x_insn_mask
+ when this ISA is selected.  */
+} c6x_arch_table;
+
+/* A list of all ISAs, mapping each one to a representative device.
+ Used for -march selection.  Indexed by the c6x_isa enum value parsed
+ from the command line (see c6x-tables.opt / c6x-isas.def, which must
+ stay in the same order).  */
+static const c6x_arch_table all_isas[] =
+{
+#define C6X_ISA(NAME,DEVICE,FLAGS) \
+ { NAME, DEVICE, FLAGS },
+#include "c6x-isas.def"
+#undef C6X_ISA
+ { NULL, C6X_CPU_C62X, 0 } /* End-of-table sentinel.  */
+};
+
+/* This is the parsed result of the "-march=" option, if given.  */
+enum c6x_cpu_type c6x_arch = C6X_DEFAULT_ARCH;
+
+/* A mask of insn types that are allowed by the architecture selected by
+ the -march option.  Updated in c6x_option_override.  */
+unsigned long c6x_insn_mask = C6X_DEFAULT_INSN_MASK;
+
+/* The instruction that is being output (as obtained from FINAL_PRESCAN_INSN).
+ */
+static rtx c6x_current_insn = NULL_RTX;
+
+/* A decl we build to access __c6xabi_DSBT_base.  */
+static GTY(()) tree dsbt_decl;
+\f
+/* Determines whether we run our final scheduling pass or not.  We always
+ avoid the normal second scheduling pass.  (Saved from
+ flag_schedule_insns_after_reload in c6x_option_override.)  */
+static int c6x_flag_schedule_insns2;
+
+/* Determines whether we run variable tracking in machine dependent
+ reorganization.  NOTE(review): not assigned in this part of the file;
+ confirm where it is initialized.  */
+static int c6x_flag_var_tracking;
+
+/* Determines whether we use modulo scheduling.  (Saved from
+ flag_modulo_sched in c6x_option_override.)  */
+static int c6x_flag_modulo_sched;
+
+/* Record the state of flag_pic before we set it to 1 for DSBT.  */
+int c6x_initial_flag_pic;
+\f
+/* Per-insn bookkeeping used by the C6X scheduler.  */
+typedef struct
+{
+ /* We record the clock cycle for every insn during scheduling.  */
+ int clock;
+ /* After scheduling, we run assign_reservations to choose unit
+ reservations for all insns.  These are recorded here.  */
+ int reservation;
+ /* Records the new condition for insns which must be made
+ conditional after scheduling.  An entry of NULL_RTX means no such
+ change is necessary.  */
+ rtx new_cond;
+ /* True for the first insn that was scheduled in an ebb.  */
+ bool ebb_start;
+} c6x_sched_insn_info;
+
+/* Declare the VEC type and its heap allocator (pre-4.8 GCC VEC API).  */
+DEF_VEC_O(c6x_sched_insn_info);
+DEF_VEC_ALLOC_O(c6x_sched_insn_info, heap);
+
+/* Record a c6x_sched_insn_info structure for every insn in the function.  */
+static VEC(c6x_sched_insn_info, heap) *insn_info;
+
+/* Accessors; N is presumably the insn UID -- confirm with callers.  */
+#define INSN_INFO_LENGTH (VEC_length (c6x_sched_insn_info, insn_info))
+#define INSN_INFO_ENTRY(N) (*VEC_index (c6x_sched_insn_info, insn_info, (N)))
+/* NOTE(review): flag relating to CFI section output; set and tested
+ elsewhere in this file -- confirm exact use.  */
+static bool done_cfi_sections;
+
+/* The DFA names of the units, in packet order.  */
+static const char *const c6x_unit_names[] =
+{
+ "d1", "l1", "s1", "m1",
+ "d2", "l2", "s2", "m2",
+};
+
+/* Bit flags describing which functional unit(s) an insn may use.  */
+#define RESERVATION_FLAG_D 1
+#define RESERVATION_FLAG_L 2
+#define RESERVATION_FLAG_S 4
+#define RESERVATION_FLAG_M 8
+#define RESERVATION_FLAG_DL (RESERVATION_FLAG_D | RESERVATION_FLAG_L)
+#define RESERVATION_FLAG_DS (RESERVATION_FLAG_D | RESERVATION_FLAG_S)
+#define RESERVATION_FLAG_LS (RESERVATION_FLAG_L | RESERVATION_FLAG_S)
+#define RESERVATION_FLAG_DLS (RESERVATION_FLAG_D | RESERVATION_FLAG_LS)
+
+/* Indices of the "s1" and "s2" units in c6x_unit_names above.  */
+#define RESERVATION_S1 2
+#define RESERVATION_S2 6
+\f
+/* Register map for debugging.  */
+int const dbx_register_map[FIRST_PSEUDO_REGISTER] =
+{
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, /* A0 - A15.  */
+ 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, /* A16 - A31.  */
+ 50, 51, 52,
+ 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, /* B0 - B15.  */
+ 29, 30, 31,
+ 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, /* B16 - B31.  */
+ 66, 67, 68,
+ -1, -1, -1 /* FP, ARGP, ILC.  */
+};
+\f
+/* Allocate a new, cleared machine_function structure.  Installed as the
+ init_machine_status hook in c6x_option_override.  */
+
+static struct machine_function *
+c6x_init_machine_status (void)
+{
+ return ggc_alloc_cleared_machine_function ();
+}
+
+/* Implement TARGET_OPTION_OVERRIDE.  Apply -march, suppress the generic
+ scheduling passes the port replaces with its own (saving their
+ settings -- see c6x_flag_schedule_insns2 / c6x_flag_modulo_sched),
+ and reconcile -fpic with -mdsbt.  */
+
+static void
+c6x_option_override (void)
+{
+ if (global_options_set.x_c6x_arch_option)
+ {
+ /* -march was given: select the CPU type and replace the per-CPU
+ bits of the allowed-insn mask with this ISA's features.  */
+ c6x_arch = all_isas[c6x_arch_option].type;
+ c6x_insn_mask &= ~C6X_INSNS_ALL_CPU_BITS;
+ c6x_insn_mask |= all_isas[c6x_arch_option].features;
+ }
+
+ /* Save and clear so the generic post-reload scheduling pass does not
+ run; we always avoid the normal second scheduling pass.  */
+ c6x_flag_schedule_insns2 = flag_schedule_insns_after_reload;
+ flag_schedule_insns_after_reload = 0;
+
+ /* Likewise for modulo scheduling.  */
+ c6x_flag_modulo_sched = flag_modulo_sched;
+ flag_modulo_sched = 0;
+
+ init_machine_status = c6x_init_machine_status;
+
+ /* PIC on this target is only implemented via DSBT.  */
+ if (flag_pic && !TARGET_DSBT)
+ {
+ error ("-fpic and -fPIC not supported without -mdsbt on this target");
+ flag_pic = 0;
+ }
+ /* Remember the user's PIC setting before DSBT forces it on.  */
+ c6x_initial_flag_pic = flag_pic;
+ if (TARGET_DSBT && !flag_pic)
+ flag_pic = 1;
+}
+
+
+/* Implement the TARGET_CONDITIONAL_REGISTER_USAGE hook.  C62x and C67x
+ lack the upper register halves A16-A31/B16-B31, so mark them fixed;
+ on TARGET_INSNS_64 CPUs A0 is usable as a predicate register, so move
+ it from the non-predicate classes to the predicate ones.  */
+
+static void
+c6x_conditional_register_usage (void)
+{
+ int i;
+ /* The B file follows the A file at offset 32 in the hard register
+ numbering, hence fixing both i and 32 + i.  */
+ if (c6x_arch == C6X_CPU_C62X || c6x_arch == C6X_CPU_C67X)
+ for (i = 16; i < 32; i++)
+ {
+ fixed_regs[i] = 1;
+ fixed_regs[32 + i] = 1;
+ }
+ if (TARGET_INSNS_64)
+ {
+ SET_HARD_REG_BIT (reg_class_contents[(int)PREDICATE_A_REGS],
+ REG_A0);
+ SET_HARD_REG_BIT (reg_class_contents[(int)PREDICATE_REGS],
+ REG_A0);
+ CLEAR_HARD_REG_BIT (reg_class_contents[(int)NONPREDICATE_A_REGS],
+ REG_A0);
+ CLEAR_HARD_REG_BIT (reg_class_contents[(int)NONPREDICATE_REGS],
+ REG_A0);
+ }
+}
+\f
+/* Cached SYMBOL_REFs for the __c6xabi_* floating-point comparison
+   helpers and the block-move helpers; initialized in
+   c6x_init_libfuncs below.  GTY(()) keeps them alive across GC.  */
+static GTY(()) rtx eqdf_libfunc;
+static GTY(()) rtx nedf_libfunc;
+static GTY(()) rtx ledf_libfunc;
+static GTY(()) rtx ltdf_libfunc;
+static GTY(()) rtx gedf_libfunc;
+static GTY(()) rtx gtdf_libfunc;
+static GTY(()) rtx eqsf_libfunc;
+static GTY(()) rtx nesf_libfunc;
+static GTY(()) rtx lesf_libfunc;
+static GTY(()) rtx ltsf_libfunc;
+static GTY(()) rtx gesf_libfunc;
+static GTY(()) rtx gtsf_libfunc;
+static GTY(()) rtx strasgi_libfunc;
+static GTY(()) rtx strasgi64p_libfunc;
+
+/* Implement the TARGET_INIT_LIBFUNCS macro.  We use this to rename library
+ functions to match the C6x ABI (__c6xabi_* helper names).  */
+
+static void
+c6x_init_libfuncs (void)
+{
+ /* Double-precision floating-point arithmetic. */
+ set_optab_libfunc (add_optab, DFmode, "__c6xabi_addd");
+ set_optab_libfunc (sdiv_optab, DFmode, "__c6xabi_divd");
+ set_optab_libfunc (smul_optab, DFmode, "__c6xabi_mpyd");
+ set_optab_libfunc (neg_optab, DFmode, "__c6xabi_negd");
+ set_optab_libfunc (sub_optab, DFmode, "__c6xabi_subd");
+
+ /* Single-precision floating-point arithmetic. */
+ set_optab_libfunc (add_optab, SFmode, "__c6xabi_addf");
+ set_optab_libfunc (sdiv_optab, SFmode, "__c6xabi_divf");
+ set_optab_libfunc (smul_optab, SFmode, "__c6xabi_mpyf");
+ set_optab_libfunc (neg_optab, SFmode, "__c6xabi_negf");
+ set_optab_libfunc (sub_optab, SFmode, "__c6xabi_subf");
+
+ /* Floating-point comparisons.  Cache the SYMBOL_REFs in the statics
+ above for use by the comparison expanders.  */
+ eqsf_libfunc = init_one_libfunc ("__c6xabi_eqf");
+ nesf_libfunc = init_one_libfunc ("__c6xabi_neqf");
+ lesf_libfunc = init_one_libfunc ("__c6xabi_lef");
+ ltsf_libfunc = init_one_libfunc ("__c6xabi_ltf");
+ gesf_libfunc = init_one_libfunc ("__c6xabi_gef");
+ gtsf_libfunc = init_one_libfunc ("__c6xabi_gtf");
+ eqdf_libfunc = init_one_libfunc ("__c6xabi_eqd");
+ nedf_libfunc = init_one_libfunc ("__c6xabi_neqd");
+ ledf_libfunc = init_one_libfunc ("__c6xabi_led");
+ ltdf_libfunc = init_one_libfunc ("__c6xabi_ltd");
+ gedf_libfunc = init_one_libfunc ("__c6xabi_ged");
+ gtdf_libfunc = init_one_libfunc ("__c6xabi_gtd");
+
+ /* Clear most comparison optab entries so the generic code does not
+ call the helpers directly; NOTE(review): presumably the backend's
+ own comparison expansion uses the cached libfuncs instead --
+ confirm against the compare expanders elsewhere in the file.  */
+ set_optab_libfunc (eq_optab, SFmode, NULL);
+ set_optab_libfunc (ne_optab, SFmode, "__c6xabi_neqf");
+ set_optab_libfunc (gt_optab, SFmode, NULL);
+ set_optab_libfunc (ge_optab, SFmode, NULL);
+ set_optab_libfunc (lt_optab, SFmode, NULL);
+ set_optab_libfunc (le_optab, SFmode, NULL);
+ set_optab_libfunc (unord_optab, SFmode, "__c6xabi_unordf");
+ set_optab_libfunc (eq_optab, DFmode, NULL);
+ set_optab_libfunc (ne_optab, DFmode, "__c6xabi_neqd");
+ set_optab_libfunc (gt_optab, DFmode, NULL);
+ set_optab_libfunc (ge_optab, DFmode, NULL);
+ set_optab_libfunc (lt_optab, DFmode, NULL);
+ set_optab_libfunc (le_optab, DFmode, NULL);
+ set_optab_libfunc (unord_optab, DFmode, "__c6xabi_unordd");
+
+ /* Floating-point to integer conversions. */
+ set_conv_libfunc (sfix_optab, SImode, DFmode, "__c6xabi_fixdi");
+ set_conv_libfunc (ufix_optab, SImode, DFmode, "__c6xabi_fixdu");
+ set_conv_libfunc (sfix_optab, DImode, DFmode, "__c6xabi_fixdlli");
+ set_conv_libfunc (ufix_optab, DImode, DFmode, "__c6xabi_fixdull");
+ set_conv_libfunc (sfix_optab, SImode, SFmode, "__c6xabi_fixfi");
+ set_conv_libfunc (ufix_optab, SImode, SFmode, "__c6xabi_fixfu");
+ set_conv_libfunc (sfix_optab, DImode, SFmode, "__c6xabi_fixflli");
+ set_conv_libfunc (ufix_optab, DImode, SFmode, "__c6xabi_fixfull");
+
+ /* Conversions between floating types. */
+ set_conv_libfunc (trunc_optab, SFmode, DFmode, "__c6xabi_cvtdf");
+ set_conv_libfunc (sext_optab, DFmode, SFmode, "__c6xabi_cvtfd");
+
+ /* Integer to floating-point conversions. */
+ set_conv_libfunc (sfloat_optab, DFmode, SImode, "__c6xabi_fltid");
+ set_conv_libfunc (ufloat_optab, DFmode, SImode, "__c6xabi_fltud");
+ set_conv_libfunc (sfloat_optab, DFmode, DImode, "__c6xabi_fltllid");
+ set_conv_libfunc (ufloat_optab, DFmode, DImode, "__c6xabi_fltulld");
+ set_conv_libfunc (sfloat_optab, SFmode, SImode, "__c6xabi_fltif");
+ set_conv_libfunc (ufloat_optab, SFmode, SImode, "__c6xabi_fltuf");
+ set_conv_libfunc (sfloat_optab, SFmode, DImode, "__c6xabi_fltllif");
+ set_conv_libfunc (ufloat_optab, SFmode, DImode, "__c6xabi_fltullf");
+
+ /* Long long. */
+ set_optab_libfunc (smul_optab, DImode, "__c6xabi_mpyll");
+ set_optab_libfunc (ashl_optab, DImode, "__c6xabi_llshl");
+ set_optab_libfunc (lshr_optab, DImode, "__c6xabi_llshru");
+ set_optab_libfunc (ashr_optab, DImode, "__c6xabi_llshr");
+
+ /* Integer division and remainder, 32- and 64-bit.  */
+ set_optab_libfunc (sdiv_optab, SImode, "__c6xabi_divi");
+ set_optab_libfunc (udiv_optab, SImode, "__c6xabi_divu");
+ set_optab_libfunc (smod_optab, SImode, "__c6xabi_remi");
+ set_optab_libfunc (umod_optab, SImode, "__c6xabi_remu");
+ set_optab_libfunc (sdivmod_optab, SImode, "__c6xabi_divremi");
+ set_optab_libfunc (udivmod_optab, SImode, "__c6xabi_divremu");
+ set_optab_libfunc (sdiv_optab, DImode, "__c6xabi_divlli");
+ set_optab_libfunc (udiv_optab, DImode, "__c6xabi_divull");
+ set_optab_libfunc (smod_optab, DImode, "__c6xabi_remlli");
+ set_optab_libfunc (umod_optab, DImode, "__c6xabi_remull");
+ set_optab_libfunc (udivmod_optab, DImode, "__c6xabi_divremull");
+
+ /* Block move. */
+ strasgi_libfunc = init_one_libfunc ("__c6xabi_strasgi");
+ strasgi64p_libfunc = init_one_libfunc ("__c6xabi_strasgi_64plus");
+}
+
+/* Begin the assembly file.  Emits the C6x EABI build attributes and
+   stashes flag_var_tracking for later use.  */
+
+static void
+c6x_file_start (void)
+{
+ /* Variable tracking should be run after all optimizations which change order
+ of insns. It also needs a valid CFG. This can't be done in
+ c6x_override_options, because flag_var_tracking is finalized after
+ that. */
+ c6x_flag_var_tracking = flag_var_tracking;
+ flag_var_tracking = 0;
+
+ /* Reset the per-file state consumed by c6x_output_file_unwind.  */
+ done_cfi_sections = false;
+ default_file_start ();
+
+ /* Arrays are aligned to 8-byte boundaries. */
+ asm_fprintf (asm_out_file,
+ "\t.c6xabi_attribute Tag_ABI_array_object_alignment, 0\n");
+ asm_fprintf (asm_out_file,
+ "\t.c6xabi_attribute Tag_ABI_array_object_align_expected, 0\n");
+
+ /* Stack alignment is 8 bytes. */
+ asm_fprintf (asm_out_file,
+ "\t.c6xabi_attribute Tag_ABI_stack_align_needed, 0\n");
+ asm_fprintf (asm_out_file,
+ "\t.c6xabi_attribute Tag_ABI_stack_align_preserved, 0\n");
+
+#if 0 /* FIXME: Reenable when TI's tools are fixed. */
+ /* ??? Ideally we'd check flag_short_wchar somehow. */
+ asm_fprintf (asm_out_file, "\t.c6xabi_attribute Tag_ABI_wchar_t, %d\n", 2);
+#endif
+
+ /* We conform to version 1.0 of the ABI. */
+ asm_fprintf (asm_out_file,
+ "\t.c6xabi_attribute Tag_ABI_conformance, \"1.0\"\n");
+
+}
+
+/* The LTO frontend only enables exceptions when it sees a function that
+ uses it. This changes the return value of dwarf2out_do_frame, so we
+ have to check before every function.  F is the assembly output file.
+ Emits the .cfi_sections directive at most once per file.  */
+
+void
+c6x_output_file_unwind (FILE * f)
+{
+ if (done_cfi_sections)
+ return;
+
+ /* Output a .cfi_sections directive if we aren't
+ already doing so for debug info. */
+ if (write_symbols != DWARF2_DEBUG && write_symbols != VMS_AND_DWARF2_DEBUG
+ && dwarf2out_do_frame ())
+ {
+ asm_fprintf (f, "\t.cfi_sections .c6xabi.exidx\n");
+ done_cfi_sections = true;
+ }
+}
+
+/* Output unwind directives at the end of a function.  F is the assembly
+   output file.  Always closes the function with .endp; emits
+   .cantunwind first for functions that provably cannot be unwound.  */
+
+static void
+c6x_output_fn_unwind (FILE * f)
+{
+ /* Return immediately if we are not generating unwinding tables. */
+ if (! (flag_unwind_tables || flag_exceptions))
+ return;
+
+ /* If this function will never be unwound, then mark it as such. */
+ if (!(flag_unwind_tables || crtl->uses_eh_lsda)
+ && (TREE_NOTHROW (current_function_decl)
+ || crtl->all_throwers_are_sibcalls))
+ fputs("\t.cantunwind\n", f);
+
+ fputs ("\t.endp\n", f);
+}
+
+\f
+/* Stack and Calling. */
+
+/* The ten registers used for passing arguments, in ABI order.  Slots
+   alternate between the A and B register files.  Indexed by
+   CUMULATIVE_ARGS.count in c6x_function_arg.  */
+int argument_registers[10] =
+{
+ REG_A4, REG_B4,
+ REG_A6, REG_B6,
+ REG_A8, REG_B8,
+ REG_A10, REG_B10,
+ REG_A12, REG_B12
+};
+
+/* Implements the macro INIT_CUMULATIVE_ARGS defined in c6x.h.
+   CUM is the state to initialize; FNTYPE is the function type, if
+   known; LIBNAME is non-NULL for libcalls.  Only named arguments are
+   passed in registers, so for stdarg functions NREGS is limited to
+   the number of named arguments.  */
+
+void
+c6x_init_cumulative_args (CUMULATIVE_ARGS *cum, const_tree fntype, rtx libname,
+ int n_named_args ATTRIBUTE_UNUSED)
+{
+ cum->count = 0;
+ cum->nregs = 10;
+ if (!libname && fntype)
+ {
+ /* We need to find out the number of named arguments. Unfortunately,
+ for incoming arguments, N_NAMED_ARGS is set to -1. */
+ if (stdarg_p (fntype))
+ cum->nregs = type_num_arguments (fntype) - 1;
+ if (cum->nregs > 10)
+ cum->nregs = 10;
+ }
+}
+
+/* Implements the macro FUNCTION_ARG defined in c6x.h.  Returns the rtx
+   for passing the current argument, or NULL_RTX once the ten argument
+   registers are exhausted (the argument then goes on the stack).  */
+
+static rtx
+c6x_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
+ const_tree type, bool named ATTRIBUTE_UNUSED)
+{
+ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
+ if (cum->count >= cum->nregs)
+ return NULL_RTX;
+ if (type)
+ {
+ HOST_WIDE_INT size = int_size_in_bytes (type);
+ if (TARGET_BIG_ENDIAN && AGGREGATE_TYPE_P (type))
+ {
+ if (size > 4)
+ {
+ /* Big-endian aggregates larger than a word are passed as a
+ PARALLEL of two SImode registers, with the odd (higher)
+ register holding the first word.  */
+ rtx reg1 = gen_rtx_REG (SImode, argument_registers[cum->count] + 1);
+ rtx reg2 = gen_rtx_REG (SImode, argument_registers[cum->count]);
+ rtvec vec = gen_rtvec (2, gen_rtx_EXPR_LIST (VOIDmode, reg1, const0_rtx),
+ gen_rtx_EXPR_LIST (VOIDmode, reg2, GEN_INT (4)));
+ return gen_rtx_PARALLEL (mode, vec);
+ }
+ }
+ }
+ return gen_rtx_REG (mode, argument_registers[cum->count]);
+}
+
+/* Implement TARGET_FUNCTION_ARG_ADVANCE.  Each argument consumes exactly
+   one register slot regardless of mode or type.  */
+static void
+c6x_function_arg_advance (cumulative_args_t cum_v,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ const_tree type ATTRIBUTE_UNUSED,
+ bool named ATTRIBUTE_UNUSED)
+{
+ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
+ cum->count++;
+}
+
+
+/* Return true if BLOCK_REG_PADDING (MODE, TYPE, FIRST) should return
+ upward rather than downward.  Only the first register of a
+ big-endian, 3-byte value is padded downward.  */
+
+bool
+c6x_block_reg_pad_upward (enum machine_mode mode ATTRIBUTE_UNUSED,
+ const_tree type, bool first)
+{
+ HOST_WIDE_INT size;
+
+ if (!TARGET_BIG_ENDIAN)
+ return true;
+ if (!first)
+ return true;
+ if (!type)
+ return true;
+ size = int_size_in_bytes (type);
+ /* NOTE(review): the 3-byte special case mirrors c6x_return_in_msb
+ below -- verify both stay in sync.  */
+ return size == 3;
+}
+
+/* Implement TARGET_FUNCTION_ARG_BOUNDARY.  Returns the alignment in
+   bits for an argument of MODE and TYPE; capped at two words, and
+   bumped up for BLKmode arguments based on their byte size.  */
+
+static unsigned int
+c6x_function_arg_boundary (enum machine_mode mode, const_tree type)
+{
+ unsigned int boundary = type ? TYPE_ALIGN (type) : GET_MODE_BITSIZE (mode);
+
+ if (boundary > BITS_PER_WORD)
+ return 2 * BITS_PER_WORD;
+
+ if (mode == BLKmode)
+ {
+ HOST_WIDE_INT size = int_size_in_bytes (type);
+ if (size > 4)
+ return 2 * BITS_PER_WORD;
+ /* Small blocks still need at least the alignment their size
+ implies.  */
+ if (boundary < BITS_PER_WORD)
+ {
+ if (size >= 3)
+ return BITS_PER_WORD;
+ if (size >= 2)
+ return 2 * BITS_PER_UNIT;
+ }
+ }
+ return boundary;
+}
+
+/* Implement TARGET_FUNCTION_ARG_ROUND_BOUNDARY.  Stack argument slots
+   are rounded to the same boundary used for register arguments.  */
+static unsigned int
+c6x_function_arg_round_boundary (enum machine_mode mode, const_tree type)
+{
+ return c6x_function_arg_boundary (mode, type);
+}
+
+/* TARGET_FUNCTION_VALUE implementation. Returns an RTX representing the place
+ where function FUNC returns or receives a value of data type TYPE.  */
+
+static rtx
+c6x_function_value (const_tree type, const_tree func ATTRIBUTE_UNUSED,
+ bool outgoing ATTRIBUTE_UNUSED)
+{
+ /* Functions return values in register A4. When returning aggregates, we may
+ have to adjust for endianness. */
+ if (TARGET_BIG_ENDIAN && type && AGGREGATE_TYPE_P (type))
+ {
+ HOST_WIDE_INT size = int_size_in_bytes (type);
+ if (size > 4)
+ {
+ /* Same register-pair layout as big-endian aggregate arguments
+ in c6x_function_arg: A5 holds the first word, A4 the second.  */
+ rtx reg1 = gen_rtx_REG (SImode, REG_A4 + 1);
+ rtx reg2 = gen_rtx_REG (SImode, REG_A4);
+ rtvec vec = gen_rtvec (2, gen_rtx_EXPR_LIST (VOIDmode, reg1, const0_rtx),
+ gen_rtx_EXPR_LIST (VOIDmode, reg2, GEN_INT (4)));
+ return gen_rtx_PARALLEL (TYPE_MODE (type), vec);
+ }
+ }
+ return gen_rtx_REG (TYPE_MODE (type), REG_A4);
+}
+
+/* Implement TARGET_LIBCALL_VALUE.  Libcalls return in A4, like normal
+   functions.  */
+
+static rtx
+c6x_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
+{
+ return gen_rtx_REG (mode, REG_A4);
+}
+
+/* TARGET_STRUCT_VALUE_RTX implementation.  The hidden pointer to a
+   returned-in-memory aggregate is passed in A3.  */
+
+static rtx
+c6x_struct_value_rtx (tree type ATTRIBUTE_UNUSED, int incoming ATTRIBUTE_UNUSED)
+{
+ return gen_rtx_REG (Pmode, REG_A3);
+}
+
+/* Implement TARGET_FUNCTION_VALUE_REGNO_P.  A4 is the only value-return
+   register.  */
+
+static bool
+c6x_function_value_regno_p (const unsigned int regno)
+{
+ return regno == REG_A4;
+}
+
+/* Types larger than 64 bit, and variable sized types, are passed by
+ reference. The callee must copy them; see c6x_callee_copies.
+ SIZE is -1 when int_size_in_bytes cannot determine a constant size
+ (variable-sized types), which also forces by-reference passing.  */
+
+static bool
+c6x_pass_by_reference (cumulative_args_t cum_v ATTRIBUTE_UNUSED,
+ enum machine_mode mode, const_tree type,
+ bool named ATTRIBUTE_UNUSED)
+{
+ int size = -1;
+ if (type)
+ size = int_size_in_bytes (type);
+ else if (mode != VOIDmode)
+ size = GET_MODE_SIZE (mode);
+ return size > 2 * UNITS_PER_WORD || size == -1;
+}
+
+/* Decide whether a type should be returned in memory (true)
+ or in a register (false). This is called by the macro
+ TARGET_RETURN_IN_MEMORY.  Uses the same size threshold as
+ c6x_pass_by_reference: larger than two words, or variable-sized.  */
+
+static bool
+c6x_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
+{
+ int size = int_size_in_bytes (type);
+ return size > 2 * UNITS_PER_WORD || size == -1;
+}
+
+/* Values which must be returned in the most-significant end of the return
+ register.  Only big-endian 3-byte aggregates qualify; this mirrors
+ the padding rule in c6x_block_reg_pad_upward.  */
+
+static bool
+c6x_return_in_msb (const_tree valtype)
+{
+ HOST_WIDE_INT size = int_size_in_bytes (valtype);
+ return TARGET_BIG_ENDIAN && AGGREGATE_TYPE_P (valtype) && size == 3;
+}
+
+/* Implement TARGET_CALLEE_COPIES.  The callee is responsible for
+   copying arguments passed by reference (see c6x_pass_by_reference).  */
+
+static bool
+c6x_callee_copies (cumulative_args_t cum_v ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ const_tree type ATTRIBUTE_UNUSED,
+ bool named ATTRIBUTE_UNUSED)
+{
+ return true;
+}
+
+/* Return the type to use as __builtin_va_list.  A plain character
+   pointer suffices on this target.  */
+static tree
+c6x_build_builtin_va_list (void)
+{
+ return build_pointer_type (char_type_node);
+}
+\f
+/* Implement TARGET_ASM_TRAMPOLINE_TEMPLATE.  Emit the fixed insn words
+   of the trampoline; the mvkl/mvkh immediate fields are patched at run
+   time by c6x_initialize_trampoline below.  */
+static void
+c6x_asm_trampoline_template (FILE *f)
+{
+ fprintf (f, "\t.long\t0x0000002b\n"); /* mvkl .s2 fnlow,B0 */
+ fprintf (f, "\t.long\t0x01000028\n"); /* || mvkl .s1 sclow,A2 */
+ fprintf (f, "\t.long\t0x0000006b\n"); /* mvkh .s2 fnhigh,B0 */
+ fprintf (f, "\t.long\t0x01000068\n"); /* || mvkh .s1 schigh,A2 */
+ fprintf (f, "\t.long\t0x00000362\n"); /* b .s2 B0 */
+ fprintf (f, "\t.long\t0x00008000\n"); /* nop 5 */
+ fprintf (f, "\t.long\t0x00000000\n"); /* nop */
+ fprintf (f, "\t.long\t0x00000000\n"); /* nop */
+}
+
+/* Emit RTL insns to initialize the variable parts of a trampoline at
+ TRAMP. FNADDR is an RTX for the address of the function's pure
+ code. CXT is an RTX for the static chain value for the function.
+ Patches the immediate fields (bits 7..22, hence the 0xffff << 7
+ mask) of the four mvkl/mvkh words emitted by the template above:
+ words 0/2 get the function address, words 1/3 the static chain.  */
+
+static void
+c6x_initialize_trampoline (rtx tramp, tree fndecl, rtx cxt)
+{
+ rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
+ rtx t1 = copy_to_reg (fnaddr);
+ rtx t2 = copy_to_reg (cxt);
+ rtx mask = gen_reg_rtx (SImode);
+ int i;
+
+ emit_block_move (tramp, assemble_trampoline_template (),
+ GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
+
+ emit_move_insn (mask, GEN_INT (0xffff << 7));
+
+ for (i = 0; i < 4; i++)
+ {
+ rtx mem = adjust_address (tramp, SImode, i * 4);
+ /* Odd words take the static chain, even words the function address.  */
+ rtx t = (i & 1) ? t2 : t1;
+ rtx v1 = gen_reg_rtx (SImode);
+ rtx v2 = gen_reg_rtx (SImode);
+ emit_move_insn (v1, mem);
+ /* mvkl wants the low half, mvkh the high half of the value.  */
+ if (i < 2)
+ emit_insn (gen_ashlsi3 (v2, t, GEN_INT (7)));
+ else
+ emit_insn (gen_lshrsi3 (v2, t, GEN_INT (9)));
+ emit_insn (gen_andsi3 (v2, v2, mask));
+ emit_insn (gen_iorsi3 (v2, v2, v1));
+ emit_move_insn (mem, v2);
+ }
+#ifdef CLEAR_INSN_CACHE
+ tramp = XEXP (tramp, 0);
+ emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__gnu_clear_cache"),
+ LCT_NORMAL, VOIDmode, 2, tramp, Pmode,
+ plus_constant (tramp, TRAMPOLINE_SIZE), Pmode);
+#endif
+}
+\f
+/* Determine whether c6x_output_mi_thunk can succeed.  The thunk below
+   uses a direct branch, which is unavailable under -mlong-calls.  */
+
+static bool
+c6x_can_output_mi_thunk (const_tree thunk ATTRIBUTE_UNUSED,
+ HOST_WIDE_INT delta ATTRIBUTE_UNUSED,
+ HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
+ const_tree function ATTRIBUTE_UNUSED)
+{
+ return !TARGET_LONG_CALLS;
+}
+
+/* Output the assembler code for a thunk function. THUNK is the
+ declaration for the thunk function itself, FUNCTION is the decl for
+ the target function. DELTA is an immediate constant offset to be
+ added to THIS. If VCALL_OFFSET is nonzero, the word at
+ *(*this + vcall_offset) should be added to THIS.
+ NOTE(review): the varying "nop N" counts appear to fill branch/load
+ delay slots so the adjustment completes before the branch takes
+ effect -- verify the counts against the C6x pipeline timings.  */
+
+static void
+c6x_output_mi_thunk (FILE *file ATTRIBUTE_UNUSED,
+ tree thunk ATTRIBUTE_UNUSED, HOST_WIDE_INT delta,
+ HOST_WIDE_INT vcall_offset, tree function)
+{
+ rtx xops[5];
+ /* The this parameter is passed as the first argument. */
+ rtx this_rtx = gen_rtx_REG (Pmode, REG_A4);
+
+ c6x_current_insn = NULL_RTX;
+
+ xops[4] = XEXP (DECL_RTL (function), 0);
+ if (!vcall_offset)
+ {
+ /* Branch first; the delta adjustment (if any) executes in the
+ branch's delay slots.  */
+ output_asm_insn ("b .s2 \t%4", xops);
+ if (!delta)
+ output_asm_insn ("nop 5", xops);
+ }
+
+ /* Adjust the this parameter by a fixed constant. */
+ if (delta)
+ {
+ /* Pick the cheapest insn sequence for the magnitude of DELTA.  */
+ xops[0] = GEN_INT (delta);
+ xops[1] = this_rtx;
+ if (delta >= -16 && delta <= 15)
+ {
+ output_asm_insn ("add .s1 %0, %1, %1", xops);
+ if (!vcall_offset)
+ output_asm_insn ("nop 4", xops);
+ }
+ else if (delta >= 16 && delta < 32)
+ {
+ output_asm_insn ("add .d1 %0, %1, %1", xops);
+ if (!vcall_offset)
+ output_asm_insn ("nop 4", xops);
+ }
+ else if (delta >= -32768 && delta < 32768)
+ {
+ output_asm_insn ("mvk .s1 %0, A0", xops);
+ output_asm_insn ("add .d1 %1, A0, %1", xops);
+ if (!vcall_offset)
+ output_asm_insn ("nop 3", xops);
+ }
+ else
+ {
+ output_asm_insn ("mvkl .s1 %0, A0", xops);
+ output_asm_insn ("mvkh .s1 %0, A0", xops);
+ output_asm_insn ("add .d1 %1, A0, %1", xops);
+ if (!vcall_offset)
+ output_asm_insn ("nop 3", xops);
+ }
+ }
+
+ /* Adjust the this parameter by a value stored in the vtable. */
+ if (vcall_offset)
+ {
+ rtx a0tmp = gen_rtx_REG (Pmode, REG_A0);
+ rtx a3tmp = gen_rtx_REG (Pmode, REG_A3);
+
+ /* Load the vtable pointer (*this) into A0.  */
+ xops[1] = a3tmp;
+ xops[2] = a0tmp;
+ xops[3] = gen_rtx_MEM (Pmode, a0tmp);
+ output_asm_insn ("mv .s1 a4, %2", xops);
+ output_asm_insn ("ldw .d1t1 %3, %2", xops);
+
+ /* Adjust the this parameter. */
+ xops[0] = gen_rtx_MEM (Pmode, plus_constant (a0tmp, vcall_offset));
+ if (!memory_operand (xops[0], Pmode))
+ {
+ /* VCALL_OFFSET is out of range for an addressing mode;
+ materialize it in A1 and add it to A0 explicitly.  */
+ rtx tmp2 = gen_rtx_REG (Pmode, REG_A1);
+ xops[0] = GEN_INT (vcall_offset);
+ xops[1] = tmp2;
+ output_asm_insn ("mvkl .s1 %0, %1", xops);
+ output_asm_insn ("mvkh .s1 %0, %1", xops);
+ output_asm_insn ("nop 2", xops);
+ output_asm_insn ("add .d1 %2, %1, %2", xops);
+ xops[0] = gen_rtx_MEM (Pmode, a0tmp);
+ }
+ else
+ output_asm_insn ("nop 4", xops);
+ xops[2] = this_rtx;
+ output_asm_insn ("ldw .d1t1 %0, %1", xops);
+ output_asm_insn ("|| b .s2 \t%4", xops);
+ output_asm_insn ("nop 4", xops);
+ output_asm_insn ("add .d1 %2, %1, %2", xops);
+ }
+}
+\f
+/* Return true if EXP goes in small data/bss.  Variables with an
+   explicit section attribute qualify only if placed in one of the
+   recognized near/small sections; otherwise defer to
+   PLACE_IN_SDATA_P.  */
+
+static bool
+c6x_in_small_data_p (const_tree exp)
+{
+ /* We want to merge strings, so we never consider them small data. */
+ if (TREE_CODE (exp) == STRING_CST)
+ return false;
+
+ /* Functions are never small data. */
+ if (TREE_CODE (exp) == FUNCTION_DECL)
+ return false;
+
+ /* Weak symbols may be overridden by a non-small definition.  */
+ if (TREE_CODE (exp) == VAR_DECL && DECL_WEAK (exp))
+ return false;
+
+ if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp))
+ {
+ const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (exp));
+
+ if (strcmp (section, ".neardata") == 0
+ || strncmp (section, ".neardata.", 10) == 0
+ || strncmp (section, ".gnu.linkonce.s.", 16) == 0
+ || strcmp (section, ".bss") == 0
+ || strncmp (section, ".bss.", 5) == 0
+ || strncmp (section, ".gnu.linkonce.sb.", 17) == 0
+ || strcmp (section, ".rodata") == 0
+ || strncmp (section, ".rodata.", 8) == 0
+ || strncmp (section, ".gnu.linkonce.s2.", 17) == 0)
+ return true;
+ }
+ else
+ return PLACE_IN_SDATA_P (exp);
+
+ return false;
+}
+
+/* Return a section for X. The only special thing we do here is to
+ honor small data. We don't have a tree type, so we can't use the
+ PLACE_IN_SDATA_P macro we use everywhere else; we choose to place
+ everything sized 8 bytes or smaller into small data.  MODE gives
+ the constant's mode; ALIGN its alignment in bits.  */
+
+static section *
+c6x_select_rtx_section (enum machine_mode mode, rtx x,
+ unsigned HOST_WIDE_INT align)
+{
+ if (c6x_sdata_mode == C6X_SDATA_ALL
+ || (c6x_sdata_mode != C6X_SDATA_NONE && GET_MODE_SIZE (mode) <= 8))
+ /* ??? Consider using mergeable sdata sections. */
+ return sdata_section;
+ else
+ return default_elf_select_rtx_section (mode, x, align);
+}
+
+/* Implement TARGET_ASM_SELECT_SECTION.  Map DECL's section category to
+   the C6x near/far section names; fall back to the generic ELF
+   selection when no special section applies.  */
+static section *
+c6x_elf_select_section (tree decl, int reloc,
+ unsigned HOST_WIDE_INT align)
+{
+ const char *sname = NULL;
+ unsigned int flags = SECTION_WRITE;
+ if (c6x_in_small_data_p (decl))
+ {
+ switch (categorize_decl_for_section (decl, reloc))
+ {
+ case SECCAT_SRODATA:
+ sname = ".rodata";
+ flags = 0;
+ break;
+ case SECCAT_SDATA:
+ sname = ".neardata";
+ break;
+ case SECCAT_SBSS:
+ sname = ".bss";
+ flags |= SECTION_BSS;
+ /* NOTE(review): no break here -- falls into default, which is
+ harmless (default only breaks), but confirm it is intended.  */
+ default:
+ break;
+ }
+ }
+ else
+ {
+ switch (categorize_decl_for_section (decl, reloc))
+ {
+ case SECCAT_DATA:
+ sname = ".fardata";
+ break;
+ case SECCAT_DATA_REL:
+ sname = ".fardata.rel";
+ break;
+ case SECCAT_DATA_REL_LOCAL:
+ sname = ".fardata.rel.local";
+ break;
+ case SECCAT_DATA_REL_RO:
+ sname = ".fardata.rel.ro";
+ break;
+ case SECCAT_DATA_REL_RO_LOCAL:
+ sname = ".fardata.rel.ro.local";
+ break;
+ case SECCAT_BSS:
+ sname = ".far";
+ flags |= SECTION_BSS;
+ break;
+ case SECCAT_RODATA:
+ sname = ".const";
+ flags = 0;
+ break;
+ case SECCAT_SRODATA:
+ case SECCAT_SDATA:
+ case SECCAT_SBSS:
+ /* Small-data categories are handled in the branch above.  */
+ gcc_unreachable ();
+ default:
+ break;
+ }
+ }
+ if (sname)
+ {
+ /* We might get called with string constants, but get_named_section
+ doesn't like them as they are not DECLs. Also, we need to set
+ flags in that case. */
+ if (!DECL_P (decl))
+ return get_section (sname, flags, NULL);
+ return get_named_section (decl, sname, reloc);
+ }
+
+ return default_elf_select_section (decl, reloc, align);
+}
+
+/* Build up a unique section name, expressed as a
+ STRING_CST node, and assign it to DECL_SECTION_NAME (decl).
+ RELOC indicates whether the initial value of DECL requires
+ link-time relocations.  The prefixes chosen here parallel the
+ section names used in c6x_elf_select_section above.  */
+
+static void ATTRIBUTE_UNUSED
+c6x_elf_unique_section (tree decl, int reloc)
+{
+ const char *prefix = NULL;
+ /* We only need to use .gnu.linkonce if we don't have COMDAT groups. */
+ bool one_only = DECL_ONE_ONLY (decl) && !HAVE_COMDAT_GROUP;
+
+ if (c6x_in_small_data_p (decl))
+ {
+ switch (categorize_decl_for_section (decl, reloc))
+ {
+ case SECCAT_SDATA:
+ prefix = one_only ? ".s" : ".neardata";
+ break;
+ case SECCAT_SBSS:
+ prefix = one_only ? ".sb" : ".bss";
+ break;
+ case SECCAT_SRODATA:
+ prefix = one_only ? ".s2" : ".rodata";
+ break;
+ case SECCAT_RODATA_MERGE_STR:
+ case SECCAT_RODATA_MERGE_STR_INIT:
+ case SECCAT_RODATA_MERGE_CONST:
+ case SECCAT_RODATA:
+ case SECCAT_DATA:
+ case SECCAT_DATA_REL:
+ case SECCAT_DATA_REL_LOCAL:
+ case SECCAT_DATA_REL_RO:
+ case SECCAT_DATA_REL_RO_LOCAL:
+ /* These categories never count as small data.  */
+ gcc_unreachable ();
+ default:
+ /* Everything else we place into default sections and hope for the
+ best. */
+ break;
+ }
+ }
+ else
+ {
+ switch (categorize_decl_for_section (decl, reloc))
+ {
+ case SECCAT_DATA:
+ case SECCAT_DATA_REL:
+ case SECCAT_DATA_REL_LOCAL:
+ case SECCAT_DATA_REL_RO:
+ case SECCAT_DATA_REL_RO_LOCAL:
+ prefix = one_only ? ".fd" : ".fardata";
+ break;
+ case SECCAT_BSS:
+ prefix = one_only ? ".fb" : ".far";
+ break;
+ case SECCAT_RODATA:
+ case SECCAT_RODATA_MERGE_STR:
+ case SECCAT_RODATA_MERGE_STR_INIT:
+ case SECCAT_RODATA_MERGE_CONST:
+ prefix = one_only ? ".fr" : ".const";
+ break;
+ case SECCAT_SRODATA:
+ case SECCAT_SDATA:
+ case SECCAT_SBSS:
+ /* Small-data categories are handled in the branch above.  */
+ gcc_unreachable ();
+ default:
+ break;
+ }
+ }
+
+ if (prefix)
+ {
+ const char *name, *linkonce;
+ char *string;
+
+ name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
+ name = targetm.strip_name_encoding (name);
+
+ /* If we're using one_only, then there needs to be a .gnu.linkonce
+ prefix to the section name. */
+ linkonce = one_only ? ".gnu.linkonce" : "";
+
+ string = ACONCAT ((linkonce, prefix, ".", name, NULL));
+
+ DECL_SECTION_NAME (decl) = build_string (strlen (string), string);
+ return;
+ }
+ default_unique_section (decl, reloc);
+}
+
+/* Implement TARGET_SECTION_TYPE_FLAGS.  Sections named .far or .far.*
+   are uninitialized data, so force SECTION_BSS in addition to the
+   default flags.  */
+static unsigned int
+c6x_section_type_flags (tree decl, const char *name, int reloc)
+{
+ unsigned int flags = 0;
+
+ if (strcmp (name, ".far") == 0
+ || strncmp (name, ".far.", 5) == 0)
+ flags |= SECTION_BSS;
+
+ flags |= default_section_type_flags (decl, name, reloc);
+
+ return flags;
+}
+\f
+/* Checks whether the given CALL_EXPR would use a caller saved
+ register. This is used to decide whether sibling call optimization
+ could be performed on the respective function call.  Walks the
+ argument list, simulating argument-register assignment with
+ c6x_function_arg / c6x_function_arg_advance.  */
+
+static bool
+c6x_call_saved_register_used (tree call_expr)
+{
+ CUMULATIVE_ARGS cum_v;
+ cumulative_args_t cum;
+ HARD_REG_SET call_saved_regset;
+ tree parameter;
+ enum machine_mode mode;
+ tree type;
+ rtx parm_rtx;
+ int i;
+
+ INIT_CUMULATIVE_ARGS (cum_v, NULL, NULL, 0, 0);
+ cum = pack_cumulative_args (&cum_v);
+
+ /* The call-saved registers are the complement of the call-used set.  */
+ COMPL_HARD_REG_SET (call_saved_regset, call_used_reg_set);
+ for (i = 0; i < call_expr_nargs (call_expr); i++)
+ {
+ parameter = CALL_EXPR_ARG (call_expr, i);
+ gcc_assert (parameter);
+
+ /* For an undeclared variable passed as parameter we will get
+ an ERROR_MARK node here. */
+ if (TREE_CODE (parameter) == ERROR_MARK)
+ return true;
+
+ type = TREE_TYPE (parameter);
+ gcc_assert (type);
+
+ mode = TYPE_MODE (type);
+ gcc_assert (mode);
+
+ /* By-reference arguments occupy a pointer-sized register slot.  */
+ if (pass_by_reference (&cum_v, mode, type, true))
+ {
+ mode = Pmode;
+ type = build_pointer_type (type);
+ }
+
+ parm_rtx = c6x_function_arg (cum, mode, type, 0);
+
+ c6x_function_arg_advance (cum, mode, type, 0);
+
+ if (!parm_rtx)
+ continue;
+
+ if (REG_P (parm_rtx)
+ && overlaps_hard_reg_set_p (call_saved_regset, GET_MODE (parm_rtx),
+ REGNO (parm_rtx)))
+ return true;
+ /* Big-endian aggregates are passed as a PARALLEL of two registers;
+ check each piece.  */
+ if (GET_CODE (parm_rtx) == PARALLEL)
+ {
+ int n = XVECLEN (parm_rtx, 0);
+ while (n-- > 0)
+ {
+ rtx x = XEXP (XVECEXP (parm_rtx, 0, n), 0);
+ if (REG_P (x)
+ && overlaps_hard_reg_set_p (call_saved_regset,
+ GET_MODE (x), REGNO (x)))
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+/* Decide whether we can make a sibling call to a function. DECL is the
+ declaration of the function being targeted by the call and EXP is the
+ CALL_EXPR representing the call.  */
+
+static bool
+c6x_function_ok_for_sibcall (tree decl, tree exp)
+{
+ /* Registers A10, A12, B10 and B12 are available as arguments
+ register but unfortunately caller saved. This makes functions
+ needing these registers for arguments not suitable for
+ sibcalls. */
+ if (c6x_call_saved_register_used (exp))
+ return false;
+
+ /* Without PIC there are no further restrictions.  */
+ if (!flag_pic)
+ return true;
+
+ if (TARGET_DSBT)
+ {
+ /* When compiling for DSBT, the calling function must be local,
+ so that when we reload B14 in the sibcall epilogue, it will
+ not change its value. */
+ struct cgraph_local_info *this_func;
+
+ if (!decl)
+ /* Not enough information. */
+ return false;
+
+ this_func = cgraph_local_info (current_function_decl);
+ return this_func->local;
+ }
+
+ return true;
+}
+
+/* Return true if DECL is known to be linked into section SECTION.
+   Conservative: only returns true when the answer is certain at
+   compile time.  */
+
+static bool
+c6x_function_in_section_p (tree decl, section *section)
+{
+ /* We can only be certain about functions defined in the same
+ compilation unit. */
+ if (!TREE_STATIC (decl))
+ return false;
+
+ /* Make sure that SYMBOL always binds to the definition in this
+ compilation unit. */
+ if (!targetm.binds_local_p (decl))
+ return false;
+
+ /* If DECL_SECTION_NAME is set, assume it is trustworthy. */
+ if (!DECL_SECTION_NAME (decl))
+ {
+ /* Make sure that we will not create a unique section for DECL. */
+ if (flag_function_sections || DECL_ONE_ONLY (decl))
+ return false;
+ }
+
+ return function_section (decl) == section;
+}
+
+/* Return true if a call to OP, which is a SYMBOL_REF, must be expanded
+ as a long call.  Under -mlong-calls this defaults to true unless the
+ callee is provably in the same section as the caller.  */
+bool
+c6x_long_call_p (rtx op)
+{
+ tree decl;
+
+ if (!TARGET_LONG_CALLS)
+ return false;
+
+ decl = SYMBOL_REF_DECL (op);
+
+ /* Try to determine whether the symbol is in the same section as the current
+ function. Be conservative, and only cater for cases in which the
+ whole of the current function is placed in the same section. */
+ if (decl != NULL_TREE
+ && !flag_reorder_blocks_and_partition
+ && TREE_CODE (decl) == FUNCTION_DECL
+ && c6x_function_in_section_p (decl, current_function_section ()))
+ return false;
+
+ return true;
+}
+
+/* Emit the sequence for a call.  RETVAL is the result register or
+ NULL_RTX for a void call; ADDRESS is the MEM containing the callee
+ address; SIBCALL selects sibcall-style expansion.  */
+void
+c6x_expand_call (rtx retval, rtx address, bool sibcall)
+{
+ rtx callee = XEXP (address, 0);
+ rtx call_insn;
+
+ /* Force an unsuitable callee address into a register first.  */
+ if (!c6x_call_operand (callee, Pmode))
+ {
+ callee = force_reg (Pmode, callee);
+ address = change_address (address, Pmode, callee);
+ }
+ call_insn = gen_rtx_CALL (VOIDmode, address, const0_rtx);
+ if (sibcall)
+ {
+ call_insn = emit_call_insn (call_insn);
+ /* B3 holds the return address; keep it live across the sibcall.  */
+ use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
+ gen_rtx_REG (Pmode, REG_B3));
+ }
+ else
+ {
+ if (retval == NULL_RTX)
+ call_insn = emit_call_insn (call_insn);
+ else
+ call_insn = emit_call_insn (gen_rtx_SET (GET_MODE (retval), retval,
+ call_insn));
+ }
+ if (flag_pic)
+ use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), pic_offset_table_rtx);
+}
+
+/* Legitimize PIC addresses. If the address is already position-independent,
+ we return ORIG. Newly generated position-independent addresses go into a
+ reg. This is REG if nonzero, otherwise we allocate register(s) as
+ necessary. PICREG is the register holding the pointer to the PIC offset
+ table.  */
+
+static rtx
+legitimize_pic_address (rtx orig, rtx reg, rtx picreg)
+{
+ rtx addr = orig;
+ rtx new_rtx = orig;
+
+ if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == LABEL_REF)
+ {
+ int unspec = UNSPEC_LOAD_GOT;
+ rtx tmp;
+
+ if (reg == 0)
+ {
+ gcc_assert (can_create_pseudo_p ());
+ reg = gen_reg_rtx (Pmode);
+ }
+ /* flag_pic == 2 is -fPIC: use the large GOT-offset sequence
+ (mvkl/mvkh of the offset, then a GOT load).  */
+ if (flag_pic == 2)
+ {
+ if (can_create_pseudo_p ())
+ tmp = gen_reg_rtx (Pmode);
+ else
+ tmp = reg;
+ emit_insn (gen_movsi_gotoff_high (tmp, addr));
+ emit_insn (gen_movsi_gotoff_lo_sum (tmp, tmp, addr));
+ emit_insn (gen_load_got_gotoff (reg, picreg, tmp));
+ }
+ else
+ {
+ /* -fpic: a single GOT-relative load.  */
+ tmp = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr), unspec);
+ new_rtx = gen_const_mem (Pmode, gen_rtx_PLUS (Pmode, picreg, tmp));
+
+ emit_move_insn (reg, new_rtx);
+ }
+ if (picreg == pic_offset_table_rtx)
+ crtl->uses_pic_offset_table = 1;
+ return reg;
+ }
+
+ else if (GET_CODE (addr) == CONST || GET_CODE (addr) == PLUS)
+ {
+ rtx base;
+
+ if (GET_CODE (addr) == CONST)
+ {
+ addr = XEXP (addr, 0);
+ gcc_assert (GET_CODE (addr) == PLUS);
+ }
+
+ /* Already a PIC-register-relative address; nothing to do.  */
+ if (XEXP (addr, 0) == picreg)
+ return orig;
+
+ if (reg == 0)
+ {
+ gcc_assert (can_create_pseudo_p ());
+ reg = gen_reg_rtx (Pmode);
+ }
+
+ /* Legitimize both halves of the PLUS recursively.  */
+ base = legitimize_pic_address (XEXP (addr, 0), reg, picreg);
+ addr = legitimize_pic_address (XEXP (addr, 1),
+ base == reg ? NULL_RTX : reg,
+ picreg);
+
+ if (GET_CODE (addr) == CONST_INT)
+ {
+ gcc_assert (! reload_in_progress && ! reload_completed);
+ addr = force_reg (Pmode, addr);
+ }
+
+ /* Reassociate so the constant ends up outermost.  */
+ if (GET_CODE (addr) == PLUS && CONSTANT_P (XEXP (addr, 1)))
+ {
+ base = gen_rtx_PLUS (Pmode, base, XEXP (addr, 0));
+ addr = XEXP (addr, 1);
+ }
+
+ return gen_rtx_PLUS (Pmode, base, addr);
+ }
+
+ return new_rtx;
+}
+
+/* Expand a move operation in mode MODE. The operands are in OPERANDS.
+ Returns true if no further code must be generated, false if the caller
+ should generate an insn to move OPERANDS[1] to OPERANDS[0].  */
+
+bool
+expand_move (rtx *operands, enum machine_mode mode)
+{
+ rtx dest = operands[0];
+ rtx op = operands[1];
+
+ /* Before reload, a store must come from a register; force other
+ sources through one.  */
+ if ((reload_in_progress | reload_completed) == 0
+ && GET_CODE (dest) == MEM && GET_CODE (op) != REG)
+ operands[1] = force_reg (mode, op);
+ else if (mode == SImode && symbolic_operand (op, SImode))
+ {
+ if (flag_pic)
+ {
+ /* Small-data symbols are addressed directly off the PIC
+ register (B14).  */
+ if (sdata_symbolic_operand (op, SImode))
+ {
+ emit_insn (gen_load_sdata_pic (dest, pic_offset_table_rtx, op));
+ crtl->uses_pic_offset_table = 1;
+ return true;
+ }
+ else
+ {
+ rtx temp = (reload_completed || reload_in_progress
+ ? dest : gen_reg_rtx (Pmode));
+
+ operands[1] = legitimize_pic_address (op, temp,
+ pic_offset_table_rtx);
+ }
+ }
+ else if (reload_completed
+ && !sdata_symbolic_operand (op, SImode))
+ {
+ /* Non-PIC symbolic constants are built with mvkl/mvkh.  */
+ emit_insn (gen_movsi_high (dest, op));
+ emit_insn (gen_movsi_lo_sum (dest, dest, op));
+ return true;
+ }
+ }
+ return false;
+}
+
+/* This function is called when we're about to expand an integer compare
+ operation which performs COMPARISON. It examines the second operand,
+ and if it is an integer constant that cannot be used directly on the
+ current machine in a comparison insn, it returns true.  The
+ constraint names refer to immediate ranges: Iu4/Iu5 unsigned 4/5-bit,
+ Is5 signed 5-bit.  */
+bool
+c6x_force_op_for_comparison_p (enum rtx_code code, rtx op)
+{
+ /* Anything a cmp insn can encode directly needs no register.  */
+ if (!CONST_INT_P (op) || satisfies_constraint_Iu4 (op))
+ return false;
+
+ if ((code == EQ || code == LT || code == GT)
+ && !satisfies_constraint_Is5 (op))
+ return true;
+ if ((code == GTU || code == LTU)
+ && (!TARGET_INSNS_64 || !satisfies_constraint_Iu5 (op)))
+ return true;
+
+ return false;
+}
+
+/* Emit comparison instruction if necessary, returning the expression
+ that holds the compare result in the proper mode. Return the comparison
+ that should be used in the jump insn. */
+
+rtx
+c6x_expand_compare (rtx comparison, enum machine_mode mode)
+{
+ enum rtx_code code = GET_CODE (comparison);
+ rtx op0 = XEXP (comparison, 0);
+ rtx op1 = XEXP (comparison, 1);
+ rtx cmp;
+ enum rtx_code jump_code = code;
+ enum machine_mode op_mode = GET_MODE (op0);
+
+ /* DImode (in)equality with zero: OR the two halves together and test
+ the SImode result against zero. */
+ if (op_mode == DImode && (code == NE || code == EQ) && op1 == const0_rtx)
+ {
+ rtx t = gen_reg_rtx (SImode);
+ emit_insn (gen_iorsi3 (t, gen_lowpart (SImode, op0),
+ gen_highpart (SImode, op0)));
+ op_mode = SImode;
+ cmp = t;
+ }
+ else if (op_mode == DImode)
+ {
+ /* General DImode compare: combine SImode compares of the high and
+ low words. Conditions without a direct machine form are inverted
+ and the jump condition flipped to compensate. */
+ rtx lo[2], high[2];
+ rtx cmp1, cmp2;
+
+ if (code == NE || code == GEU || code == LEU || code == GE || code == LE)
+ {
+ code = reverse_condition (code);
+ jump_code = EQ;
+ }
+ else
+ jump_code = NE;
+
+ split_di (&op0, 1, lo, high);
+ split_di (&op1, 1, lo + 1, high + 1);
+
+ /* high[1] is used both with CODE and with EQ below, so it must be
+ valid for both. */
+ if (c6x_force_op_for_comparison_p (code, high[1])
+ || c6x_force_op_for_comparison_p (EQ, high[1]))
+ high[1] = force_reg (SImode, high[1]);
+
+ cmp1 = gen_reg_rtx (SImode);
+ cmp2 = gen_reg_rtx (SImode);
+ emit_insn (gen_rtx_SET (VOIDmode, cmp1,
+ gen_rtx_fmt_ee (code, SImode, high[0], high[1])));
+ if (code == EQ)
+ {
+ /* Equality: both word compares must hold. */
+ if (c6x_force_op_for_comparison_p (code, lo[1]))
+ lo[1] = force_reg (SImode, lo[1]);
+ emit_insn (gen_rtx_SET (VOIDmode, cmp2,
+ gen_rtx_fmt_ee (code, SImode, lo[0], lo[1])));
+ emit_insn (gen_andsi3 (cmp1, cmp1, cmp2));
+ }
+ else
+ {
+ /* Ordering: high words decide, unless they are equal, in which
+ case the low words decide as an unsigned compare. */
+ emit_insn (gen_rtx_SET (VOIDmode, cmp2,
+ gen_rtx_EQ (SImode, high[0], high[1])));
+ if (code == GT)
+ code = GTU;
+ else if (code == LT)
+ code = LTU;
+ if (c6x_force_op_for_comparison_p (code, lo[1]))
+ lo[1] = force_reg (SImode, lo[1]);
+ emit_insn (gen_cmpsi_and (cmp2, gen_rtx_fmt_ee (code, SImode,
+ lo[0], lo[1]),
+ lo[0], lo[1], cmp2));
+ emit_insn (gen_iorsi3 (cmp1, cmp1, cmp2));
+ }
+ cmp = cmp1;
+ }
+ else if (TARGET_FP && !flag_finite_math_only
+ && (op_mode == DFmode || op_mode == SFmode)
+ && code != EQ && code != NE && code != LT && code != GT
+ && code != UNLE && code != UNGE)
+ {
+ /* Hardware FP, with NaNs possible: compose the unsupported
+ comparison out of up to three supported primitives (LT/GT/EQ)
+ ORed together. */
+ enum rtx_code code1, code2, code3;
+ rtx (*fn) (rtx, rtx, rtx, rtx, rtx);
+
+ jump_code = NE;
+ code3 = UNKNOWN;
+ switch (code)
+ {
+ case UNLT:
+ case UNGT:
+ jump_code = EQ;
+ /* fall through */
+ case LE:
+ case GE:
+ code1 = code == LE || code == UNGT ? LT : GT;
+ code2 = EQ;
+ break;
+
+ case UNORDERED:
+ jump_code = EQ;
+ /* fall through */
+ case ORDERED:
+ code3 = EQ;
+ /* fall through */
+ case LTGT:
+ code1 = LT;
+ code2 = GT;
+ break;
+
+ case UNEQ:
+ code1 = LT;
+ code2 = GT;
+ jump_code = EQ;
+ break;
+
+ default:
+ gcc_unreachable ();
+ }
+
+ cmp = gen_reg_rtx (SImode);
+ emit_insn (gen_rtx_SET (VOIDmode, cmp,
+ gen_rtx_fmt_ee (code1, SImode, op0, op1)));
+ fn = op_mode == DFmode ? gen_cmpdf_ior : gen_cmpsf_ior;
+ emit_insn (fn (cmp, gen_rtx_fmt_ee (code2, SImode, op0, op1),
+ op0, op1, cmp));
+ if (code3 != UNKNOWN)
+ emit_insn (fn (cmp, gen_rtx_fmt_ee (code3, SImode, op0, op1),
+ op0, op1, cmp));
+ }
+ else if (op_mode == SImode && (code == NE || code == EQ) && op1 == const0_rtx)
+ /* SImode (in)equality against zero needs no compare insn at all. */
+ cmp = op0;
+ else
+ {
+ bool is_fp_libfunc;
+ is_fp_libfunc = !TARGET_FP && (op_mode == DFmode || op_mode == SFmode);
+
+ /* The libfuncs implement every condition directly, so only invert
+ conditions when expanding to a machine compare insn. */
+ if ((code == NE || code == GEU || code == LEU || code == GE || code == LE)
+ && !is_fp_libfunc)
+ {
+ code = reverse_condition (code);
+ jump_code = EQ;
+ }
+ else if (code == UNGE)
+ {
+ code = LT;
+ jump_code = EQ;
+ }
+ else if (code == UNLE)
+ {
+ code = GT;
+ jump_code = EQ;
+ }
+ else
+ jump_code = NE;
+
+ if (is_fp_libfunc)
+ {
+ rtx insns;
+ rtx libfunc;
+ switch (code)
+ {
+ case EQ:
+ libfunc = op_mode == DFmode ? eqdf_libfunc : eqsf_libfunc;
+ break;
+ case NE:
+ libfunc = op_mode == DFmode ? nedf_libfunc : nesf_libfunc;
+ break;
+ case GT:
+ libfunc = op_mode == DFmode ? gtdf_libfunc : gtsf_libfunc;
+ break;
+ case GE:
+ libfunc = op_mode == DFmode ? gedf_libfunc : gesf_libfunc;
+ break;
+ case LT:
+ libfunc = op_mode == DFmode ? ltdf_libfunc : ltsf_libfunc;
+ break;
+ case LE:
+ libfunc = op_mode == DFmode ? ledf_libfunc : lesf_libfunc;
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ start_sequence ();
+
+ cmp = emit_library_call_value (libfunc, 0, LCT_CONST, SImode, 2,
+ op0, op_mode, op1, op_mode);
+ insns = get_insns ();
+ end_sequence ();
+
+ /* Wrap the call so the optimizers see the comparison it computes. */
+ emit_libcall_block (insns, cmp, cmp,
+ gen_rtx_fmt_ee (code, SImode, op0, op1));
+ }
+ else
+ {
+ cmp = gen_reg_rtx (SImode);
+ if (c6x_force_op_for_comparison_p (code, op1))
+ op1 = force_reg (SImode, op1);
+ emit_insn (gen_rtx_SET (VOIDmode, cmp,
+ gen_rtx_fmt_ee (code, SImode, op0, op1)));
+ }
+ }
+
+ return gen_rtx_fmt_ee (jump_code, mode, cmp, const0_rtx);
+}
+
+/* Return one word of double-word value OP. HIGH_P is true to select the
+ high part, false to select the low part. When encountering auto-increment
+ addressing, we make the assumption that the low part is going to be accessed
+ first. */
+
+rtx
+c6x_subword (rtx op, bool high_p)
+{
+ unsigned int byte;
+ enum machine_mode mode;
+
+ mode = GET_MODE (op);
+ /* Constants have VOIDmode; treat them as DImode values. */
+ if (mode == VOIDmode)
+ mode = DImode;
+
+ /* Pick the byte offset of the requested word for this endianness. */
+ if (TARGET_BIG_ENDIAN ? !high_p : high_p)
+ byte = UNITS_PER_WORD;
+ else
+ byte = 0;
+
+ if (MEM_P (op))
+ {
+ rtx addr = XEXP (op, 0);
+ if (GET_CODE (addr) == PLUS || REG_P (addr))
+ return adjust_address (op, word_mode, byte);
+ /* FIXME: should really support autoincrement addressing for
+ multi-word modes. */
+ gcc_unreachable ();
+ }
+
+ return simplify_gen_subreg (word_mode, op, mode, byte);
+}
+
+/* Split one or more DImode RTL references into pairs of SImode
+ references. The RTL can be REG, offsettable MEM, integer constant, or
+ CONST_DOUBLE. "operands" is a pointer to an array of DImode RTL to
+ split and "num" is its length. lo_half and hi_half are output arrays
+ that parallel "operands". */
+
+void
+split_di (rtx operands[], int num, rtx lo_half[], rtx hi_half[])
+{
+ while (num--)
+ {
+ rtx op = operands[num];
+
+ /* Each half is produced by c6x_subword, which handles endianness. */
+ lo_half[num] = c6x_subword (op, false);
+ hi_half[num] = c6x_subword (op, true);
+ }
+}
+
+/* Return true if VAL is a mask valid for a clr instruction, i.e. its set
+ bits form one contiguous run (possibly empty). */
+bool
+c6x_valid_mask_p (HOST_WIDE_INT val)
+{
+ int i;
+ /* Skip the initial run of clear bits... */
+ for (i = 0; i < 32; i++)
+ if (!(val & ((unsigned HOST_WIDE_INT)1 << i)))
+ break;
+ /* ...then the run of set bits... */
+ for (; i < 32; i++)
+ if (val & ((unsigned HOST_WIDE_INT)1 << i))
+ break;
+ /* ...and fail if any further bit is clear (a second run of zeroes
+ followed by more ones would make the mask non-contiguous). */
+ for (; i < 32; i++)
+ if (!(val & ((unsigned HOST_WIDE_INT)1 << i)))
+ return false;
+ return true;
+}
+
+/* Expand a block move for a movmemM pattern. Returns true if the move
+ was expanded inline (or via a library call), false to let the generic
+ code handle it. Only constant-length copies are handled here. */
+
+bool
+c6x_expand_movmem (rtx dst, rtx src, rtx count_exp, rtx align_exp,
+ rtx expected_align_exp ATTRIBUTE_UNUSED,
+ rtx expected_size_exp ATTRIBUTE_UNUSED)
+{
+ unsigned HOST_WIDE_INT align = 1;
+ unsigned HOST_WIDE_INT src_mem_align, dst_mem_align, min_mem_align;
+ unsigned HOST_WIDE_INT count = 0, offset = 0;
+ unsigned int biggest_move = TARGET_STDW ? 8 : 4;
+
+ if (CONST_INT_P (align_exp))
+ align = INTVAL (align_exp);
+
+ src_mem_align = MEM_ALIGN (src) / BITS_PER_UNIT;
+ dst_mem_align = MEM_ALIGN (dst) / BITS_PER_UNIT;
+ min_mem_align = MIN (src_mem_align, dst_mem_align);
+
+ /* NOTE(review): min_mem_align is already in bytes here, so dividing by
+ BITS_PER_UNIT again looks like a double division that underestimates
+ the alignment (conservative, but likely unintended) -- confirm. */
+ if (min_mem_align > align)
+ align = min_mem_align / BITS_PER_UNIT;
+ if (src_mem_align < align)
+ src_mem_align = align;
+ if (dst_mem_align < align)
+ dst_mem_align = align;
+
+ if (CONST_INT_P (count_exp))
+ count = INTVAL (count_exp);
+ else
+ return false;
+
+ /* Make sure we don't need to care about overflow later on. */
+ if (count > ((unsigned HOST_WIDE_INT) 1 << 30))
+ return false;
+
+ /* Large word-aligned copies go through the __c6xabi strasgi helper. */
+ if (count >= 28 && (count & 3) == 0 && align >= 4)
+ {
+ tree dst_expr = MEM_EXPR (dst);
+ tree src_expr = MEM_EXPR (src);
+ rtx fn = TARGET_INSNS_64PLUS ? strasgi64p_libfunc : strasgi_libfunc;
+ rtx srcreg = force_reg (Pmode, XEXP (src, 0));
+ rtx dstreg = force_reg (Pmode, XEXP (dst, 0));
+
+ if (src_expr)
+ mark_addressable (src_expr);
+ if (dst_expr)
+ mark_addressable (dst_expr);
+ emit_library_call (fn, LCT_NORMAL, VOIDmode, 3,
+ dstreg, Pmode, srcreg, Pmode, count_exp, SImode);
+ return true;
+ }
+
+ /* Without the 64-family misaligned-access insns, the chunk size is
+ limited by the alignment. */
+ if (biggest_move > align && !TARGET_INSNS_64)
+ biggest_move = align;
+
+ /* Don't expand very long inline sequences. */
+ if (count / biggest_move > 7)
+ return false;
+
+ /* Copy loop: load a chunk of SRC, then store it out in (possibly
+ smaller) pieces that DST's alignment allows, shifting the loaded
+ word to extract each piece. */
+ while (count > 0)
+ {
+ rtx reg, reg_lowpart;
+ enum machine_mode srcmode, dstmode;
+ unsigned HOST_WIDE_INT src_size, dst_size, src_left;
+ int shift;
+ rtx srcmem, dstmem;
+
+ while (biggest_move > count)
+ biggest_move /= 2;
+
+ src_size = dst_size = biggest_move;
+ if (src_size > src_mem_align && src_size == 2)
+ src_size = 1;
+ if (dst_size > dst_mem_align && dst_size == 2)
+ dst_size = 1;
+
+ if (dst_size > src_size)
+ dst_size = src_size;
+
+ srcmode = mode_for_size (src_size * BITS_PER_UNIT, MODE_INT, 0);
+ dstmode = mode_for_size (dst_size * BITS_PER_UNIT, MODE_INT, 0);
+ if (src_size >= 4)
+ reg_lowpart = reg = gen_reg_rtx (srcmode);
+ else
+ {
+ reg = gen_reg_rtx (SImode);
+ reg_lowpart = gen_lowpart (srcmode, reg);
+ }
+
+ srcmem = adjust_address (copy_rtx (src), srcmode, offset);
+
+ if (src_size > src_mem_align)
+ {
+ enum insn_code icode = (srcmode == SImode ? CODE_FOR_movmisalignsi
+ : CODE_FOR_movmisaligndi);
+ emit_insn (GEN_FCN (icode) (reg_lowpart, srcmem));
+ }
+ else
+ emit_move_insn (reg_lowpart, srcmem);
+
+ src_left = src_size;
+ /* On big-endian the first (lowest-addressed) piece is in the most
+ significant bits, so start with the largest shift. */
+ shift = TARGET_BIG_ENDIAN ? (src_size - dst_size) * BITS_PER_UNIT : 0;
+ while (src_left > 0)
+ {
+ rtx dstreg = reg_lowpart;
+
+ if (src_size > dst_size)
+ {
+ rtx srcword = reg;
+ int shift_amount = shift & (BITS_PER_WORD - 1);
+ if (src_size > 4)
+ srcword = operand_subword_force (srcword, src_left >= 4 ? 0 : 4,
+ SImode);
+ if (shift_amount > 0)
+ {
+ dstreg = gen_reg_rtx (SImode);
+ emit_insn (gen_lshrsi3 (dstreg, srcword,
+ GEN_INT (shift_amount)));
+ }
+ else
+ dstreg = srcword;
+ dstreg = gen_lowpart (dstmode, dstreg);
+ }
+
+ dstmem = adjust_address (copy_rtx (dst), dstmode, offset);
+ if (dst_size > dst_mem_align)
+ {
+ enum insn_code icode = (dstmode == SImode ? CODE_FOR_movmisalignsi
+ : CODE_FOR_movmisaligndi);
+ emit_insn (GEN_FCN (icode) (dstmem, dstreg));
+ }
+ else
+ emit_move_insn (dstmem, dstreg);
+
+ if (TARGET_BIG_ENDIAN)
+ shift -= dst_size * BITS_PER_UNIT;
+ else
+ shift += dst_size * BITS_PER_UNIT;
+ offset += dst_size;
+ src_left -= dst_size;
+ }
+ count -= src_size;
+ }
+ return true;
+}
+\f
+/* Subroutine of print_address_operand, print a single address offset OFF for
+ a memory access of mode MEM_MODE, choosing between normal form and scaled
+ form depending on the type of the insn. Misaligned memory references must
+ use the scaled form. */
+
+static void
+print_address_offset (FILE *file, rtx off, enum machine_mode mem_mode)
+{
+ rtx pat;
+
+ if (c6x_current_insn != NULL_RTX)
+ {
+ pat = PATTERN (c6x_current_insn);
+ /* Look through predication and parallels to find the actual SET. */
+ if (GET_CODE (pat) == COND_EXEC)
+ pat = COND_EXEC_CODE (pat);
+ if (GET_CODE (pat) == PARALLEL)
+ pat = XVECEXP (pat, 0, 0);
+
+ if (GET_CODE (pat) == SET
+ && GET_CODE (SET_SRC (pat)) == UNSPEC
+ && XINT (SET_SRC (pat), 1) == UNSPEC_MISALIGNED_ACCESS)
+ {
+ /* Misaligned access: the offset must be an exact multiple of the
+ access size and is printed scaled, in [] brackets. */
+ gcc_assert (CONST_INT_P (off)
+ && (INTVAL (off) & (GET_MODE_SIZE (mem_mode) - 1)) == 0);
+ fprintf (file, "[" HOST_WIDE_INT_PRINT_DEC "]",
+ INTVAL (off) / GET_MODE_SIZE (mem_mode));
+ return;
+ }
+ }
+ /* Normal form: unscaled byte offset in () parentheses. */
+ fputs ("(", file);
+ output_address (off);
+ fputs (")", file);
+}
+
+/* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P: the punctuation codes
+ '$', '.' and '|' are handled by c6x_print_operand. */
+static bool
+c6x_print_operand_punct_valid_p (unsigned char c)
+{
+ return c == '$' || c == '.' || c == '|';
+}
+
+static void c6x_print_operand (FILE *, rtx, int);
+
+/* Subroutine of c6x_print_operand; used to print a memory reference X to FILE. */
+
+static void
+c6x_print_address_operand (FILE *file, rtx x, enum machine_mode mem_mode)
+{
+ rtx off;
+ switch (GET_CODE (x))
+ {
+ case PRE_MODIFY:
+ case POST_MODIFY:
+ /* For a post-modify the base register is printed before the
+ ++/-- marker, for a pre-modify after it. */
+ if (GET_CODE (x) == POST_MODIFY)
+ output_address (XEXP (x, 0));
+ off = XEXP (XEXP (x, 1), 1);
+ /* Stack pointer updates must grow the frame on pre-modify and
+ shrink it on post-modify. */
+ if (XEXP (x, 0) == stack_pointer_rtx)
+ {
+ if (GET_CODE (x) == PRE_MODIFY)
+ gcc_assert (INTVAL (off) > 0);
+ else
+ gcc_assert (INTVAL (off) < 0);
+ }
+ if (CONST_INT_P (off) && INTVAL (off) < 0)
+ {
+ fprintf (file, "--");
+ off = GEN_INT (-INTVAL (off));
+ }
+ else
+ fprintf (file, "++");
+ if (GET_CODE (x) == PRE_MODIFY)
+ output_address (XEXP (x, 0));
+ print_address_offset (file, off, mem_mode);
+ break;
+
+ case PLUS:
+ off = XEXP (x, 1);
+ if (CONST_INT_P (off) && INTVAL (off) < 0)
+ {
+ fprintf (file, "-");
+ off = GEN_INT (-INTVAL (off));
+ }
+ else
+ fprintf (file, "+");
+ output_address (XEXP (x, 0));
+ print_address_offset (file, off, mem_mode);
+ break;
+
+ case PRE_DEC:
+ gcc_assert (XEXP (x, 0) != stack_pointer_rtx);
+ fprintf (file, "--");
+ output_address (XEXP (x, 0));
+ fprintf (file, "[1]");
+ break;
+ case PRE_INC:
+ fprintf (file, "++");
+ output_address (XEXP (x, 0));
+ fprintf (file, "[1]");
+ break;
+ case POST_INC:
+ gcc_assert (XEXP (x, 0) != stack_pointer_rtx);
+ output_address (XEXP (x, 0));
+ fprintf (file, "++[1]");
+ break;
+ case POST_DEC:
+ output_address (XEXP (x, 0));
+ fprintf (file, "--[1]");
+ break;
+
+ case SYMBOL_REF:
+ case CONST:
+ case LABEL_REF:
+ /* Only small-data symbols may appear as bare addresses; they are
+ addressed relative to B14. */
+ gcc_assert (sdata_symbolic_operand (x, Pmode));
+ fprintf (file, "+B14(");
+ output_addr_const (file, x);
+ fprintf (file, ")");
+ break;
+
+ case UNSPEC:
+ switch (XINT (x, 1))
+ {
+ case UNSPEC_LOAD_GOT:
+ fputs ("$GOT(", file);
+ output_addr_const (file, XVECEXP (x, 0, 0));
+ fputs (")", file);
+ break;
+ case UNSPEC_LOAD_SDATA:
+ output_addr_const (file, XVECEXP (x, 0, 0));
+ break;
+ default:
+ gcc_unreachable ();
+ }
+ break;
+
+ default:
+ gcc_assert (GET_CODE (x) != MEM);
+ c6x_print_operand (file, x, 0);
+ break;
+ }
+}
+
+/* Return a single character, which is either 'l', 's', 'd' or 'm', which
+ specifies the functional unit used by INSN. */
+
+char
+c6x_get_unit_specifier (rtx insn)
+{
+ enum attr_units units;
+
+ /* If the scheduler has assigned a unit, use that assignment. */
+ if (insn_info)
+ {
+ int unit = INSN_INFO_ENTRY (INSN_UID (insn)).reservation;
+ return c6x_unit_names[unit][0];
+ }
+
+ /* Otherwise derive a default unit from the insn's "units" attribute.
+ (The `break's after each `return' below are unreachable.) */
+ units = get_attr_units (insn);
+ switch (units)
+ {
+ case UNITS_D:
+ case UNITS_DL:
+ case UNITS_DS:
+ case UNITS_DLS:
+ case UNITS_D_ADDR:
+ return 'd';
+ break;
+ case UNITS_L:
+ case UNITS_LS:
+ return 'l';
+ break;
+ case UNITS_S:
+ return 's';
+ break;
+ case UNITS_M:
+ return 'm';
+ break;
+ default:
+ gcc_unreachable ();
+ }
+}
+
+/* Prints the unit specifier field (e.g. ".s1", ".d2x") for INSN. */
+static void
+c6x_print_unit_specifier_field (FILE *file, rtx insn)
+{
+ enum attr_units units = get_attr_units (insn);
+ enum attr_cross cross = get_attr_cross (insn);
+ enum attr_dest_regfile rf = get_attr_dest_regfile (insn);
+ int half;
+ char unitspec;
+
+ /* Address-generating D-unit insns use the ".dNtM" form, where N is the
+ side of the address register and M the side of the data register. */
+ if (units == UNITS_D_ADDR)
+ {
+ enum attr_addr_regfile arf = get_attr_addr_regfile (insn);
+ int t_half;
+ gcc_assert (arf != ADDR_REGFILE_UNKNOWN);
+ half = arf == ADDR_REGFILE_A ? 1 : 2;
+ t_half = rf == DEST_REGFILE_A ? 1 : 2;
+ fprintf (file, ".d%dt%d", half, t_half);
+ return;
+ }
+
+ /* If the scheduler recorded a unit reservation, print that, plus "x"
+ when the cross path is used. */
+ if (insn_info)
+ {
+ int unit = INSN_INFO_ENTRY (INSN_UID (insn)).reservation;
+ fputs (".", file);
+ fputs (c6x_unit_names[unit], file);
+ if (cross == CROSS_Y)
+ fputs ("x", file);
+ return;
+ }
+
+ gcc_assert (rf != DEST_REGFILE_UNKNOWN);
+ unitspec = c6x_get_unit_specifier (insn);
+ half = rf == DEST_REGFILE_A ? 1 : 2;
+ fprintf (file, ".%c%d%s", unitspec, half, cross == CROSS_Y ? "x" : "");
+}
+
+/* Output assembly language output for the address ADDR to FILE.
+ Implements TARGET_PRINT_OPERAND_ADDRESS; no insn mode is available
+ here, hence VOIDmode. */
+static void
+c6x_print_operand_address (FILE *file, rtx addr)
+{
+ c6x_print_address_operand (file, addr, VOIDmode);
+}
+
+/* Print an operand, X, to FILE, with an optional modifier in CODE.
+
+ Meaning of CODE:
+ $ -- print the unit specifier field for the instruction.
+ . -- print the predicate for the instruction or an empty string for an
+ unconditional one.
+ | -- print "||" if the insn should be issued in parallel with the previous
+ one.
+
+ C -- print an opcode suffix for a reversed condition
+ c -- print an opcode suffix for the condition unchanged
+ d -- H, W or D as a suffix for ADDA, based on the factor given by the
+ operand
+ D -- print either B, H, W or D as a suffix for ADDA, based on the size of
+ the operand
+ J -- print a predicate
+ j -- like J, but use reverse predicate
+ k -- treat a CONST_INT as a register number and print it as a register
+ K -- like k, but print out a doubleword register
+ n -- print an integer operand, negated
+ p -- print the low part of a DImode register
+ P -- print the high part of a DImode register
+ r -- print the absolute value of an integer operand, shifted right by 1
+ R -- print the absolute value of an integer operand, shifted right by 2
+ f -- the first clear bit in an integer operand assumed to be a mask for
+ a clr instruction
+ F -- the last clear bit in such a mask
+ s -- the first set bit in an integer operand assumed to be a mask for
+ a set instruction
+ S -- the last set bit in such a mask
+ U -- print either 1 or 2, depending on the side of the machine used by
+ the operand */
+
+static void
+c6x_print_operand (FILE *file, rtx x, int code)
+{
+ int i;
+ HOST_WIDE_INT v;
+ tree t;
+ enum machine_mode mode;
+
+ /* A TImode insn mode marks the start of a new execute packet, so no
+ parallel bars are wanted then. */
+ if (code == '|')
+ {
+ if (GET_MODE (c6x_current_insn) != TImode)
+ fputs ("||", file);
+ return;
+ }
+ if (code == '$')
+ {
+ c6x_print_unit_specifier_field (file, c6x_current_insn);
+ return;
+ }
+
+ if (code == '.')
+ {
+ x = current_insn_predicate;
+ if (x)
+ {
+ unsigned int regno = REGNO (XEXP (x, 0));
+ fputs ("[", file);
+ /* An EQ predicate means "execute if the register is zero",
+ which is the negated form in assembly. */
+ if (GET_CODE (x) == EQ)
+ fputs ("!", file);
+ fputs (reg_names [regno], file);
+ fputs ("]", file);
+ }
+ return;
+ }
+
+ mode = GET_MODE (x);
+
+ switch (code)
+ {
+ case 'C':
+ case 'c':
+ {
+ enum rtx_code c = GET_CODE (x);
+ if (code == 'C')
+ c = swap_condition (c);
+ fputs (GET_RTX_NAME (c), file);
+ }
+ return;
+
+ case 'J':
+ case 'j':
+ {
+ unsigned int regno = REGNO (XEXP (x, 0));
+ if ((GET_CODE (x) == EQ) == (code == 'J'))
+ fputs ("!", file);
+ fputs (reg_names [regno], file);
+ }
+ return;
+
+ case 'k':
+ gcc_assert (GET_CODE (x) == CONST_INT);
+ v = INTVAL (x);
+ fprintf (file, "%s", reg_names[v]);
+ return;
+ case 'K':
+ gcc_assert (GET_CODE (x) == CONST_INT);
+ v = INTVAL (x);
+ /* Doubleword registers must start on an even register number. */
+ gcc_assert ((v & 1) == 0);
+ fprintf (file, "%s:%s", reg_names[v + 1], reg_names[v]);
+ return;
+
+ case 's':
+ case 'S':
+ case 'f':
+ case 'F':
+ gcc_assert (GET_CODE (x) == CONST_INT);
+ v = INTVAL (x);
+ /* Find the first bit of interest: clear for 'f'/'F', set for
+ 's'/'S'. */
+ for (i = 0; i < 32; i++)
+ {
+ HOST_WIDE_INT tst = v & 1;
+ if (((code == 'f' || code == 'F') && !tst)
+ || ((code == 's' || code == 'S') && tst))
+ break;
+ v >>= 1;
+ }
+ if (code == 'f' || code == 's')
+ {
+ fprintf (file, "%d", i);
+ return;
+ }
+ /* For 'F'/'S', continue to the end of the run and print the index
+ of its last bit. */
+ for (;i < 32; i++)
+ {
+ HOST_WIDE_INT tst = v & 1;
+ if ((code == 'F' && tst) || (code == 'S' && !tst))
+ break;
+ v >>= 1;
+ }
+ fprintf (file, "%d", i - 1);
+ return;
+
+ case 'n':
+ gcc_assert (GET_CODE (x) == CONST_INT);
+ output_addr_const (file, GEN_INT (-INTVAL (x)));
+ return;
+
+ case 'r':
+ gcc_assert (GET_CODE (x) == CONST_INT);
+ v = INTVAL (x);
+ if (v < 0)
+ v = -v;
+ output_addr_const (file, GEN_INT (v >> 1));
+ return;
+
+ case 'R':
+ gcc_assert (GET_CODE (x) == CONST_INT);
+ v = INTVAL (x);
+ if (v < 0)
+ v = -v;
+ output_addr_const (file, GEN_INT (v >> 2));
+ return;
+
+ case 'd':
+ gcc_assert (GET_CODE (x) == CONST_INT);
+ v = INTVAL (x);
+ fputs (v == 2 ? "h" : v == 4 ? "w" : "d", file);
+ return;
+
+ case 'p':
+ case 'P':
+ gcc_assert (GET_CODE (x) == REG);
+ v = REGNO (x);
+ if (code == 'P')
+ v++;
+ fputs (reg_names[v], file);
+ return;
+
+ case 'D':
+ v = 0;
+ if (GET_CODE (x) == CONST)
+ {
+ x = XEXP (x, 0);
+ gcc_assert (GET_CODE (x) == PLUS);
+ gcc_assert (GET_CODE (XEXP (x, 1)) == CONST_INT);
+ /* Fold the offset into V so its low bits limit the access size. */
+ v = INTVAL (XEXP (x, 1));
+ x = XEXP (x, 0);
+
+ }
+ gcc_assert (GET_CODE (x) == SYMBOL_REF);
+
+ t = SYMBOL_REF_DECL (x);
+ if (DECL_P (t))
+ v |= DECL_ALIGN_UNIT (t);
+ else
+ v |= TYPE_ALIGN_UNIT (TREE_TYPE (t));
+ /* The lowest set bit of offset|alignment bounds the guaranteed
+ alignment of the address. */
+ if (v & 1)
+ fputs ("b", file);
+ else if (v & 2)
+ fputs ("h", file);
+ else
+ fputs ("w", file);
+ return;
+
+ case 'U':
+ if (MEM_P (x))
+ {
+ x = XEXP (x, 0);
+ if (GET_CODE (x) == PLUS
+ || GET_RTX_CLASS (GET_CODE (x)) == RTX_AUTOINC)
+ x = XEXP (x, 0);
+ /* Small-data addresses are based on B14, which is on side 2. */
+ if (GET_CODE (x) == CONST || GET_CODE (x) == SYMBOL_REF)
+ {
+ gcc_assert (sdata_symbolic_operand (x, Pmode));
+ fputs ("2", file);
+ return;
+ }
+ }
+ gcc_assert (REG_P (x));
+ if (A_REGNO_P (REGNO (x)))
+ fputs ("1", file);
+ if (B_REGNO_P (REGNO (x)))
+ fputs ("2", file);
+ return;
+
+ default:
+ switch (GET_CODE (x))
+ {
+ case REG:
+ if (GET_MODE_SIZE (mode) == 8)
+ fprintf (file, "%s:%s", reg_names[REGNO (x) + 1],
+ reg_names[REGNO (x)]);
+ else
+ fprintf (file, "%s", reg_names[REGNO (x)]);
+ break;
+
+ case MEM:
+ fputc ('*', file);
+ gcc_assert (XEXP (x, 0) != stack_pointer_rtx);
+ c6x_print_address_operand (file, XEXP (x, 0), GET_MODE (x));
+ break;
+
+ case SYMBOL_REF:
+ fputc ('(', file);
+ output_addr_const (file, x);
+ fputc (')', file);
+ break;
+
+ case CONST_INT:
+ output_addr_const (file, x);
+ break;
+
+ case CONST_DOUBLE:
+ output_operand_lossage ("invalid const_double operand");
+ break;
+
+ default:
+ output_addr_const (file, x);
+ }
+ }
+}
+\f
+/* Return TRUE if OP is a valid memory address with a base register of
+ class C. If SMALL_OFFSET is true, we disallow memory references which would
+ require a long offset with B14/B15. */
+
+bool
+c6x_mem_operand (rtx op, enum reg_class c, bool small_offset)
+{
+ enum machine_mode mode = GET_MODE (op);
+ rtx base = XEXP (op, 0);
+ switch (GET_CODE (base))
+ {
+ case REG:
+ break;
+ case PLUS:
+ /* B14/B15-based addresses can take long offsets; re-validate with
+ no_large_offset set when those are not allowed here. */
+ if (small_offset
+ && (XEXP (base, 0) == stack_pointer_rtx
+ || XEXP (base, 0) == pic_offset_table_rtx))
+ {
+ if (!c6x_legitimate_address_p_1 (mode, base, true, true))
+ return false;
+ }
+
+ /* fall through */
+ case PRE_INC:
+ case PRE_DEC:
+ case PRE_MODIFY:
+ case POST_INC:
+ case POST_DEC:
+ case POST_MODIFY:
+ base = XEXP (base, 0);
+ break;
+
+ case CONST:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ /* A bare symbolic address must be a small-data reference, which is
+ implicitly based on B14. */
+ gcc_assert (sdata_symbolic_operand (base, Pmode));
+ return !small_offset && c == B_REGS;
+
+ default:
+ return false;
+ }
+ return TEST_HARD_REG_BIT (reg_class_contents[ (int) (c)], REGNO (base));
+}
+
+/* Returns true if X is a valid address for use in a memory reference
+ of mode MODE. If STRICT is true, we do not allow pseudo registers
+ in the address. NO_LARGE_OFFSET is true if we are examining an
+ address for use in a load or store misaligned instruction, or
+ recursively examining an operand inside a PRE/POST_MODIFY. */
+
+bool
+c6x_legitimate_address_p_1 (enum machine_mode mode, rtx x, bool strict,
+ bool no_large_offset)
+{
+ int size, size1;
+ HOST_WIDE_INT off;
+ enum rtx_code code = GET_CODE (x);
+
+ switch (code)
+ {
+ case PRE_MODIFY:
+ case POST_MODIFY:
+ /* We can't split these into word-sized pieces yet. */
+ if (!TARGET_STDW && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
+ return false;
+ if (GET_CODE (XEXP (x, 1)) != PLUS)
+ return false;
+ /* The inner PLUS must itself be legitimate (without long offsets)
+ and its base must match the register being modified. */
+ if (!c6x_legitimate_address_p_1 (mode, XEXP (x, 1), strict, true))
+ return false;
+ if (!rtx_equal_p (XEXP (x, 0), XEXP (XEXP (x, 1), 0)))
+ return false;
+
+ /* fall through */
+ case PRE_INC:
+ case PRE_DEC:
+ case POST_INC:
+ case POST_DEC:
+ /* We can't split these into word-sized pieces yet. */
+ if (!TARGET_STDW && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
+ return false;
+ x = XEXP (x, 0);
+ if (!REG_P (x))
+ return false;
+
+ /* fall through */
+ case REG:
+ if (strict)
+ return REGNO_OK_FOR_BASE_STRICT_P (REGNO (x));
+ else
+ return REGNO_OK_FOR_BASE_NONSTRICT_P (REGNO (x));
+
+ case PLUS:
+ if (!REG_P (XEXP (x, 0))
+ || !c6x_legitimate_address_p_1 (mode, XEXP (x, 0), strict, false))
+ return false;
+ /* We cannot ensure currently that both registers end up in the
+ same register file. */
+ if (REG_P (XEXP (x, 1)))
+ return false;
+
+ if (mode == BLKmode)
+ size = 4;
+ else if (mode == VOIDmode)
+ /* ??? This can happen during ivopts. */
+ size = 1;
+ else
+ size = GET_MODE_SIZE (mode);
+
+ /* PIC small-data references: B14 plus an UNSPEC_LOAD_SDATA wrapper. */
+ if (flag_pic
+ && GET_CODE (XEXP (x, 1)) == UNSPEC
+ && XINT (XEXP (x, 1), 1) == UNSPEC_LOAD_SDATA
+ && XEXP (x, 0) == pic_offset_table_rtx
+ && sdata_symbolic_operand (XVECEXP (XEXP (x, 1), 0, 0), SImode))
+ return !no_large_offset && size <= 4;
+ /* Single-GOT PIC: B14 plus an UNSPEC_LOAD_GOT wrapper. */
+ if (flag_pic == 1
+ && mode == Pmode
+ && GET_CODE (XEXP (x, 1)) == UNSPEC
+ && XINT (XEXP (x, 1), 1) == UNSPEC_LOAD_GOT
+ && XEXP (x, 0) == pic_offset_table_rtx
+ && (GET_CODE (XVECEXP (XEXP (x, 1), 0, 0)) == SYMBOL_REF
+ || GET_CODE (XVECEXP (XEXP (x, 1), 0, 0)) == LABEL_REF))
+ return !no_large_offset;
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ return false;
+
+ off = INTVAL (XEXP (x, 1));
+
+ /* If the machine does not have doubleword load/stores, we'll use
+ word size accesses. */
+ size1 = size;
+ if (size == 2 * UNITS_PER_WORD && !TARGET_STDW)
+ size = UNITS_PER_WORD;
+
+ /* The offset must be naturally aligned for the (original) size. */
+ if (((HOST_WIDE_INT)size1 - 1) & off)
+ return false;
+ off /= size;
+ /* Scaled offsets fit in a signed 6-bit field; a split doubleword
+ access needs room for the second word's offset as well. */
+ if (off > -32 && off < (size1 == size ? 32 : 28))
+ return true;
+ /* Larger positive offsets are only valid off the stack pointer. */
+ if (no_large_offset || code != PLUS || XEXP (x, 0) != stack_pointer_rtx
+ || size1 > UNITS_PER_WORD)
+ return false;
+ return off >= 0 && off < 32768;
+
+ case CONST:
+ case SYMBOL_REF:
+ case LABEL_REF:
+ return (!no_large_offset
+ /* With -fpic, we must wrap it in an unspec to show the B14
+ dependency. */
+ && !flag_pic
+ && GET_MODE_SIZE (mode) <= UNITS_PER_WORD
+ && sdata_symbolic_operand (x, Pmode));
+
+ default:
+ return false;
+ }
+}
+
+/* Implement TARGET_LEGITIMATE_ADDRESS_P: the general case, where large
+ offsets are permitted. */
+static bool
+c6x_legitimate_address_p (enum machine_mode mode, rtx x, bool strict)
+{
+ return c6x_legitimate_address_p_1 (mode, x, strict, false);
+}
+
+/* Implement TARGET_LEGITIMATE_CONSTANT_P: every constant is acceptable. */
+static bool
+c6x_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
+ rtx x ATTRIBUTE_UNUSED)
+{
+ return true;
+}
+\f
+/* Implements TARGET_PREFERRED_RENAME_CLASS. Prefer the nonpredicate
+ subset of each class so that renaming does not consume the registers
+ usable as predicates; classes with no such subset get NO_REGS. */
+static reg_class_t
+c6x_preferred_rename_class (reg_class_t cl)
+{
+ if (cl == A_REGS)
+ return NONPREDICATE_A_REGS;
+ if (cl == B_REGS)
+ return NONPREDICATE_B_REGS;
+ if (cl == ALL_REGS || cl == GENERAL_REGS)
+ return NONPREDICATE_REGS;
+ return NO_REGS;
+}
+\f
+/* Implements FINAL_PRESCAN_INSN. Record the insn being output so the
+ operand-printing routines can examine it. */
+void
+c6x_final_prescan_insn (rtx insn, rtx *opvec ATTRIBUTE_UNUSED,
+ int noperands ATTRIBUTE_UNUSED)
+{
+ c6x_current_insn = insn;
+}
+\f
+/* A structure to describe the stack layout of a function. The layout is
+ as follows:
+
+ [saved frame pointer (or possibly padding0)]
+ --> incoming stack pointer, new hard frame pointer
+ [saved call-used regs]
+ [optional padding1]
+ --> soft frame pointer
+ [frame]
+ [outgoing arguments]
+ [optional padding2]
+
+ The structure members are laid out in this order. */
+
+struct c6x_frame
+{
+ /* Padding in place of the saved frame pointer, in bytes. */
+ int padding0;
+ /* Number of registers to save. */
+ int nregs;
+ /* Padding between the register saves and the soft frame pointer. */
+ int padding1;
+ /* Size of the local frame, in bytes. */
+ HOST_WIDE_INT frame;
+ int outgoing_arguments_size;
+ /* Padding at the bottom keeping the stack pointer aligned. */
+ int padding2;
+
+ /* Total stack space to allocate in the prologue. */
+ HOST_WIDE_INT to_allocate;
+ /* The offsets relative to the incoming stack pointer (which
+ becomes HARD_FRAME_POINTER). */
+ HOST_WIDE_INT frame_pointer_offset;
+ HOST_WIDE_INT b3_offset;
+
+ /* True if we should call push_rts/pop_rts to save and restore
+ registers. */
+ bool push_rts;
+};
+
+/* Return true if we need to save and modify the PIC register in the
+ prologue. */
+
+static bool
+must_reload_pic_reg_p (void)
+{
+ struct cgraph_local_info *i = NULL;
+
+ /* Only the DSBT model requires reloading the PIC register. */
+ if (!TARGET_DSBT)
+ return false;
+
+ i = cgraph_local_info (current_function_decl);
+
+ /* Local (not externally visible) leaf functions that don't use the
+ PIC register can skip the reload. */
+ if ((crtl->uses_pic_offset_table || !current_function_is_leaf) && !i->local)
+ return true;
+ return false;
+}
+
+/* Return 1 if we need to save REGNO in the current function's prologue. */
+static int
+c6x_save_reg (unsigned int regno)
+{
+ return ((df_regs_ever_live_p (regno)
+ && !call_used_regs[regno]
+ && !fixed_regs[regno])
+ /* The return address register is saved whenever it is live or
+ whenever we make calls. */
+ || (regno == RETURN_ADDR_REGNO
+ && (df_regs_ever_live_p (regno)
+ || !current_function_is_leaf))
+ || (regno == PIC_OFFSET_TABLE_REGNUM && must_reload_pic_reg_p ()));
+}
+
+/* Examine the number of regs NREGS we've determined we must save.
+ Return true if we should use __c6xabi_push_rts/__c6xabi_pop_rts for
+ prologue and epilogue. */
+
+static bool
+use_push_rts_p (int nregs)
+{
+ /* The helper saves a fixed set of registers, so it only pays off when
+ optimizing for size with enough registers to save, and it is
+ incompatible with sibcalls, struct returns, long calls and frame
+ pointers. */
+ if (TARGET_INSNS_64PLUS && optimize_function_for_size_p (cfun)
+ && !cfun->machine->contains_sibcall
+ && !cfun->returns_struct
+ && !TARGET_LONG_CALLS
+ && nregs >= 6 && !frame_pointer_needed)
+ return true;
+ return false;
+}
+
+/* Return number of saved general purpose registers. */
+
+int
+c6x_nsaved_regs (void)
+{
+ int nregs = 0;
+ int regno;
+
+ for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+ if (c6x_save_reg (regno))
+ nregs++;
+ return nregs;
+}
+
+/* The safe debug order mandated by the ABI: registers are saved to the
+ stack in this sequence. */
+static unsigned reg_save_order[] =
+{
+ REG_A10, REG_A11, REG_A12, REG_A13,
+ REG_A14, REG_B3,
+ REG_B10, REG_B11, REG_B12, REG_B13,
+ REG_B14, REG_A15
+};
+
+/* Number of entries in reg_save_order. */
+#define N_SAVE_ORDER (sizeof reg_save_order / sizeof *reg_save_order)
+
+/* Compute the layout of the stack frame and store it in FRAME. */
+
+static void
+c6x_compute_frame_layout (struct c6x_frame *frame)
+{
+ HOST_WIDE_INT size = get_frame_size ();
+ HOST_WIDE_INT offset;
+ int nregs;
+
+ /* We use the four bytes which are technically inside the caller's frame,
+ usually to save the frame pointer. */
+ offset = -4;
+ frame->padding0 = 0;
+ nregs = c6x_nsaved_regs ();
+ frame->push_rts = false;
+ frame->b3_offset = 0;
+ if (use_push_rts_p (nregs))
+ {
+ /* __c6xabi_push_rts saves a fixed block of 14 registers; B3's slot
+ within it depends on endianness. */
+ frame->push_rts = true;
+ frame->b3_offset = (TARGET_BIG_ENDIAN ? -12 : -13) * 4;
+ nregs = 14;
+ }
+ else if (c6x_save_reg (REG_B3))
+ {
+ /* Locate B3's save slot by counting the saved registers that follow
+ it in the ABI save order. */
+ int idx;
+ for (idx = N_SAVE_ORDER - 1; reg_save_order[idx] != REG_B3; idx--)
+ {
+ if (c6x_save_reg (reg_save_order[idx]))
+ frame->b3_offset -= 4;
+ }
+ }
+ frame->nregs = nregs;
+
+ /* Trivial frame: nothing to allocate at all. */
+ if (size == 0 && nregs == 0)
+ {
+ frame->padding0 = 4;
+ frame->padding1 = frame->padding2 = 0;
+ frame->frame_pointer_offset = frame->to_allocate = 0;
+ frame->outgoing_arguments_size = 0;
+ return;
+ }
+
+ if (!frame->push_rts)
+ offset += frame->nregs * 4;
+
+ if (offset == 0 && size == 0 && crtl->outgoing_args_size == 0
+ && !current_function_is_leaf)
+ /* Don't use the bottom of the caller's frame if we have no
+ allocation of our own and call other functions. */
+ frame->padding0 = frame->padding1 = 4;
+ else if (offset & 4)
+ /* Keep the soft frame pointer 8-byte aligned. */
+ frame->padding1 = 4;
+ else
+ frame->padding1 = 0;
+
+ offset += frame->padding0 + frame->padding1;
+ frame->frame_pointer_offset = offset;
+ offset += size;
+
+ frame->outgoing_arguments_size = crtl->outgoing_args_size;
+ offset += frame->outgoing_arguments_size;
+
+ /* Pad so that the outgoing stack pointer stays 8-byte aligned and the
+ word it points to is never stored to. */
+ if ((offset & 4) == 0)
+ frame->padding2 = 8;
+ else
+ frame->padding2 = 4;
+ frame->to_allocate = offset + frame->padding2;
+}
+
+/* Return the offset between two registers, one to be eliminated, and the other
+ its replacement, at the start of a routine. */
+
+HOST_WIDE_INT
+c6x_initial_elimination_offset (int from, int to)
+{
+ struct c6x_frame frame;
+ c6x_compute_frame_layout (&frame);
+
+ /* The hard frame pointer equals the incoming stack pointer, which is
+ also where the argument pointer points. */
+ if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
+ return 0;
+ else if (from == FRAME_POINTER_REGNUM
+ && to == HARD_FRAME_POINTER_REGNUM)
+ return -frame.frame_pointer_offset;
+ else
+ {
+ gcc_assert (to == STACK_POINTER_REGNUM);
+
+ /* push_rts allocates an extra 56 bytes (14 registers) itself. */
+ if (from == ARG_POINTER_REGNUM)
+ return frame.to_allocate + (frame.push_rts ? 56 : 0);
+
+ gcc_assert (from == FRAME_POINTER_REGNUM);
+ return frame.to_allocate - frame.frame_pointer_offset;
+ }
+}
+
+/* Given FROM and TO register numbers, say whether this elimination is
+ allowed. Frame pointer elimination is automatically handled. */
+
+static bool
+c6x_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
+{
+ /* Eliminating to the stack pointer is only possible when no frame
+ pointer is required. */
+ if (to == STACK_POINTER_REGNUM)
+ return !frame_pointer_needed;
+ return true;
+}
+
+/* Emit insns to increment the stack pointer by OFFSET. If
+ FRAME_RELATED_P, set the RTX_FRAME_RELATED_P flag on the insns.
+ Does nothing if the offset is zero. */
+
+static void
+emit_add_sp_const (HOST_WIDE_INT offset, bool frame_related_p)
+{
+ rtx to_add = GEN_INT (offset);
+ rtx orig_to_add = to_add;
+ rtx insn;
+
+ if (offset == 0)
+ return;
+
+ /* Offsets outside the signed 16-bit immediate range are first
+ materialized in A0 with a high/lo_sum pair. */
+ if (offset < -32768 || offset > 32767)
+ {
+ rtx reg = gen_rtx_REG (SImode, REG_A0);
+ rtx low = GEN_INT (trunc_int_for_mode (offset, HImode));
+
+ insn = emit_insn (gen_movsi_high (reg, low));
+ if (frame_related_p)
+ RTX_FRAME_RELATED_P (insn) = 1;
+ insn = emit_insn (gen_movsi_lo_sum (reg, reg, to_add));
+ if (frame_related_p)
+ RTX_FRAME_RELATED_P (insn) = 1;
+ to_add = reg;
+ }
+ insn = emit_insn (gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx,
+ to_add));
+ if (frame_related_p)
+ {
+ /* When adding a register, describe the net effect on SP explicitly
+ so the unwinder sees the constant adjustment. */
+ if (REG_P (to_add))
+ add_reg_note (insn, REG_FRAME_RELATED_EXPR,
+ gen_rtx_SET (VOIDmode, stack_pointer_rtx,
+ gen_rtx_PLUS (Pmode, stack_pointer_rtx,
+ orig_to_add)));
+
+ RTX_FRAME_RELATED_P (insn) = 1;
+ }
+}
+
+/* Prologue and epilogue.  */
+
+/* Expand the function prologue: optionally emit a push_rts helper call,
+   save the frame pointer and call-saved registers, allocate the frame,
+   and set up the DSBT base register for PIC if required.  */
+void
+c6x_expand_prologue (void)
+{
+  struct c6x_frame frame;
+  rtx insn, mem;
+  int nsaved = 0;
+  HOST_WIDE_INT initial_offset, off, added_already;
+
+  c6x_compute_frame_layout (&frame);
+
+  if (flag_stack_usage_info)
+    current_function_static_stack_size = frame.to_allocate;
+
+  initial_offset = -frame.to_allocate;
+  if (frame.push_rts)
+    {
+      /* The push_rts pattern saves all of the registers covered by the
+	 run-time-support helper; they count toward frame.nregs.  */
+      emit_insn (gen_push_rts ());
+      nsaved = frame.nregs;
+    }
+
+  /* If the offsets would be too large for the memory references we will
+     create to save registers, do the stack allocation in two parts.
+     Ensure by subtracting 8 that we don't store to the word pointed to
+     by the stack pointer.  */
+  if (initial_offset < -32768)
+    initial_offset = -frame.frame_pointer_offset - 8;
+
+  if (frame.to_allocate > 0)
+    gcc_assert (initial_offset != 0);
+
+  /* OFF tracks the offset from the new stack pointer at which the next
+     register save will be stored.  */
+  off = -initial_offset + 4 - frame.padding0;
+
+  mem = gen_frame_mem (Pmode, stack_pointer_rtx);
+
+  added_already = 0;
+  if (frame_pointer_needed)
+    {
+      rtx fp_reg = gen_rtx_REG (SImode, REG_A15);
+      /* We go through some contortions here to both follow the ABI's
+	 recommendation that FP == incoming SP, and to avoid writing or
+	 reading the word pointed to by the stack pointer.  */
+      rtx addr = gen_rtx_POST_MODIFY (Pmode, stack_pointer_rtx,
+				      gen_rtx_PLUS (Pmode, stack_pointer_rtx,
+						    GEN_INT (-8)));
+      insn = emit_move_insn (gen_frame_mem (Pmode, addr), fp_reg);
+      RTX_FRAME_RELATED_P (insn) = 1;
+      nsaved++;
+      insn = emit_insn (gen_addsi3 (hard_frame_pointer_rtx, stack_pointer_rtx,
+				    GEN_INT (8)));
+      RTX_FRAME_RELATED_P (insn) = 1;
+      off -= 4;
+      /* The POST_MODIFY above already moved SP down by 8; subtract that
+	 from the allocation still to be done.  */
+      added_already = -8;
+    }
+
+  emit_add_sp_const (initial_offset - added_already, true);
+
+  if (nsaved < frame.nregs)
+    {
+      unsigned i;
+
+      /* Walk the save order backwards so stores go from high addresses
+	 toward the stack pointer.  */
+      for (i = 0; i < N_SAVE_ORDER; i++)
+	{
+	  int idx = N_SAVE_ORDER - i - 1;
+	  unsigned regno = reg_save_order[idx];
+	  rtx reg;
+	  enum machine_mode save_mode = SImode;
+
+	  if (regno == REG_A15 && frame_pointer_needed)
+	    /* Already saved.  */
+	    continue;
+	  if (!c6x_save_reg (regno))
+	    continue;
+
+	  /* Pair an odd register with its even predecessor into a single
+	     doubleword store when STDW is available and the slot is
+	     suitably aligned and in range.  */
+	  if (TARGET_STDW && (off & 4) == 0 && off <= 256
+	      && (regno & 1) == 1
+	      && i + 1 < N_SAVE_ORDER
+	      && reg_save_order[idx - 1] == regno - 1
+	      && c6x_save_reg (regno - 1))
+	    {
+	      save_mode = DImode;
+	      regno--;
+	      i++;
+	    }
+	  reg = gen_rtx_REG (save_mode, regno);
+	  off -= GET_MODE_SIZE (save_mode);
+
+	  insn = emit_move_insn (adjust_address (mem, save_mode, off),
+				 reg);
+	  RTX_FRAME_RELATED_P (insn) = 1;
+
+	  nsaved += HARD_REGNO_NREGS (regno, save_mode);
+	}
+    }
+  gcc_assert (nsaved == frame.nregs);
+  /* Allocate whatever part of the frame the first adjustment did not.  */
+  emit_add_sp_const (-frame.to_allocate - initial_offset, true);
+  if (must_reload_pic_reg_p ())
+    {
+      /* Lazily build a declaration for the external __c6xabi_DSBT_BASE
+	 symbol used to initialize the PIC register.  */
+      if (dsbt_decl == NULL)
+	{
+	  tree t;
+
+	  t = build_index_type (integer_one_node);
+	  t = build_array_type (integer_type_node, t);
+	  t = build_decl (BUILTINS_LOCATION, VAR_DECL,
+			  get_identifier ("__c6xabi_DSBT_BASE"), t);
+	  DECL_ARTIFICIAL (t) = 1;
+	  DECL_IGNORED_P (t) = 1;
+	  DECL_EXTERNAL (t) = 1;
+	  TREE_STATIC (t) = 1;
+	  TREE_PUBLIC (t) = 1;
+	  TREE_USED (t) = 1;
+
+	  dsbt_decl = t;
+	}
+      emit_insn (gen_setup_dsbt (pic_offset_table_rtx,
+				 XEXP (DECL_RTL (dsbt_decl), 0)));
+    }
+}
+
+/* Expand the function epilogue, undoing the prologue's work: restore
+   call-saved registers, deallocate the frame, restore the frame pointer
+   if one was used, and emit the return unless SIBCALL is true.  */
+void
+c6x_expand_epilogue (bool sibcall)
+{
+  unsigned i;
+  struct c6x_frame frame;
+  rtx mem;
+  HOST_WIDE_INT off;
+  int nsaved = 0;
+
+  c6x_compute_frame_layout (&frame);
+
+  mem = gen_frame_mem (Pmode, stack_pointer_rtx);
+
+  /* Insert a dummy set/use of the stack pointer.  This creates a
+     scheduler barrier between the prologue saves and epilogue restores.  */
+  emit_insn (gen_epilogue_barrier (stack_pointer_rtx, stack_pointer_rtx));
+
+  /* If the offsets would be too large for the memory references we will
+     create to restore registers, do a preliminary stack adjustment here.  */
+  off = frame.to_allocate - frame.frame_pointer_offset + frame.padding1;
+  if (frame.push_rts)
+    {
+      /* pop_rts (emitted below) restores everything the helper saved.  */
+      nsaved = frame.nregs;
+    }
+  else
+    {
+      if (frame.to_allocate > 32768)
+	{
+	  /* Don't add the entire offset so that we leave an unused word
+	     above the stack pointer.  */
+	  emit_add_sp_const ((off - 16) & ~7, false);
+	  off &= 7;
+	  off += 16;
+	}
+      /* Restore in the forward save order, mirroring the prologue's
+	 backward walk; OFF advances upward through the save area.  */
+      for (i = 0; i < N_SAVE_ORDER; i++)
+	{
+	  unsigned regno = reg_save_order[i];
+	  rtx reg;
+	  enum machine_mode save_mode = SImode;
+
+	  if (!c6x_save_reg (regno))
+	    continue;
+	  if (regno == REG_A15 && frame_pointer_needed)
+	    continue;
+
+	  /* Pair an even register with its odd successor into a single
+	     doubleword load when possible (the prologue's counterpart).  */
+	  if (TARGET_STDW && (off & 4) == 0 && off < 256
+	      && (regno & 1) == 0
+	      && i + 1 < N_SAVE_ORDER
+	      && reg_save_order[i + 1] == regno + 1
+	      && c6x_save_reg (regno + 1))
+	    {
+	      save_mode = DImode;
+	      i++;
+	    }
+	  reg = gen_rtx_REG (save_mode, regno);
+
+	  emit_move_insn (reg, adjust_address (mem, save_mode, off));
+
+	  off += GET_MODE_SIZE (save_mode);
+	  nsaved += HARD_REGNO_NREGS (regno, save_mode);
+	}
+    }
+  if (!frame_pointer_needed)
+    emit_add_sp_const (off + frame.padding0 - 4, false);
+  else
+    {
+      /* Recover the incoming SP from the frame pointer, then reload the
+	 saved A15 with a pre-modify so we never read the word the stack
+	 pointer points to (mirrors the prologue contortions).  */
+      rtx fp_reg = gen_rtx_REG (SImode, REG_A15);
+      rtx addr = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx,
+				     gen_rtx_PLUS (Pmode, stack_pointer_rtx,
+						   GEN_INT (8)));
+      emit_insn (gen_addsi3 (stack_pointer_rtx, hard_frame_pointer_rtx,
+			     GEN_INT (-8)));
+      emit_move_insn (fp_reg, gen_frame_mem (Pmode, addr));
+      nsaved++;
+    }
+  gcc_assert (nsaved == frame.nregs);
+  if (!sibcall)
+    {
+      if (frame.push_rts)
+	emit_jump_insn (gen_pop_rts ());
+      else
+	emit_jump_insn (gen_return_internal (gen_rtx_REG (SImode,
+							  RETURN_ADDR_REGNO)));
+    }
+}
+
+/* Return the value of the return address for the frame COUNT steps up
+   from the current frame, after the prologue.
+   We punt for everything but the current frame by returning const0_rtx.  */
+
+rtx
+c6x_return_addr_rtx (int count)
+{
+  if (count != 0)
+    return const0_rtx;
+
+  /* Use the pseudo holding the entry value of the return-address
+     register, so later clobbers of it do not matter.  */
+  return get_hard_reg_initial_val (Pmode, RETURN_ADDR_REGNO);
+}
+\f
+/* Return true iff TYPE is one of the shadow types (patterns standing in
+   for the delayed side effect of an earlier insn).  */
+static bool
+shadow_type_p (enum attr_type type)
+{
+  return (type == TYPE_SHADOW || type == TYPE_LOAD_SHADOW
+	  || type == TYPE_MULT_SHADOW);
+}
+
+/* Return true iff INSN is a shadow pattern.  Non-insns and
+   unrecognizable insns are never shadows.  */
+static bool
+shadow_p (rtx insn)
+{
+  if (!NONDEBUG_INSN_P (insn) || recog_memoized (insn) < 0)
+    return false;
+  return shadow_type_p (get_attr_type (insn));
+}
+
+/* Return true iff INSN is a shadow or blockage pattern.  */
+static bool
+shadow_or_blockage_p (rtx insn)
+{
+  enum attr_type type;
+  if (!NONDEBUG_INSN_P (insn) || recog_memoized (insn) < 0)
+    return false;
+  type = get_attr_type (insn);
+  return shadow_type_p (type) || type == TYPE_BLOCKAGE;
+}
+\f
+/* Translate UNITS into a bitmask of units we can reserve for this
+   insn.  Returns 0 for units that do not participate in reservation
+   assignment (e.g. no-unit insns).  */
+static int
+get_reservation_flags (enum attr_units units)
+{
+  switch (units)
+    {
+    case UNITS_D:
+    case UNITS_D_ADDR:
+      return RESERVATION_FLAG_D;
+    case UNITS_L:
+      return RESERVATION_FLAG_L;
+    case UNITS_S:
+      return RESERVATION_FLAG_S;
+    case UNITS_M:
+      return RESERVATION_FLAG_M;
+    /* The multi-unit cases below yield masks with several bits set,
+       meaning the insn may be assigned to any one of those units.  */
+    case UNITS_LS:
+      return RESERVATION_FLAG_LS;
+    case UNITS_DL:
+      return RESERVATION_FLAG_DL;
+    case UNITS_DS:
+      return RESERVATION_FLAG_DS;
+    case UNITS_DLS:
+      return RESERVATION_FLAG_DLS;
+    default:
+      return 0;
+    }
+}
+
+/* Compute the side of the machine used by INSN, which reserves UNITS.
+   Returns 0 for the A side and 1 for the B side.
+   This must match the reservations in the scheduling description.  */
+static int
+get_insn_side (rtx insn, enum attr_units units)
+{
+  /* Address-generating D-unit insns are assigned by the register file
+     used for the address, not the destination.  */
+  if (units == UNITS_D_ADDR)
+    return (get_attr_addr_regfile (insn) == ADDR_REGFILE_A ? 0 : 1);
+  else
+    {
+      enum attr_dest_regfile rf = get_attr_dest_regfile (insn);
+      /* NOTE(review): DEST_REGFILE_ANY branches go to side A, everything
+	 else defaults to side B — this mirrors the .md reservations.  */
+      if (rf == DEST_REGFILE_ANY)
+	return get_attr_type (insn) == TYPE_BRANCH ? 0 : 1;
+      else
+	return rf == DEST_REGFILE_A ? 0 : 1;
+    }
+}
+
+/* After scheduling, walk the insns between HEAD and END and assign unit
+   reservations.  Insns scheduled in the same cycle are grouped starting
+   at an insn with TImode (the cycle-start marker set by the scheduler);
+   within each group units are handed out in order of decreasing
+   specificity of the insn's unit requirements.  */
+static void
+assign_reservations (rtx head, rtx end)
+{
+  rtx insn;
+  for (insn = head; insn != NEXT_INSN (end); insn = NEXT_INSN (insn))
+    {
+      rtx within;
+      int pass;
+      /* Bitmask of units already taken, per side (0 = A, 1 = B).  */
+      int rsrv[2];
+      /* Demand count per side and unit, gathered on pass 1 and used as a
+	 cost function when breaking ties on later passes.  */
+      int rsrv_count[2][4];
+
+      if (GET_MODE (insn) != TImode)
+	continue;
+
+      rsrv[0] = rsrv[1] = 0;
+      memset (rsrv_count, 0, sizeof rsrv_count);
+
+      /* Walk through the insns that occur in the same cycle.  We use multiple
+	 passes to assign units, assigning for insns with the most specific
+	 requirements first.  */
+      for (pass = 0; pass < 4; pass++)
+	for (within = insn;
+	     (within != NEXT_INSN (end)
+	      && (within == insn || GET_MODE (within) != TImode));
+	     within = NEXT_INSN (within))
+	  {
+	    int this_rsrv, side;
+	    int icode;
+	    enum attr_units units;
+	    int j;
+
+	    if (!NONDEBUG_INSN_P (within))
+	      continue;
+	    icode = recog_memoized (within);
+	    if (icode < 0)
+	      continue;
+	    units = get_attr_units (within);
+	    this_rsrv = get_reservation_flags (units);
+	    if (this_rsrv == 0)
+	      continue;
+	    side = get_insn_side (within, units);
+
+	    /* A single-bit mask means the insn has no choice of unit;
+	       record it immediately (effectively pass 0, but harmlessly
+	       re-run on later passes since the result is idempotent).  */
+	    if ((this_rsrv & (this_rsrv - 1)) == 0)
+	      {
+		int t = exact_log2 (this_rsrv) + side * 4;
+		rsrv[side] |= this_rsrv;
+		INSN_INFO_ENTRY (INSN_UID (within)).reservation = t;
+		continue;
+	      }
+
+	    if (pass == 1)
+	      {
+		for (j = 0; j < 4; j++)
+		  if (this_rsrv & (1 << j))
+		    rsrv_count[side][j]++;
+		continue;
+	      }
+	    /* Pass 2 handles two-unit choices; the most flexible DLS
+	       insns wait until pass 3 so they take whatever is left.  */
+	    if ((pass == 2 && this_rsrv != RESERVATION_FLAG_DLS)
+		|| (pass == 3 && this_rsrv == RESERVATION_FLAG_DLS))
+	      {
+		int best = -1, best_cost = INT_MAX;
+		for (j = 0; j < 4; j++)
+		  if ((this_rsrv & (1 << j))
+		      && !(rsrv[side] & (1 << j))
+		      && rsrv_count[side][j] < best_cost)
+		    {
+		      best_cost = rsrv_count[side][j];
+		      best = j;
+		    }
+		gcc_assert (best != -1);
+		rsrv[side] |= 1 << best;
+		for (j = 0; j < 4; j++)
+		  if ((this_rsrv & (1 << j)) && j != best)
+		    rsrv_count[side][j]--;
+
+		INSN_INFO_ENTRY (INSN_UID (within)).reservation
+		  = best + side * 4;
+	      }
+	  }
+    }
+}
+\f
+/* Backend scheduling state.  */
+typedef struct c6x_sched_context
+{
+  /* The current scheduler clock, saved in the sched_reorder hook.  */
+  int curr_sched_clock;
+
+  /* Number of insns issued so far in this cycle.  */
+  int issued_this_cycle;
+
+  /* We record the time at which each jump occurs in JUMP_CYCLES.  The
+     theoretical maximum for number of jumps in flight is 12: 2 every
+     cycle, with a latency of 6 cycles each.  This is a circular
+     buffer; JUMP_CYCLE_INDEX is the pointer to the start.  Earlier
+     jumps have a higher index.  This array should be accessed through
+     the jump_cycle function.  */
+  int jump_cycles[12];
+  int jump_cycle_index;
+
+  /* In parallel with jump_cycles, this array records the opposite of
+     the condition used in each pending jump.  This is used to
+     predicate insns that are scheduled in the jump's delay slots.  If
+     this is NULL_RTX no such predication happens.  */
+  rtx jump_cond[12];
+
+  /* Similar to the jump_cycles mechanism, but here we take into
+     account all insns with delay slots, to avoid scheduling asms into
+     the delay slots.  */
+  int delays_finished_at;
+
+  /* The following variable value is the last issued insn.  */
+  rtx last_scheduled_insn;
+
+  /* Committed per-register counts of reads, and of reads through a
+     cross path, in the current cycle; and for each register, the clock
+     cycle in which it was last written.  */
+  int reg_n_accesses[FIRST_PSEUDO_REGISTER];
+  int reg_n_xaccesses[FIRST_PSEUDO_REGISTER];
+  int reg_set_in_cycle[FIRST_PSEUDO_REGISTER];
+
+  /* Scratch copies of the two access arrays, updated speculatively by
+     c6x_registers_update and committed in c6x_variable_issue once an
+     insn is actually issued.  */
+  int tmp_reg_n_accesses[FIRST_PSEUDO_REGISTER];
+  int tmp_reg_n_xaccesses[FIRST_PSEUDO_REGISTER];
+} *c6x_sched_context_t;
+
+/* The current scheduling state.  */
+static struct c6x_sched_context ss;
+
+/* Set when we discover while processing an insn that it would lead to too
+   many accesses of the same register.  */
+static bool reg_access_stall;
+
+/* Look up the jump cycle with index N.  For an out-of-bounds N, we return 0,
+   so the caller does not specifically have to test for it.  */
+static int
+get_jump_cycle (int n)
+{
+  if (n >= 12)
+    return 0;
+  /* Index into the circular buffer relative to its current start.  */
+  n += ss.jump_cycle_index;
+  if (n >= 12)
+    n -= 12;
+  return ss.jump_cycles[n];
+}
+
+/* Look up the jump condition with index N, or NULL_RTX if N is out of
+   bounds (matching get_jump_cycle's conventions).  */
+static rtx
+get_jump_cond (int n)
+{
+  if (n >= 12)
+    return NULL_RTX;
+  n += ss.jump_cycle_index;
+  if (n >= 12)
+    n -= 12;
+  return ss.jump_cond[n];
+}
+
+/* Return the index of the first jump that occurs after CLOCK_VAR.  If no jump
+   has delay slots beyond CLOCK_VAR, return -1.  */
+static int
+first_jump_index (int clock_var)
+{
+  int retval = -1;
+  int n = 0;
+  /* Entries are ordered newest-first; scan until we find one whose
+     cycle is already past (unused slots read as 0 and stop the loop).  */
+  for (;;)
+    {
+      int t = get_jump_cycle (n);
+      if (t <= clock_var)
+	break;
+      retval = n;
+      n++;
+    }
+  return retval;
+}
+
+/* Add a new entry in our scheduling state for a jump that occurs in CYCLE
+   and has the opposite condition of COND.  The circular buffer's start
+   moves down so the newest jump is always at index 0.  */
+static void
+record_jump (int cycle, rtx cond)
+{
+  if (ss.jump_cycle_index == 0)
+    ss.jump_cycle_index = 11;
+  else
+    ss.jump_cycle_index--;
+  ss.jump_cycles[ss.jump_cycle_index] = cycle;
+  ss.jump_cond[ss.jump_cycle_index] = cond;
+}
+
+/* Set the clock cycle of INSN to CYCLE.  Also clears the insn's entry in
+   new_conditions.  */
+static void
+insn_set_clock (rtx insn, int cycle)
+{
+  unsigned uid = INSN_UID (insn);
+
+  /* Grow the insn_info vector with some slack so repeated calls do not
+     reallocate every time.  */
+  if (uid >= INSN_INFO_LENGTH)
+    VEC_safe_grow (c6x_sched_insn_info, heap, insn_info, uid * 5 / 4 + 10);
+
+  INSN_INFO_ENTRY (uid).clock = cycle;
+  INSN_INFO_ENTRY (uid).new_cond = NULL;
+  INSN_INFO_ENTRY (uid).ebb_start = false;
+}
+
+/* Return the clock cycle we set for the insn with uid UID.  */
+static int
+insn_uid_get_clock (int uid)
+{
+  return INSN_INFO_ENTRY (uid).clock;
+}
+
+/* Return the clock cycle we set for INSN.  */
+static int
+insn_get_clock (rtx insn)
+{
+  return insn_uid_get_clock (INSN_UID (insn));
+}
+
+/* Examine INSN, and if it is a conditional jump of any kind, return
+   the opposite of the condition in which it branches.  Otherwise,
+   return NULL_RTX.  */
+static rtx
+condjump_opposite_condition (rtx insn)
+{
+  rtx pat = PATTERN (insn);
+  int icode = INSN_CODE (insn);
+  rtx x = NULL;
+
+  if (icode == CODE_FOR_br_true || icode == CODE_FOR_br_false)
+    {
+      x = XEXP (SET_SRC (pat), 0);
+      /* br_false already branches on the negated condition, so its
+	 stored condition is exactly the opposite we want.  */
+      if (icode == CODE_FOR_br_false)
+	return x;
+    }
+  if (GET_CODE (pat) == COND_EXEC)
+    {
+      /* Recognize predicated returns, real_jump unspecs and predicated
+	 sets of the PC as conditional jumps.  */
+      rtx t = COND_EXEC_CODE (pat);
+      if ((GET_CODE (t) == PARALLEL
+	   && GET_CODE (XVECEXP (t, 0, 0)) == RETURN)
+	  || (GET_CODE (t) == UNSPEC && XINT (t, 1) == UNSPEC_REAL_JUMP)
+	  || (GET_CODE (t) == SET && SET_DEST (t) == pc_rtx))
+	x = COND_EXEC_TEST (pat);
+    }
+
+  /* Negate the condition we found.  Only EQ/NE occur here, so flipping
+     the code suffices.  */
+  if (x != NULL_RTX)
+    {
+      enum rtx_code code = GET_CODE (x);
+      x = gen_rtx_fmt_ee (code == EQ ? NE : EQ,
+			  GET_MODE (x), XEXP (x, 0),
+			  XEXP (x, 1));
+    }
+  return x;
+}
+
+/* Return true iff COND1 and COND2 are exactly opposite conditions
+   one of them NE and the other EQ.  */
+static bool
+conditions_opposite_p (rtx cond1, rtx cond2)
+{
+  return (rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
+	  && rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1))
+	  && GET_CODE (cond1) == reverse_condition (GET_CODE (cond2)));
+}
+
+/* Return true if we can add a predicate COND to INSN, or if INSN
+   already has that predicate.  If DOIT is true, also perform the
+   modification.  */
+static bool
+predicate_insn (rtx insn, rtx cond, bool doit)
+{
+  int icode;
+  if (cond == NULL_RTX)
+    {
+      gcc_assert (!doit);
+      return false;
+    }
+
+  /* The common case: a predicable insn that is not yet predicated.  */
+  if (get_attr_predicable (insn) == PREDICABLE_YES
+      && GET_CODE (PATTERN (insn)) != COND_EXEC)
+    {
+      if (doit)
+	{
+	  rtx newpat = gen_rtx_COND_EXEC (VOIDmode, cond, PATTERN (insn));
+	  PATTERN (insn) = newpat;
+	  /* Force re-recognition with the new pattern.  */
+	  INSN_CODE (insn) = -1;
+	}
+      return true;
+    }
+  /* Already predicated with exactly the condition we want.  */
+  if (GET_CODE (PATTERN (insn)) == COND_EXEC
+      && rtx_equal_p (COND_EXEC_TEST (PATTERN (insn)), cond))
+    return true;
+  icode = INSN_CODE (insn);
+  /* Unconditional jumps can be turned into conditional ones.  */
+  if (icode == CODE_FOR_real_jump
+      || icode == CODE_FOR_jump
+      || icode == CODE_FOR_indirect_jump)
+    {
+      rtx pat = PATTERN (insn);
+      rtx dest = (icode == CODE_FOR_real_jump ? XVECEXP (pat, 0, 0)
+		  : icode == CODE_FOR_jump ? XEXP (SET_SRC (pat), 0)
+		  : SET_SRC (pat));
+      if (doit)
+	{
+	  rtx newpat;
+	  /* Indirect jumps (register destination) are wrapped in a
+	     COND_EXEC; direct jumps become a br_true pattern.  */
+	  if (REG_P (dest))
+	    newpat = gen_rtx_COND_EXEC (VOIDmode, cond, PATTERN (insn));
+	  else
+	    newpat = gen_br_true (cond, XEXP (cond, 0), dest);
+	  PATTERN (insn) = newpat;
+	  INSN_CODE (insn) = -1;
+	}
+      return true;
+    }
+  /* A conditional branch is compatible if its own condition matches
+     (br_true) or is the exact opposite (br_false) of COND.  */
+  if (INSN_CODE (insn) == CODE_FOR_br_true)
+    {
+      rtx br_cond = XEXP (SET_SRC (PATTERN (insn)), 0);
+      return rtx_equal_p (br_cond, cond);
+    }
+  if (INSN_CODE (insn) == CODE_FOR_br_false)
+    {
+      rtx br_cond = XEXP (SET_SRC (PATTERN (insn)), 0);
+      return conditions_opposite_p (br_cond, cond);
+    }
+  return false;
+}
+
+/* Initialize SC to a clean state (no jumps in flight, no register
+   accesses recorded).  Used by c6x_init_sched_context and
+   c6x_sched_init.  */
+static void
+init_sched_state (c6x_sched_context_t sc)
+{
+  sc->last_scheduled_insn = NULL_RTX;
+  sc->issued_this_cycle = 0;
+  memset (sc->jump_cycles, 0, sizeof sc->jump_cycles);
+  memset (sc->jump_cond, 0, sizeof sc->jump_cond);
+  sc->jump_cycle_index = 0;
+  sc->delays_finished_at = 0;
+  sc->curr_sched_clock = 0;
+
+  memset (sc->reg_n_accesses, 0, sizeof sc->reg_n_accesses);
+  memset (sc->reg_n_xaccesses, 0, sizeof sc->reg_n_xaccesses);
+  memset (sc->reg_set_in_cycle, 0, sizeof sc->reg_set_in_cycle);
+}
+
+/* Implement TARGET_SCHED_ALLOC_SCHED_CONTEXT: allocate store for a new
+   scheduling context.  */
+static void *
+c6x_alloc_sched_context (void)
+{
+  return xmalloc (sizeof (struct c6x_sched_context));
+}
+
+/* Implement TARGET_SCHED_INIT_SCHED_CONTEXT.  If CLEAN_P is true then
+   initializes _SC with clean data, and from the global context
+   otherwise.  */
+static void
+c6x_init_sched_context (void *_sc, bool clean_p)
+{
+  c6x_sched_context_t sc = (c6x_sched_context_t) _sc;
+
+  if (clean_p)
+    {
+      init_sched_state (sc);
+    }
+  else
+    *sc = ss;
+}
+
+/* Implement TARGET_SCHED_SET_SCHED_CONTEXT: sets the global scheduling
+   context to the one pointed to by _SC.  */
+static void
+c6x_set_sched_context (void *_sc)
+{
+  c6x_sched_context_t sc = (c6x_sched_context_t) _sc;
+
+  gcc_assert (sc != NULL);
+  ss = *sc;
+}
+
+/* Implement TARGET_SCHED_FREE_SCHED_CONTEXT: free _SC.  */
+static void
+c6x_free_sched_context (void *_sc)
+{
+  free (_sc);
+}
+
+/* Implement TARGET_SCHED_SET_SCHED_FLAGS: provide information about
+   speculation capabilities, and set the DO_BACKTRACKING flag so the
+   scheduler can undo decisions when modeling jump delay slots.  */
+static void
+c6x_set_sched_flags (spec_info_t spec_info)
+{
+  unsigned int *flags = &(current_sched_info->flags);
+
+  if (*flags & SCHED_EBB)
+    {
+      *flags |= DO_BACKTRACKING;
+    }
+
+  /* No speculation support.  */
+  spec_info->mask = 0;
+}
+
+/* Implement the TARGET_SCHED_ISSUE_RATE hook.  The C6X can issue one
+   insn per functional unit per cycle, eight units in all.  */
+
+static int
+c6x_issue_rate (void)
+{
+  return 8;
+}
+
+/* Implement TARGET_SCHED_INIT.  We're beginning a new block; initialize
+   the global scheduling state.  */
+
+static void
+c6x_sched_init (FILE *dump ATTRIBUTE_UNUSED,
+		int sched_verbose ATTRIBUTE_UNUSED,
+		int max_ready ATTRIBUTE_UNUSED)
+{
+  init_sched_state (&ss);
+}
+
+/* Record a read of hard register REGNO in the insn currently being
+   examined; CROSS is true if the read goes through a cross path.  Sets
+   REG_ACCESS_STALL when the per-cycle limits are exceeded.  */
+static void
+c6x_mark_regno_read (int regno, bool cross)
+{
+  int t = ++ss.tmp_reg_n_accesses[regno];
+
+  /* At most four reads of the same register per cycle.  */
+  if (t > 4)
+    reg_access_stall = true;
+
+  if (cross)
+    {
+      int set_cycle = ss.reg_set_in_cycle[regno];
+      /* This must be done in this way rather than by tweaking things in
+	 adjust_cost, since the stall occurs even for insns with opposite
+	 predicates, and the scheduler may not even see a dependency.  */
+      if (set_cycle > 0 && set_cycle == ss.curr_sched_clock)
+	reg_access_stall = true;
+      /* This doesn't quite do anything yet as we're only modeling one
+	 x unit.  */
+      ++ss.tmp_reg_n_xaccesses[regno];
+    }
+}
+
+/* Note that REG is read in the insn being examined.  If CROSS, it
+   means the access is through a cross path.  Update the temporary reg
+   access arrays, and set REG_ACCESS_STALL if the insn can't be issued
+   in the current cycle.  */
+
+static void
+c6x_mark_reg_read (rtx reg, bool cross)
+{
+  unsigned regno = REGNO (reg);
+  unsigned nregs = hard_regno_nregs[regno][GET_MODE (reg)];
+
+  /* Mark each hard register covered by REG's mode.  */
+  while (nregs-- > 0)
+    c6x_mark_regno_read (regno + nregs, cross);
+}
+
+/* Note that register REG is written in cycle CYCLES.  */
+
+static void
+c6x_mark_reg_written (rtx reg, int cycles)
+{
+  unsigned regno = REGNO (reg);
+  unsigned nregs = hard_regno_nregs[regno][GET_MODE (reg)];
+
+  while (nregs-- > 0)
+    ss.reg_set_in_cycle[regno + nregs] = cycles;
+}
+
+/* Update the register state information for an instruction whose
+   body is X.  Return true if the instruction has to be delayed until the
+   next cycle.  Works on the tmp_* copies of the access arrays; callers
+   commit them only when the insn is actually issued.  */
+
+static bool
+c6x_registers_update (rtx insn)
+{
+  enum attr_cross cross;
+  enum attr_dest_regfile destrf;
+  int i, nops;
+  rtx x;
+
+  if (!reload_completed || recog_memoized (insn) < 0)
+    return false;
+
+  reg_access_stall = false;
+  /* Work on scratch copies so a rejected insn leaves the committed
+     counts untouched.  */
+  memcpy (ss.tmp_reg_n_accesses, ss.reg_n_accesses,
+	  sizeof ss.tmp_reg_n_accesses);
+  memcpy (ss.tmp_reg_n_xaccesses, ss.reg_n_xaccesses,
+	  sizeof ss.tmp_reg_n_xaccesses);
+
+  extract_insn (insn);
+
+  cross = get_attr_cross (insn);
+  destrf = get_attr_dest_regfile (insn);
+
+  nops = recog_data.n_operands;
+  x = PATTERN (insn);
+  if (GET_CODE (x) == COND_EXEC)
+    {
+      /* Count the read of the predicate register, and skip the two
+	 operands belonging to the COND_EXEC test.  */
+      c6x_mark_reg_read (XEXP (XEXP (x, 0), 0), false);
+      nops -= 2;
+    }
+
+  for (i = 0; i < nops; i++)
+    {
+      rtx op = recog_data.operand[i];
+      if (recog_data.operand_type[i] == OP_OUT)
+	continue;
+      if (REG_P (op))
+	{
+	  /* An operand in the same register file as the destination is
+	     not read over the cross path even in a cross insn.  */
+	  bool this_cross = cross;
+	  if (destrf == DEST_REGFILE_A && A_REGNO_P (REGNO (op)))
+	    this_cross = false;
+	  if (destrf == DEST_REGFILE_B && B_REGNO_P (REGNO (op)))
+	    this_cross = false;
+	  c6x_mark_reg_read (op, this_cross);
+	}
+      else if (MEM_P (op))
+	{
+	  /* Address registers are always read on the local path.  */
+	  op = XEXP (op, 0);
+	  switch (GET_CODE (op))
+	    {
+	    case POST_INC:
+	    case PRE_INC:
+	    case POST_DEC:
+	    case PRE_DEC:
+	      op = XEXP (op, 0);
+	      /* fall through */
+	    case REG:
+	      c6x_mark_reg_read (op, false);
+	      break;
+	    case POST_MODIFY:
+	    case PRE_MODIFY:
+	      op = XEXP (op, 1);
+	      gcc_assert (GET_CODE (op) == PLUS);
+	      /* fall through */
+	    case PLUS:
+	      c6x_mark_reg_read (XEXP (op, 0), false);
+	      if (REG_P (XEXP (op, 1)))
+		c6x_mark_reg_read (XEXP (op, 1), false);
+	      break;
+	    case SYMBOL_REF:
+	    case LABEL_REF:
+	    case CONST:
+	      /* Symbolic addresses are B14-relative (DP-relative
+		 addressing), so they implicitly read B14.  */
+	      c6x_mark_regno_read (REG_B14, false);
+	      break;
+	    default:
+	      gcc_unreachable ();
+	    }
+	}
+      else if (!CONSTANT_P (op) && strlen (recog_data.constraints[i]) > 0)
+	gcc_unreachable ();
+    }
+  return reg_access_stall;
+}
+
+/* Helper function for the TARGET_SCHED_REORDER and
+   TARGET_SCHED_REORDER2 hooks.  If scheduling an insn would be unsafe
+   in the current cycle, move it down in the ready list and return the
+   number of non-unsafe insns.  READY/PN_READY describe the ready list
+   (best candidates at the end); CLOCK_VAR is the current cycle.  */
+
+static int
+c6x_sched_reorder_1 (rtx *ready, int *pn_ready, int clock_var)
+{
+  int n_ready = *pn_ready;
+  rtx *e_ready = ready + n_ready;
+  rtx *insnp;
+  int first_jump;
+
+  /* Keep track of conflicts due to a limit number of register accesses,
+     and due to stalls incurred by too early accesses of registers using
+     cross paths.  */
+
+  for (insnp = ready; insnp < e_ready; insnp++)
+    {
+      rtx insn = *insnp;
+      int icode = recog_memoized (insn);
+      bool is_asm = (icode < 0
+		     && (GET_CODE (PATTERN (insn)) == ASM_INPUT
+			 || asm_noperands (PATTERN (insn)) >= 0));
+      bool no_parallel = (is_asm
+			  || (icode >= 0
+			      && get_attr_type (insn) == TYPE_ATOMIC));
+
+      /* We delay asm insns until all delay slots are exhausted.  We can't
+	 accurately tell how many cycles an asm takes, and the main scheduling
+	 code always assumes at least 1 cycle, which may be wrong.  */
+      if ((no_parallel
+	   && (ss.issued_this_cycle > 0 || clock_var < ss.delays_finished_at))
+	  || c6x_registers_update (insn))
+	{
+	  /* Rotate the unsafe insn to the front (worst) end of the
+	     ready list and shrink the usable part.  */
+	  memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
+	  *ready = insn;
+	  n_ready--;
+	  ready++;
+	}
+      else if (shadow_p (insn))
+	{
+	  /* Shadows stay schedulable but are deprioritized.  */
+	  memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
+	  *ready = insn;
+	}
+    }
+
+  /* Ensure that no other jump is scheduled in jump delay slots, since
+     it would put the machine into the wrong state.  Also, we must
+     avoid scheduling insns that have a latency longer than the
+     remaining jump delay slots, as the code at the jump destination
+     won't be prepared for it.
+
+     However, we can relax this condition somewhat.  The rest of the
+     scheduler will automatically avoid scheduling an insn on which
+     the jump shadow depends so late that its side effect happens
+     after the jump.  This means that if we see an insn with a longer
+     latency here, it can safely be scheduled if we can ensure that it
+     has a predicate opposite of the previous jump: the side effect
+     will happen in what we think of as the same basic block.  In
+     c6x_variable_issue, we will record the necessary predicate in
+     new_conditions, and after scheduling is finished, we will modify
+     the insn.
+
+     Special care must be taken whenever there is more than one jump
+     in flight.  */
+
+  first_jump = first_jump_index (clock_var);
+  if (first_jump != -1)
+    {
+      int first_cycle = get_jump_cycle (first_jump);
+      rtx first_cond = get_jump_cond (first_jump);
+      int second_cycle = 0;
+
+      if (first_jump > 0)
+	second_cycle = get_jump_cycle (first_jump - 1);
+
+      for (insnp = ready; insnp < e_ready; insnp++)
+	{
+	  rtx insn = *insnp;
+	  int icode = recog_memoized (insn);
+	  bool is_asm = (icode < 0
+			 && (GET_CODE (PATTERN (insn)) == ASM_INPUT
+			     || asm_noperands (PATTERN (insn)) >= 0));
+	  int this_cycles;
+	  enum attr_type type;
+
+	  /* Asms were already moved out of the usable window above.  */
+	  gcc_assert (!is_asm);
+	  if (icode < 0)
+	    continue;
+	  this_cycles = get_attr_cycles (insn);
+	  type = get_attr_type (insn);
+	  /* Treat branches specially; there is also a hazard if two jumps
+	     end at the same cycle.  */
+	  if (type == TYPE_BRANCH || type == TYPE_CALL)
+	    this_cycles++;
+	  if (clock_var + this_cycles <= first_cycle)
+	    continue;
+	  /* The insn's effect would land beyond the first pending jump;
+	     it is only safe if it does not also cross the second jump
+	     and can be predicated opposite the first.  */
+	  if ((first_jump > 0 && clock_var + this_cycles > second_cycle)
+	      || !predicate_insn (insn, first_cond, false))
+	    {
+	      memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
+	      *ready = insn;
+	      n_ready--;
+	      ready++;
+	    }
+	}
+    }
+
+  return n_ready;
+}
+
+/* Implement the TARGET_SCHED_REORDER hook.  We save the current clock
+   for later and clear the register access information for the new
+   cycle.  We also move asm statements out of the way if they would be
+   scheduled in a delay slot.  */
+
+static int
+c6x_sched_reorder (FILE *dump ATTRIBUTE_UNUSED,
+		   int sched_verbose ATTRIBUTE_UNUSED,
+		   rtx *ready ATTRIBUTE_UNUSED,
+		   int *pn_ready ATTRIBUTE_UNUSED, int clock_var)
+{
+  /* A new cycle begins: reset the per-cycle access counts.  */
+  ss.curr_sched_clock = clock_var;
+  ss.issued_this_cycle = 0;
+  memset (ss.reg_n_accesses, 0, sizeof ss.reg_n_accesses);
+  memset (ss.reg_n_xaccesses, 0, sizeof ss.reg_n_xaccesses);
+
+  if (ready == NULL)
+    return 0;
+
+  return c6x_sched_reorder_1 (ready, pn_ready, clock_var);
+}
+
+/* Implement the TARGET_SCHED_REORDER2 hook.  We use this to record the clock
+   cycle for every insn.  */
+
+static int
+c6x_sched_reorder2 (FILE *dump ATTRIBUTE_UNUSED,
+		    int sched_verbose ATTRIBUTE_UNUSED,
+		    rtx *ready ATTRIBUTE_UNUSED,
+		    int *pn_ready ATTRIBUTE_UNUSED, int clock_var)
+{
+  /* FIXME: the assembler rejects labels inside an execute packet.
+     This can occur if prologue insns are scheduled in parallel with
+     others, so we avoid this here.  Also make sure that nothing is
+     scheduled in parallel with a TYPE_ATOMIC insn or after a jump.  */
+  if (RTX_FRAME_RELATED_P (ss.last_scheduled_insn)
+      || JUMP_P (ss.last_scheduled_insn)
+      || (recog_memoized (ss.last_scheduled_insn) >= 0
+	  && get_attr_type (ss.last_scheduled_insn) == TYPE_ATOMIC))
+    {
+      int n_ready = *pn_ready;
+      rtx *e_ready = ready + n_ready;
+      rtx *insnp;
+
+      /* Only shadow insns (which emit no code) may issue alongside the
+	 last scheduled insn; push everything else down the list.  */
+      for (insnp = ready; insnp < e_ready; insnp++)
+	{
+	  rtx insn = *insnp;
+	  if (!shadow_p (insn))
+	    {
+	      memmove (ready + 1, ready, (insnp - ready) * sizeof (rtx));
+	      *ready = insn;
+	      n_ready--;
+	      ready++;
+	    }
+	}
+      return n_ready;
+    }
+
+  return c6x_sched_reorder_1 (ready, pn_ready, clock_var);
+}
+
+/* Subroutine of maybe_clobber_cond, called through note_stores.  X is
+   the location being stored; DATA1 points at the jump-condition slot to
+   invalidate when the store overlaps it.  */
+
+static void
+clobber_cond_1 (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data1)
+{
+  rtx *cond = (rtx *)data1;
+  if (*cond != NULL_RTX && reg_overlap_mentioned_p (x, *cond))
+    *cond = NULL_RTX;
+}
+
+/* Examine INSN, and if it destroys the conditions have recorded for
+   any of the jumps in flight, clear that condition so that we don't
+   predicate any more insns.  CLOCK_VAR helps us limit the search to
+   only those jumps which are still in flight.  */
+
+static void
+maybe_clobber_cond (rtx insn, int clock_var)
+{
+  int n, idx;
+  idx = ss.jump_cycle_index;
+  for (n = 0; n < 12; n++, idx++)
+    {
+      rtx cond, link;
+      int cycle;
+
+      if (idx >= 12)
+	idx -= 12;
+      cycle = ss.jump_cycles[idx];
+      /* Entries are ordered newest-first; once we reach a jump whose
+	 delay slots are over, all remaining entries are older too.  */
+      if (cycle <= clock_var)
+	return;
+
+      cond = ss.jump_cond[idx];
+      if (cond == NULL_RTX)
+	continue;
+
+      /* A call clobbers an unknown set of registers; conservatively
+	 drop the condition entirely.  */
+      if (CALL_P (insn))
+	{
+	  ss.jump_cond[idx] = NULL_RTX;
+	  continue;
+	}
+
+      note_stores (PATTERN (insn), clobber_cond_1, ss.jump_cond + idx);
+      /* Autoincrement side effects are not seen by note_stores; handle
+	 them via the REG_INC notes.  */
+      for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
+	if (REG_NOTE_KIND (link) == REG_INC)
+	  clobber_cond_1 (XEXP (link, 0), NULL_RTX, ss.jump_cond + idx);
+    }
+}
+
+/* Implement the TARGET_SCHED_VARIABLE_ISSUE hook.  We are about to
+   issue INSN.  Return the number of insns left on the ready queue
+   that can be issued this cycle.
+   We use this hook to record clock cycles and reservations for every insn.  */
+
+static int
+c6x_variable_issue (FILE *dump ATTRIBUTE_UNUSED,
+		    int sched_verbose ATTRIBUTE_UNUSED,
+		    rtx insn, int can_issue_more ATTRIBUTE_UNUSED)
+{
+  ss.last_scheduled_insn = insn;
+  /* USE and CLOBBER insns emit no code and do not occupy an issue slot.  */
+  if (GET_CODE (PATTERN (insn)) != USE && GET_CODE (PATTERN (insn)) != CLOBBER)
+    ss.issued_this_cycle++;
+  if (insn_info)
+    {
+      int curr_clock = ss.curr_sched_clock;
+      int uid = INSN_UID (insn);
+      int icode = recog_memoized (insn);
+      rtx first_cond;
+      int first, first_cycle;
+
+      insn_set_clock (insn, curr_clock);
+      INSN_INFO_ENTRY (uid).ebb_start
+	= curr_clock == 0 && ss.issued_this_cycle == 1;
+
+      /* If this insn's effect would extend past the delay slots of the
+	 first jump in flight, remember the jump's opposite condition so
+	 the insn can be predicated afterwards (see c6x_sched_reorder_1).  */
+      first = first_jump_index (ss.curr_sched_clock);
+      if (first == -1)
+	{
+	  first_cycle = 0;
+	  first_cond = NULL_RTX;
+	}
+      else
+	{
+	  first_cycle = get_jump_cycle (first);
+	  first_cond = get_jump_cond (first);
+	}
+      if (icode >= 0
+	  && first_cycle > curr_clock
+	  && first_cond != NULL_RTX
+	  && (curr_clock + get_attr_cycles (insn) > first_cycle
+	      || get_attr_type (insn) == TYPE_BRANCH
+	      || get_attr_type (insn) == TYPE_CALL))
+	INSN_INFO_ENTRY (uid).new_cond = first_cond;
+
+      maybe_clobber_cond (insn, curr_clock);
+
+      if (icode >= 0)
+	{
+	  int i, cycles;
+
+	  /* Commit the speculative per-cycle register access counts now
+	     that the insn is definitely issued.  */
+	  c6x_registers_update (insn);
+	  memcpy (ss.reg_n_accesses, ss.tmp_reg_n_accesses,
+		  sizeof ss.reg_n_accesses);
+	  /* Copy the cross-path counts from their own scratch array.
+	     The original code mistakenly copied TMP_REG_N_ACCESSES here.  */
+	  memcpy (ss.reg_n_xaccesses, ss.tmp_reg_n_xaccesses,
+		  sizeof ss.reg_n_xaccesses);
+
+	  cycles = get_attr_cycles (insn);
+	  if (ss.delays_finished_at < ss.curr_sched_clock + cycles)
+	    ss.delays_finished_at = ss.curr_sched_clock + cycles;
+	  if (get_attr_type (insn) == TYPE_BRANCH
+	      || get_attr_type (insn) == TYPE_CALL)
+	    {
+	      rtx opposite = condjump_opposite_condition (insn);
+	      record_jump (ss.curr_sched_clock + cycles, opposite);
+	    }
+
+	  /* Mark the cycles in which the destination registers are written.
+	     This is used for calculating stalls when using cross units.  */
+	  extract_insn (insn);
+	  /* Cross-path stalls don't apply to results of load insns.  */
+	  if (get_attr_type (insn) == TYPE_LOAD
+	      || get_attr_type (insn) == TYPE_LOADN
+	      || get_attr_type (insn) == TYPE_LOAD_SHADOW)
+	    cycles--;
+	  for (i = 0; i < recog_data.n_operands; i++)
+	    {
+	      rtx op = recog_data.operand[i];
+	      if (MEM_P (op))
+		{
+		  /* An autoincremented address register is updated one
+		     cycle after issue, independent of the insn latency.  */
+		  rtx addr = XEXP (op, 0);
+		  if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
+		    c6x_mark_reg_written (XEXP (addr, 0),
+					  insn_uid_get_clock (uid) + 1);
+		}
+	      if (recog_data.operand_type[i] != OP_IN
+		  && REG_P (op))
+		{
+		  c6x_mark_reg_written (op,
+					insn_uid_get_clock (uid) + cycles);
+		}
+	    }
+	}
+    }
+  return can_issue_more;
+}
+
+/* Implement the TARGET_SCHED_ADJUST_COST hook. We need special handling for
+ anti- and output dependencies. */
+
+static int
+c6x_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
+{
+ enum attr_type insn_type = TYPE_UNKNOWN, dep_insn_type = TYPE_UNKNOWN;
+ int dep_insn_code_number, insn_code_number;
+ int shadow_bonus = 0;
+ enum reg_note kind;
+ dep_insn_code_number = recog_memoized (dep_insn);
+ insn_code_number = recog_memoized (insn);
+
+ /* Insns that fail recog_memoized keep TYPE_UNKNOWN and take the
+ default paths below. */
+ if (dep_insn_code_number >= 0)
+ dep_insn_type = get_attr_type (dep_insn);
+
+ if (insn_code_number >= 0)
+ insn_type = get_attr_type (insn);
+
+ /* A kind of zero denotes a true data dependence. */
+ kind = REG_NOTE_KIND (link);
+ if (kind == 0)
+ {
+ /* If we have a dependency on a load, and it's not for the result of
+ the load, it must be for an autoincrement. Reduce the cost in that
+ case. */
+ if (dep_insn_type == TYPE_LOAD)
+ {
+ rtx set = PATTERN (dep_insn);
+ if (GET_CODE (set) == COND_EXEC)
+ set = COND_EXEC_CODE (set);
+ if (GET_CODE (set) == UNSPEC)
+ cost = 1;
+ else
+ {
+ gcc_assert (GET_CODE (set) == SET);
+ if (!reg_overlap_mentioned_p (SET_DEST (set), PATTERN (insn)))
+ cost = 1;
+ }
+ }
+ }
+
+ /* A jump shadow needs to have its latency decreased by one. Conceptually,
+ it occurs in between two cycles, but we schedule it at the end of the
+ first cycle. */
+ if (shadow_type_p (insn_type))
+ shadow_bonus = 1;
+
+ /* Anti and output dependencies usually have zero cost, but we want
+ to insert a stall after a jump, and after certain floating point
+ insns that take more than one cycle to read their inputs. In the
+ future, we should try to find a better algorithm for scheduling
+ jumps. */
+ if (kind != 0)
+ {
+ /* We can get anti-dependencies against shadow insns. Treat these
+ like output dependencies, so that the insn is entirely finished
+ before the branch takes place. */
+ if (kind == REG_DEP_ANTI && insn_type == TYPE_SHADOW)
+ kind = REG_DEP_OUTPUT;
+ /* The constants below appear to track the result-write latency of
+ each insn class -- TODO confirm against the C6X pipeline
+ documentation. */
+ switch (dep_insn_type)
+ {
+ case TYPE_CALLP:
+ return 1;
+ case TYPE_BRANCH:
+ case TYPE_CALL:
+ if (get_attr_has_shadow (dep_insn) == HAS_SHADOW_Y)
+ /* This is a real_jump/real_call insn. These don't have
+ outputs, and ensuring the validity of scheduling things
+ in the delay slot is the job of
+ c6x_sched_reorder_1. */
+ return 0;
+ /* Unsplit calls can happen - e.g. for divide insns. */
+ return 6;
+ case TYPE_LOAD:
+ case TYPE_LOADN:
+ case TYPE_INTDP:
+ if (kind == REG_DEP_OUTPUT)
+ return 5 - shadow_bonus;
+ return 0;
+ case TYPE_MPY4:
+ case TYPE_FP4:
+ if (kind == REG_DEP_OUTPUT)
+ return 4 - shadow_bonus;
+ return 0;
+ case TYPE_MPY2:
+ if (kind == REG_DEP_OUTPUT)
+ return 2 - shadow_bonus;
+ return 0;
+ case TYPE_CMPDP:
+ if (kind == REG_DEP_OUTPUT)
+ return 2 - shadow_bonus;
+ return 2;
+ case TYPE_ADDDP:
+ case TYPE_MPYSPDP:
+ if (kind == REG_DEP_OUTPUT)
+ return 7 - shadow_bonus;
+ return 2;
+ case TYPE_MPYSP2DP:
+ if (kind == REG_DEP_OUTPUT)
+ return 5 - shadow_bonus;
+ return 2;
+ case TYPE_MPYI:
+ if (kind == REG_DEP_OUTPUT)
+ return 9 - shadow_bonus;
+ return 4;
+ case TYPE_MPYID:
+ case TYPE_MPYDP:
+ if (kind == REG_DEP_OUTPUT)
+ return 10 - shadow_bonus;
+ return 4;
+
+ default:
+ if (insn_type == TYPE_SPKERNEL)
+ return 0;
+ if (kind == REG_DEP_OUTPUT)
+ return 1 - shadow_bonus;
+
+ return 0;
+ }
+ }
+
+ return cost - shadow_bonus;
+}
+\f
+/* Create a SEQUENCE rtx to replace the instructions in SLOT, of which there
+ are N_FILLED. REAL_FIRST identifies the slot of the insn that appears
+ first in the original stream. */
+
+static void
+gen_one_bundle (rtx *slot, int n_filled, int real_first)
+{
+ rtx bundle;
+ rtx t;
+ int i;
+
+ bundle = gen_rtx_SEQUENCE (VOIDmode, gen_rtvec_v (n_filled, slot));
+ bundle = make_insn_raw (bundle);
+ BLOCK_FOR_INSN (bundle) = BLOCK_FOR_INSN (slot[0]);
+ INSN_LOCATOR (bundle) = INSN_LOCATOR (slot[0]);
+ /* Place the bundle at the position of the insn that originally came
+ first in the stream, which may not be slot[0]. */
+ PREV_INSN (bundle) = PREV_INSN (slot[real_first]);
+
+ t = NULL_RTX;
+
+ for (i = 0; i < n_filled; i++)
+ {
+ rtx insn = slot[i];
+ /* Unlink each insn from the main chain; it lives on inside the
+ SEQUENCE vector, chained to its bundle-mates via T. */
+ remove_insn (insn);
+ PREV_INSN (insn) = t ? t : PREV_INSN (bundle);
+ if (t != NULL_RTX)
+ NEXT_INSN (t) = insn;
+ t = insn;
+ if (i > 0)
+ INSN_LOCATOR (slot[i]) = INSN_LOCATOR (bundle);
+ }
+
+ /* Splice the SEQUENCE insn itself into the main insn chain. */
+ NEXT_INSN (bundle) = NEXT_INSN (PREV_INSN (bundle));
+ NEXT_INSN (t) = NEXT_INSN (bundle);
+ NEXT_INSN (PREV_INSN (bundle)) = bundle;
+ PREV_INSN (NEXT_INSN (bundle)) = bundle;
+}
+
+/* Move all parallel instructions into SEQUENCEs, so that no subsequent passes
+ try to insert labels in the middle. */
+
+static void
+c6x_gen_bundles (void)
+{
+ basic_block bb;
+ rtx insn, next, last_call;
+
+ FOR_EACH_BB (bb)
+ {
+ /* The machine is eight insns wide. We can have up to six shadow
+ insns, plus an extra slot for merging the jump shadow. */
+ rtx slot[15];
+ int n_filled = 0;
+ int first_slot = 0;
+
+ for (insn = BB_HEAD (bb);; insn = next)
+ {
+ int at_end;
+
+ if (NONDEBUG_INSN_P (insn))
+ {
+ /* Put calls at the start of the sequence. */
+ if (CALL_P (insn))
+ {
+ first_slot++;
+ if (n_filled)
+ {
+ memmove (&slot[1], &slot[0],
+ n_filled * sizeof (slot[0]));
+ }
+ if (!shadow_p (insn))
+ {
+ PUT_MODE (insn, TImode);
+ if (n_filled)
+ PUT_MODE (slot[1], VOIDmode);
+ }
+ n_filled++;
+ slot[0] = insn;
+ }
+ else
+ {
+ slot[n_filled++] = insn;
+ }
+ }
+
+ /* Skip debug insns and USE/CLOBBER patterns so we see the next
+ real candidate for the current bundle. */
+ next = NEXT_INSN (insn);
+ while (next && insn != BB_END (bb)
+ && !(NONDEBUG_INSN_P (next)
+ && GET_CODE (PATTERN (next)) != USE
+ && GET_CODE (PATTERN (next)) != CLOBBER))
+ {
+ insn = next;
+ next = NEXT_INSN (insn);
+ }
+
+ /* A TImode insn starts a new cycle; close the current bundle
+ unless the next insn is a call shadow, which may still belong
+ to it. */
+ at_end = insn == BB_END (bb);
+ if (at_end || (GET_MODE (next) == TImode
+ && !(shadow_p (next) && CALL_P (next))))
+ {
+ if (n_filled >= 2)
+ gen_one_bundle (slot, n_filled, first_slot);
+
+ n_filled = 0;
+ first_slot = 0;
+ }
+ if (at_end)
+ break;
+ }
+ }
+ /* Bundling, and emitting nops, can separate
+ NOTE_INSN_CALL_ARG_LOCATION from the corresponding calls. Fix
+ that up here. */
+ last_call = NULL_RTX;
+ for (insn = get_insns (); insn; insn = next)
+ {
+ next = NEXT_INSN (insn);
+ if (CALL_P (insn)
+ || (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE
+ && CALL_P (XVECEXP (PATTERN (insn), 0, 0))))
+ last_call = insn;
+ if (!NOTE_P (insn) || NOTE_KIND (insn) != NOTE_INSN_CALL_ARG_LOCATION)
+ continue;
+ if (NEXT_INSN (last_call) == insn)
+ continue;
+ /* Unlink the note and reinsert it immediately after LAST_CALL. */
+ NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
+ PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
+ PREV_INSN (insn) = last_call;
+ NEXT_INSN (insn) = NEXT_INSN (last_call);
+ PREV_INSN (NEXT_INSN (insn)) = insn;
+ NEXT_INSN (PREV_INSN (insn)) = insn;
+ last_call = insn;
+ }
+}
+
+/* Emit a NOP instruction for CYCLES cycles after insn AFTER. Return it. */
+
+static rtx
+emit_nop_after (int cycles, rtx after)
+{
+ rtx nop_insn;
+ int count = cycles;
+
+ /* mpydp has 9 delay slots, and we may schedule a stall for a cross-path
+ operation. We don't need the extra NOP since in this case, the hardware
+ will automatically insert the required stall. */
+ if (count == 10)
+ count = 9;
+
+ gcc_assert (count < 10);
+
+ nop_insn = emit_insn_after (gen_nop_count (GEN_INT (count)), after);
+ /* The NOP always starts a new execute packet. */
+ PUT_MODE (nop_insn, TImode);
+ return nop_insn;
+}
+
+/* Determine whether INSN is a call that needs to have a return label
+ placed. */
+
+static bool
+returning_call_p (rtx insn)
+{
+ if (CALL_P (insn))
+ {
+ /* Sibcalls, callp insns and call shadows do not return here. */
+ if (SIBLING_CALL_P (insn))
+ return false;
+ return (get_attr_type (insn) != TYPE_CALLP
+ && get_attr_type (insn) != TYPE_SHADOW);
+ }
+ /* A recognized non-call insn with TYPE_CALL is the real part of a
+ split call. */
+ return recog_memoized (insn) >= 0 && get_attr_type (insn) == TYPE_CALL;
+}
+
+/* Determine whether INSN's pattern can be converted to use callp. */
+static bool
+can_use_callp (rtx insn)
+{
+ int icode = recog_memoized (insn);
+
+ /* callp requires TARGET_INSNS_64PLUS and cannot be predicated. */
+ if (icode < 0
+ || !TARGET_INSNS_64PLUS
+ || GET_CODE (PATTERN (insn)) == COND_EXEC)
+ return false;
+ if (icode != CODE_FOR_real_call
+ && icode != CODE_FOR_call_internal
+ && icode != CODE_FOR_call_value_internal)
+ return false;
+ return get_attr_dest_regfile (insn) == DEST_REGFILE_ANY;
+}
+
+/* Convert the pattern of INSN, which must be a CALL_INSN, into a callp. */
+static void
+convert_to_callp (rtx insn)
+{
+ rtx target;
+
+ extract_insn (insn);
+ if (GET_CODE (PATTERN (insn)) != SET)
+ {
+ /* Plain call: operand 0 is the call target. */
+ target = recog_data.operand[0];
+ PATTERN (insn) = gen_callp (target);
+ INSN_CODE (insn) = CODE_FOR_callp;
+ }
+ else
+ {
+ /* Call with a return value: operand 0 is the destination. */
+ rtx dest = recog_data.operand[0];
+ target = recog_data.operand[1];
+ PATTERN (insn) = gen_callp_value (dest, target);
+ INSN_CODE (insn) = CODE_FOR_callp_value;
+ }
+}
+
+/* Scan forwards from INSN until we find the next insn that has mode TImode
+ (indicating it starts a new cycle), and occurs in cycle CLOCK.
+ Return it if we find such an insn, NULL_RTX otherwise. */
+static rtx
+find_next_cycle_insn (rtx insn, int clock)
+{
+ rtx scan = insn;
+
+ /* Step off INSN itself if it already starts a cycle. */
+ if (GET_MODE (scan) == TImode)
+ scan = next_real_insn (scan);
+ /* Advance to the start of the next execute packet. */
+ while (scan != NULL_RTX && GET_MODE (scan) != TImode)
+ scan = next_real_insn (scan);
+
+ return (scan != NULL_RTX && insn_get_clock (scan) == clock
+ ? scan : NULL_RTX);
+}
+
+/* If COND_INSN has a COND_EXEC condition, wrap the same condition
+ around PAT. Return PAT either unchanged or modified in this
+ way. */
+static rtx
+duplicate_cond (rtx pat, rtx cond_insn)
+{
+ rtx src_pat = PATTERN (cond_insn);
+
+ if (GET_CODE (src_pat) != COND_EXEC)
+ return pat;
+ /* Copy the test so the two insns do not share rtl. */
+ return gen_rtx_COND_EXEC (VOIDmode, copy_rtx (COND_EXEC_TEST (src_pat)),
+ pat);
+}
+
+/* Walk forward from INSN to find the last insn that issues in the same clock
+ cycle. */
+static rtx
+find_last_same_clock (rtx insn)
+{
+ rtx last = insn;
+ rtx scan;
+
+ /* Everything up to (but not including) the next TImode insn belongs
+ to INSN's execute packet. */
+ for (scan = next_real_insn (insn);
+ scan && GET_MODE (scan) != TImode;
+ scan = next_real_insn (scan))
+ if (!DEBUG_INSN_P (scan) && recog_memoized (scan) >= 0)
+ last = scan;
+
+ return last;
+}
+
+/* For every call insn in the function, emit code to load the return
+ address. For each call we create a return label and store it in
+ CALL_LABELS. If we are not scheduling, we emit the labels here,
+ otherwise the caller will do it later.
+ This function is called after final insn scheduling, but before creating
+ the SEQUENCEs that represent execute packets. */
+
+static void
+reorg_split_calls (rtx *call_labels)
+{
+ /* NOTE(review): reservation_mask is computed below but never read in
+ this function -- possibly left over from an earlier version. */
+ unsigned int reservation_mask = 0;
+ rtx insn = get_insns ();
+ gcc_assert (GET_CODE (insn) == NOTE);
+ insn = next_real_insn (insn);
+ while (insn)
+ {
+ int uid;
+ rtx next = next_real_insn (insn);
+
+ if (DEBUG_INSN_P (insn))
+ goto done;
+
+ /* A TImode insn starts a new execute packet. */
+ if (GET_MODE (insn) == TImode)
+ reservation_mask = 0;
+ uid = INSN_UID (insn);
+ if (c6x_flag_schedule_insns2 && recog_memoized (insn) >= 0)
+ reservation_mask |= 1 << INSN_INFO_ENTRY (uid).reservation;
+
+ if (returning_call_p (insn))
+ {
+ rtx label = gen_label_rtx ();
+ rtx labelref = gen_rtx_LABEL_REF (Pmode, label);
+ rtx reg = gen_rtx_REG (SImode, RETURN_ADDR_REGNO);
+
+ /* Two uses in the worst case: the movsi_high/movsi_lo_sum
+ pair emitted below. */
+ LABEL_NUSES (label) = 2;
+ if (!c6x_flag_schedule_insns2)
+ {
+ if (can_use_callp (insn))
+ convert_to_callp (insn);
+ else
+ {
+ rtx t;
+ rtx slot[4];
+ emit_label_after (label, insn);
+
+ /* Bundle the call and its delay slots into a single
+ SEQUENCE. While these do not issue in parallel
+ we need to group them into a single EH region. */
+ slot[0] = insn;
+ PUT_MODE (insn, TImode);
+ if (TARGET_INSNS_64)
+ {
+ /* addkpc loads the return address in one insn. */
+ t = gen_addkpc (reg, labelref, GEN_INT (4));
+ slot[1] = emit_insn_after (duplicate_cond (t, insn),
+ insn);
+ PUT_MODE (slot[1], TImode);
+ gen_one_bundle (slot, 2, 0);
+ }
+ else
+ {
+ /* Otherwise use a movsi_high/movsi_lo_sum pair plus
+ a three-cycle NOP. */
+ slot[3] = emit_insn_after (gen_nop_count (GEN_INT (3)),
+ insn);
+ PUT_MODE (slot[3], TImode);
+ t = gen_movsi_lo_sum (reg, reg, labelref);
+ slot[2] = emit_insn_after (duplicate_cond (t, insn),
+ insn);
+ PUT_MODE (slot[2], TImode);
+ t = gen_movsi_high (reg, labelref);
+ slot[1] = emit_insn_after (duplicate_cond (t, insn),
+ insn);
+ PUT_MODE (slot[1], TImode);
+ gen_one_bundle (slot, 4, 0);
+ }
+ }
+ }
+ else
+ {
+ /* If we scheduled, we reserved the .S2 unit for one or two
+ cycles after the call. Emit the insns in these slots,
+ unless it's possible to create a CALLP insn.
+ Note that this works because the dependencies ensure that
+ no insn setting/using B3 is scheduled in the delay slots of
+ a call. */
+ int this_clock = insn_get_clock (insn);
+ rtx last_same_clock;
+ rtx after1;
+
+ call_labels[INSN_UID (insn)] = label;
+
+ last_same_clock = find_last_same_clock (insn);
+
+ if (can_use_callp (insn))
+ {
+ /* Find the first insn of the next execute packet. If it
+ is outside the branch delay slots of this call, we may
+ use a CALLP insn. */
+ rtx next_cycle_start = next_nonnote_nondebug_insn (last_same_clock);
+
+ if (CALL_P (next_cycle_start)
+ && (insn_get_clock (next_cycle_start) == this_clock + 5))
+ {
+ convert_to_callp (next_cycle_start);
+ insn_set_clock (next_cycle_start, this_clock);
+ /* If the deleted call started a new execute packet,
+ transfer that role to the first remaining insn of
+ its cycle. */
+ if (GET_MODE (insn) == TImode)
+ {
+ rtx new_cycle_first = NEXT_INSN (insn);
+ while (!NONDEBUG_INSN_P (new_cycle_first)
+ || GET_CODE (PATTERN (new_cycle_first)) == USE
+ || GET_CODE (PATTERN (new_cycle_first)) == CLOBBER)
+ new_cycle_first = NEXT_INSN (new_cycle_first);
+ PUT_MODE (new_cycle_first, TImode);
+ if (new_cycle_first != next_cycle_start)
+ PUT_MODE (next_cycle_start, VOIDmode);
+ INSN_INFO_ENTRY (INSN_UID (new_cycle_first)).ebb_start
+ = INSN_INFO_ENTRY (INSN_UID (insn)).ebb_start;
+ }
+ else
+ PUT_MODE (next_cycle_start, VOIDmode);
+ /* The original call is subsumed by the CALLP. */
+ delete_insn (insn);
+ goto done;
+ }
+ }
+ after1 = find_next_cycle_insn (last_same_clock, this_clock + 1);
+ if (after1 == NULL_RTX)
+ after1 = last_same_clock;
+ else
+ after1 = find_last_same_clock (after1);
+ if (TARGET_INSNS_64)
+ {
+ rtx x1 = gen_addkpc (reg, labelref, const0_rtx);
+ x1 = emit_insn_after (duplicate_cond (x1, insn), after1);
+ insn_set_clock (x1, this_clock + 1);
+ INSN_INFO_ENTRY (INSN_UID (x1)).reservation = RESERVATION_S2;
+ if (after1 == last_same_clock)
+ PUT_MODE (x1, TImode);
+ }
+ else
+ {
+ rtx x1, x2;
+ rtx after2 = find_next_cycle_insn (after1, this_clock + 2);
+ if (after2 == NULL_RTX)
+ after2 = after1;
+ x2 = gen_movsi_lo_sum (reg, reg, labelref);
+ x2 = emit_insn_after (duplicate_cond (x2, insn), after2);
+ x1 = gen_movsi_high (reg, labelref);
+ x1 = emit_insn_after (duplicate_cond (x1, insn), after1);
+ insn_set_clock (x1, this_clock + 1);
+ insn_set_clock (x2, this_clock + 2);
+ INSN_INFO_ENTRY (INSN_UID (x1)).reservation = RESERVATION_S2;
+ INSN_INFO_ENTRY (INSN_UID (x2)).reservation = RESERVATION_S2;
+ if (after1 == last_same_clock)
+ PUT_MODE (x1, TImode);
+ if (after1 == after2)
+ PUT_MODE (x2, TImode);
+ }
+ }
+ }
+ done:
+ insn = next;
+ }
+}
+
+/* Called as part of c6x_reorg. This function emits multi-cycle NOP
+ insns as required for correctness. CALL_LABELS is the array that
+ holds the return labels for call insns; we emit these here if
+ scheduling was run earlier. */
+
+static void
+reorg_emit_nops (rtx *call_labels)
+{
+ bool first;
+ rtx prev, last_call;
+ int prev_clock, earliest_bb_end;
+ int prev_implicit_nops;
+ rtx insn = get_insns ();
+
+ /* We look at one insn (or bundle inside a sequence) in each iteration, storing
+ its issue time in PREV_CLOCK for the next iteration. If there is a gap in
+ clocks, we must insert a NOP.
+ EARLIEST_BB_END tracks in which cycle all insns that have been issued in the
+ current basic block will finish. We must not allow the next basic block to
+ begin before this cycle.
+ PREV_IMPLICIT_NOPS tells us whether we've seen an insn that implicitly contains
+ a multi-cycle nop. The code is scheduled such that subsequent insns will
+ show the cycle gap, but we needn't insert a real NOP instruction. */
+ insn = next_real_insn (insn);
+ last_call = prev = NULL_RTX;
+ prev_clock = -1;
+ earliest_bb_end = 0;
+ prev_implicit_nops = 0;
+ first = true;
+ while (insn)
+ {
+ int this_clock = -1;
+ rtx next;
+ int max_cycles = 0;
+
+ next = next_real_insn (insn);
+
+ /* Skip insns that don't occupy an issue slot: debug insns,
+ USE/CLOBBER patterns, shadows/blockages, and jump tables. */
+ if (DEBUG_INSN_P (insn)
+ || GET_CODE (PATTERN (insn)) == USE
+ || GET_CODE (PATTERN (insn)) == CLOBBER
+ || shadow_or_blockage_p (insn)
+ || (JUMP_P (insn)
+ && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
+ || GET_CODE (PATTERN (insn)) == ADDR_VEC)))
+ goto next_insn;
+
+ if (!c6x_flag_schedule_insns2)
+ /* No scheduling; ensure that no parallel issue happens. */
+ PUT_MODE (insn, TImode);
+ else
+ {
+ int cycles;
+
+ this_clock = insn_get_clock (insn);
+ if (this_clock != prev_clock)
+ {
+ /* This insn begins a new cycle; fill the gap since the
+ previous cycle with an explicit NOP if needed. */
+ PUT_MODE (insn, TImode);
+
+ if (!first)
+ {
+ cycles = this_clock - prev_clock;
+
+ cycles -= prev_implicit_nops;
+ if (cycles > 1)
+ {
+ rtx nop = emit_nop_after (cycles - 1, prev);
+ insn_set_clock (nop, prev_clock + prev_implicit_nops + 1);
+ }
+ }
+ prev_clock = this_clock;
+
+ /* Once the pending call's delay slots have passed, its
+ return label can be placed. */
+ if (last_call
+ && insn_get_clock (last_call) + 6 <= this_clock)
+ {
+ emit_label_before (call_labels[INSN_UID (last_call)], insn);
+ last_call = NULL_RTX;
+ }
+ prev_implicit_nops = 0;
+ }
+ }
+
+ /* Examine how many cycles the current insn takes, and adjust
+ LAST_CALL, EARLIEST_BB_END and PREV_IMPLICIT_NOPS. */
+ if (recog_memoized (insn) >= 0
+ /* If not scheduling, we've emitted NOPs after calls already. */
+ && (c6x_flag_schedule_insns2 || !returning_call_p (insn)))
+ {
+ max_cycles = get_attr_cycles (insn);
+ /* A callp implicitly includes a five-cycle NOP. */
+ if (get_attr_type (insn) == TYPE_CALLP)
+ prev_implicit_nops = 5;
+ }
+ else
+ max_cycles = 1;
+ if (returning_call_p (insn))
+ last_call = insn;
+
+ if (c6x_flag_schedule_insns2)
+ {
+ gcc_assert (this_clock >= 0);
+ if (earliest_bb_end < this_clock + max_cycles)
+ earliest_bb_end = this_clock + max_cycles;
+ }
+ else if (max_cycles > 1)
+ emit_nop_after (max_cycles - 1, insn);
+
+ prev = insn;
+ first = false;
+
+ next_insn:
+ /* At the end of an extended basic block, pad with NOPs until every
+ issued insn has finished, and emit any pending return label. */
+ if (c6x_flag_schedule_insns2
+ && (next == NULL_RTX
+ || (GET_MODE (next) == TImode
+ && INSN_INFO_ENTRY (INSN_UID (next)).ebb_start))
+ && earliest_bb_end > 0)
+ {
+ int cycles = earliest_bb_end - prev_clock;
+ if (cycles > 1)
+ {
+ prev = emit_nop_after (cycles - 1, prev);
+ insn_set_clock (prev, prev_clock + prev_implicit_nops + 1);
+ }
+ earliest_bb_end = 0;
+ prev_clock = -1;
+ first = true;
+
+ if (last_call)
+ emit_label_after (call_labels[INSN_UID (last_call)], prev);
+ last_call = NULL_RTX;
+ }
+ insn = next;
+ }
+}
+
+/* If possible, split INSN, which we know is either a jump or a call, into a real
+ insn and its shadow. */
+static void
+split_delayed_branch (rtx insn)
+{
+ int code = recog_memoized (insn);
+ rtx i1, newpat;
+ rtx pat = PATTERN (insn);
+
+ /* Look through a predicate; duplicate_cond restores it below. */
+ if (GET_CODE (pat) == COND_EXEC)
+ pat = COND_EXEC_CODE (pat);
+
+ if (CALL_P (insn))
+ {
+ rtx src = pat, dest = NULL_RTX;
+ rtx callee;
+ /* A call with a return value is a SET; DEST is the result. */
+ if (GET_CODE (pat) == SET)
+ {
+ dest = SET_DEST (pat);
+ src = SET_SRC (pat);
+ }
+ callee = XEXP (XEXP (src, 0), 0);
+ /* Pick the shadow pattern matching the call variant; the real
+ part is a real_jump for sibcalls, a real_call otherwise. */
+ if (SIBLING_CALL_P (insn))
+ {
+ if (REG_P (callee))
+ newpat = gen_indirect_sibcall_shadow ();
+ else
+ newpat = gen_sibcall_shadow (callee);
+ pat = gen_real_jump (callee);
+ }
+ else if (dest != NULL_RTX)
+ {
+ if (REG_P (callee))
+ newpat = gen_indirect_call_value_shadow (dest);
+ else
+ newpat = gen_call_value_shadow (dest, callee);
+ pat = gen_real_call (callee);
+ }
+ else
+ {
+ if (REG_P (callee))
+ newpat = gen_indirect_call_shadow ();
+ else
+ newpat = gen_call_shadow (callee);
+ pat = gen_real_call (callee);
+ }
+ pat = duplicate_cond (pat, insn);
+ newpat = duplicate_cond (newpat, insn);
+ }
+ else
+ {
+ rtx src, op;
+ if (GET_CODE (pat) == PARALLEL
+ && GET_CODE (XVECEXP (pat, 0, 0)) == RETURN)
+ {
+ newpat = gen_return_shadow ();
+ pat = gen_real_ret (XEXP (XVECEXP (pat, 0, 1), 0));
+ newpat = duplicate_cond (newpat, insn);
+ }
+ else
+ switch (code)
+ {
+ case CODE_FOR_br_true:
+ case CODE_FOR_br_false:
+ /* Conditional branch: the real_jump carries the (possibly
+ reversed) condition as a COND_EXEC. */
+ src = SET_SRC (pat);
+ op = XEXP (src, code == CODE_FOR_br_true ? 1 : 2);
+ newpat = gen_condjump_shadow (op);
+ pat = gen_real_jump (op);
+ if (code == CODE_FOR_br_true)
+ pat = gen_rtx_COND_EXEC (VOIDmode, XEXP (src, 0), pat);
+ else
+ pat = gen_rtx_COND_EXEC (VOIDmode,
+ reversed_comparison (XEXP (src, 0),
+ VOIDmode),
+ pat);
+ break;
+
+ case CODE_FOR_jump:
+ op = SET_SRC (pat);
+ newpat = gen_jump_shadow (op);
+ break;
+
+ case CODE_FOR_indirect_jump:
+ newpat = gen_indirect_jump_shadow ();
+ break;
+
+ case CODE_FOR_return_internal:
+ newpat = gen_return_shadow ();
+ pat = gen_real_ret (XEXP (XVECEXP (pat, 0, 1), 0));
+ break;
+
+ default:
+ /* Anything else is not splittable; leave INSN alone. */
+ return;
+ }
+ }
+ /* Emit the real part just before INSN, and turn INSN itself into the
+ shadow; the side effect occurs five cycles after the real insn. */
+ i1 = emit_insn_before (pat, insn);
+ PATTERN (insn) = newpat;
+ INSN_CODE (insn) = -1;
+ record_delay_slot_pair (i1, insn, 5);
+}
+
+/* Split every insn (i.e. jumps and calls) which can have delay slots into
+ two parts: the first one is scheduled normally and emits the instruction,
+ while the second one is a shadow insn which shows the side effect taking
+ place. The second one is placed in the right cycle by the scheduler, but
+ not emitted as an assembly instruction. */
+
+static void
+split_delayed_insns (void)
+{
+ rtx insn;
+
+ for (insn = get_insns (); insn != NULL_RTX; insn = NEXT_INSN (insn))
+ if (JUMP_P (insn) || CALL_P (insn))
+ split_delayed_branch (insn);
+}
+
+/* For every insn that has an entry in the new_conditions vector, give it
+ the appropriate predicate. */
+static void
+conditionalize_after_sched (void)
+{
+ basic_block bb;
+ rtx insn;
+ FOR_EACH_BB (bb)
+ FOR_BB_INSNS (bb, insn)
+ {
+ unsigned uid = INSN_UID (insn);
+ rtx cond;
+ /* Insns created after the info array was sized have no entry. */
+ if (!NONDEBUG_INSN_P (insn) || uid >= INSN_INFO_LENGTH)
+ continue;
+ /* The condition to apply was recorded in the insn's info entry
+ during scheduling. */
+ cond = INSN_INFO_ENTRY (uid).new_cond;
+ if (cond == NULL_RTX)
+ continue;
+ if (dump_file)
+ fprintf (dump_file, "Conditionalizing insn %d\n", uid);
+ predicate_insn (insn, cond, true);
+ }
+}
+
+/* Implement the TARGET_MACHINE_DEPENDENT_REORG pass. We split call insns here
+ into a sequence that loads the return register and performs the call,
+ and emit the return label.
+ If scheduling after reload is requested, it happens here. */
+
+static void
+c6x_reorg (void)
+{
+ basic_block bb;
+ rtx *call_labels;
+ bool do_selsched = (c6x_flag_schedule_insns2 && flag_selective_scheduling2
+ && !maybe_skip_selective_scheduling ());
+
+ /* We are freeing block_for_insn in the toplev to keep compatibility
+ with old MDEP_REORGS that are not CFG based. Recompute it now. */
+ compute_bb_for_insn ();
+
+ /* Don't let dataflow analysis run dead code elimination. */
+ df_clear_flags (DF_LR_RUN_DCE);
+
+ /* If optimizing, we'll have split before scheduling. */
+ if (optimize == 0)
+ split_all_insns ();
+
+ if (c6x_flag_schedule_insns2)
+ {
+ int sz = get_max_uid () * 3 / 2 + 1;
+
+ insn_info = VEC_alloc (c6x_sched_insn_info, heap, sz);
+
+ /* Make sure the real-jump insns we create are not deleted. */
+ sched_no_dce = true;
+
+ split_delayed_insns ();
+ timevar_push (TV_SCHED2);
+ if (do_selsched)
+ run_selective_scheduling ();
+ else
+ schedule_ebbs ();
+ conditionalize_after_sched ();
+ timevar_pop (TV_SCHED2);
+
+ free_delay_pairs ();
+ sched_no_dce = false;
+ }
+
+ call_labels = XCNEWVEC (rtx, get_max_uid () + 1);
+
+ reorg_split_calls (call_labels);
+
+ if (c6x_flag_schedule_insns2)
+ {
+ FOR_EACH_BB (bb)
+ if ((bb->flags & BB_DISABLE_SCHEDULE) == 0)
+ assign_reservations (BB_HEAD (bb), BB_END (bb));
+ }
+
+ if (c6x_flag_var_tracking)
+ {
+ timevar_push (TV_VAR_TRACKING);
+ variable_tracking_main ();
+ timevar_pop (TV_VAR_TRACKING);
+ }
+
+ reorg_emit_nops (call_labels);
+
+ /* Post-process the schedule to move parallel insns into SEQUENCEs. */
+ if (c6x_flag_schedule_insns2)
+ {
+ free_delay_pairs ();
+ c6x_gen_bundles ();
+ }
+
+ df_finish_pass (false);
+}
+
+/* Called when a function has been assembled. It should perform all the
+ tasks of ASM_DECLARE_FUNCTION_SIZE in elfos.h, plus target-specific
+ tasks.
+ We free the reservation (and other scheduling) information here now that
+ all insns have been output. */
+void
+c6x_function_end (FILE *file, const char *fname)
+{
+ c6x_output_fn_unwind (file);
+
+ /* insn_info was allocated by c6x_reorg if we scheduled. */
+ if (insn_info)
+ VEC_free (c6x_sched_insn_info, heap, insn_info);
+ insn_info = NULL;
+
+ if (!flag_inhibit_size_directive)
+ ASM_OUTPUT_MEASURED_SIZE (file, fname);
+}
+\f
+/* Determine whether X is a shift with code CODE and an integer amount
+ AMOUNT. */
+static bool
+shift_p (rtx x, enum rtx_code code, int amount)
+{
+ if (GET_CODE (x) != code)
+ return false;
+ /* The shift count must be a literal CONST_INT equal to AMOUNT. */
+ if (GET_CODE (XEXP (x, 1)) != CONST_INT)
+ return false;
+ return INTVAL (XEXP (x, 1)) == amount;
+}
+
+/* Compute a (partial) cost for rtx X. Return true if the complete
+ cost has been computed, and false if subexpressions should be
+ scanned. In either case, *TOTAL contains the cost result. */
+
+static bool
+c6x_rtx_costs (rtx x, int code, int outer_code, int *total, bool speed)
+{
+ int cost2 = COSTS_N_INSNS (1);
+ rtx op0, op1;
+
+ switch (code)
+ {
+ case CONST_INT:
+ /* A constant is free if it satisfies the constraint of the
+ operation consuming it; otherwise charge one insn to load it. */
+ if (outer_code == SET || outer_code == PLUS)
+ *total = satisfies_constraint_IsB (x) ? 0 : cost2;
+ else if (outer_code == AND || outer_code == IOR || outer_code == XOR
+ || outer_code == MINUS)
+ *total = satisfies_constraint_Is5 (x) ? 0 : cost2;
+ else if (GET_RTX_CLASS (outer_code) == RTX_COMPARE
+ || GET_RTX_CLASS (outer_code) == RTX_COMM_COMPARE)
+ *total = satisfies_constraint_Iu4 (x) ? 0 : cost2;
+ else if (outer_code == ASHIFT || outer_code == ASHIFTRT
+ || outer_code == LSHIFTRT)
+ *total = satisfies_constraint_Iu5 (x) ? 0 : cost2;
+ else
+ *total = cost2;
+ return true;
+
+ case CONST:
+ case LABEL_REF:
+ case SYMBOL_REF:
+ case CONST_DOUBLE:
+ /* Symbolic constants need a two-insn high/lo_sum pair. */
+ *total = COSTS_N_INSNS (2);
+ return true;
+
+ case TRUNCATE:
+ /* Recognize a mult_highpart operation. */
+ if ((GET_MODE (x) == HImode || GET_MODE (x) == SImode)
+ && GET_CODE (XEXP (x, 0)) == LSHIFTRT
+ && GET_MODE (XEXP (x, 0)) == GET_MODE_2XWIDER_MODE (GET_MODE (x))
+ && GET_CODE (XEXP (XEXP (x, 0), 0)) == MULT
+ && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
+ && INTVAL (XEXP (XEXP (x, 0), 1)) == GET_MODE_BITSIZE (GET_MODE (x)))
+ {
+ rtx mul = XEXP (XEXP (x, 0), 0);
+ rtx op0 = XEXP (mul, 0);
+ rtx op1 = XEXP (mul, 1);
+ enum rtx_code code0 = GET_CODE (op0);
+ enum rtx_code code1 = GET_CODE (op1);
+
+ if ((code0 == code1
+ && (code0 == SIGN_EXTEND || code0 == ZERO_EXTEND))
+ || (GET_MODE (x) == HImode
+ && code0 == ZERO_EXTEND && code1 == SIGN_EXTEND))
+ {
+ if (GET_MODE (x) == HImode)
+ *total = COSTS_N_INSNS (2);
+ else
+ *total = COSTS_N_INSNS (12);
+ *total += rtx_cost (XEXP (op0, 0), code0, speed);
+ *total += rtx_cost (XEXP (op1, 0), code1, speed);
+ return true;
+ }
+ }
+ return false;
+
+ case ASHIFT:
+ case ASHIFTRT:
+ case LSHIFTRT:
+ /* DImode shifts are cheap only for constant shift counts. */
+ if (GET_MODE (x) == DImode)
+ *total = COSTS_N_INSNS (CONSTANT_P (XEXP (x, 1)) ? 4 : 15);
+ else
+ *total = COSTS_N_INSNS (1);
+ return false;
+
+ case PLUS:
+ case MINUS:
+ *total = COSTS_N_INSNS (1);
+ /* Recognize scaled-index forms (mult by 2, 4 or, for PLUS, 8),
+ where the multiply is folded into the add. */
+ op0 = code == PLUS ? XEXP (x, 0) : XEXP (x, 1);
+ op1 = code == PLUS ? XEXP (x, 1) : XEXP (x, 0);
+ if (GET_MODE_SIZE (GET_MODE (x)) <= UNITS_PER_WORD
+ && INTEGRAL_MODE_P (GET_MODE (x))
+ && GET_CODE (op0) == MULT
+ && GET_CODE (XEXP (op0, 1)) == CONST_INT
+ && (INTVAL (XEXP (op0, 1)) == 2
+ || INTVAL (XEXP (op0, 1)) == 4
+ || (code == PLUS && INTVAL (XEXP (op0, 1)) == 8)))
+ {
+ *total += rtx_cost (XEXP (op0, 0), ASHIFT, speed);
+ *total += rtx_cost (op1, (enum rtx_code)code, speed);
+ return true;
+ }
+ return false;
+
+ case MULT:
+ op0 = XEXP (x, 0);
+ op1 = XEXP (x, 1);
+ if (GET_MODE (x) == DFmode)
+ {
+ if (TARGET_FP)
+ *total = COSTS_N_INSNS (speed ? 10 : 1);
+ else
+ *total = COSTS_N_INSNS (speed ? 200 : 4);
+ }
+ else if (GET_MODE (x) == SFmode)
+ {
+ if (TARGET_FP)
+ *total = COSTS_N_INSNS (speed ? 4 : 1);
+ else
+ *total = COSTS_N_INSNS (speed ? 100 : 4);
+ }
+ else if (GET_MODE (x) == DImode)
+ {
+ /* A widening 32x32->64 multiply is cheap with mpy32. */
+ if (TARGET_MPY32
+ && GET_CODE (op0) == GET_CODE (op1)
+ && (GET_CODE (op0) == ZERO_EXTEND
+ || GET_CODE (op0) == SIGN_EXTEND))
+ {
+ *total = COSTS_N_INSNS (speed ? 2 : 1);
+ op0 = XEXP (op0, 0);
+ op1 = XEXP (op1, 0);
+ }
+ else
+ /* Maybe improve this later. */
+ *total = COSTS_N_INSNS (20);
+ }
+ else if (GET_MODE (x) == SImode)
+ {
+ /* Recognize operand forms that map to the 16x16 multiply
+ insns (extends, high-half shifts, small constants). */
+ if (((GET_CODE (op0) == ZERO_EXTEND
+ || GET_CODE (op0) == SIGN_EXTEND
+ || shift_p (op0, LSHIFTRT, 16))
+ && (GET_CODE (op1) == SIGN_EXTEND
+ || GET_CODE (op1) == ZERO_EXTEND
+ || scst5_operand (op1, SImode)
+ || shift_p (op1, ASHIFTRT, 16)
+ || shift_p (op1, LSHIFTRT, 16)))
+ || (shift_p (op0, ASHIFTRT, 16)
+ && (GET_CODE (op1) == SIGN_EXTEND
+ || shift_p (op1, ASHIFTRT, 16))))
+ {
+ *total = COSTS_N_INSNS (speed ? 2 : 1);
+ op0 = XEXP (op0, 0);
+ if (scst5_operand (op1, SImode))
+ op1 = NULL_RTX;
+ else
+ op1 = XEXP (op1, 0);
+ }
+ else if (!speed)
+ *total = COSTS_N_INSNS (1);
+ else if (TARGET_MPY32)
+ *total = COSTS_N_INSNS (4);
+ else
+ *total = COSTS_N_INSNS (6);
+ }
+ else if (GET_MODE (x) == HImode)
+ *total = COSTS_N_INSNS (speed ? 2 : 1);
+
+ /* Add the cost of non-register operands. */
+ if (GET_CODE (op0) != REG
+ && (GET_CODE (op0) != SUBREG || GET_CODE (SUBREG_REG (op0)) != REG))
+ *total += rtx_cost (op0, MULT, speed);
+ if (op1 && GET_CODE (op1) != REG
+ && (GET_CODE (op1) != SUBREG || GET_CODE (SUBREG_REG (op1)) != REG))
+ *total += rtx_cost (op1, MULT, speed);
+ return true;
+
+ case UDIV:
+ case DIV:
+ /* This is a bit random; assuming on average there'll be 16 leading
+ zeros. FIXME: estimate better for constant dividends. */
+ *total = COSTS_N_INSNS (6 + 3 * 16);
+ return false;
+
+ case IF_THEN_ELSE:
+ /* Recognize the cmp_and/ior patterns. */
+ op0 = XEXP (x, 0);
+ if ((GET_CODE (op0) == EQ || GET_CODE (op0) == NE)
+ && REG_P (XEXP (op0, 0))
+ && XEXP (op0, 1) == const0_rtx
+ && rtx_equal_p (XEXP (x, 1), XEXP (op0, 0)))
+ {
+ *total = rtx_cost (XEXP (x, 1), (enum rtx_code)outer_code, speed);
+ return false;
+ }
+ return false;
+
+ default:
+ return false;
+ }
+}
+
+/* Implements target hook vector_mode_supported_p. */
+
+static bool
+c6x_vector_mode_supported_p (enum machine_mode mode)
+{
+ /* Integer vector modes of up to eight bytes are supported. */
+ return (mode == V2HImode
+ || mode == V4QImode
+ || mode == V2SImode
+ || mode == V4HImode
+ || mode == V8QImode);
+}
+
+/* Implements TARGET_VECTORIZE_PREFERRED_SIMD_MODE. */
+static enum machine_mode
+c6x_preferred_simd_mode (enum machine_mode mode)
+{
+ /* Four-byte vectors for HImode and QImode elements; everything else
+ stays scalar. */
+ if (mode == HImode)
+ return V2HImode;
+ if (mode == QImode)
+ return V4QImode;
+ return word_mode;
+}
+
+/* Implement TARGET_SCALAR_MODE_SUPPORTED_P. */
+
+static bool
+c6x_scalar_mode_supported_p (enum machine_mode mode)
+{
+ /* In addition to the default scalar modes, accept all fixed-point
+ modes up to twice the word size. */
+ if (ALL_FIXED_POINT_MODE_P (mode)
+ && GET_MODE_PRECISION (mode) <= 2 * BITS_PER_WORD)
+ return true;
+
+ return default_scalar_mode_supported_p (mode);
+}
+
+/* Output a reference from a function exception table to the type_info
+ object X. Output these via a special assembly directive. */
+
+static bool
+c6x_output_ttype (rtx x)
+{
+ /* Use special relocations for symbol references. */
+ const char *directive
+ = (GET_CODE (x) == CONST_INT ? "\t.word\t" : "\t.ehtype\t");
+
+ fputs (directive, asm_out_file);
+ output_addr_const (asm_out_file, x);
+ fputc ('\n', asm_out_file);
+
+ return TRUE;
+}
+
+/* Modify the return address of the current function. */
+
+void
+c6x_set_return_address (rtx source, rtx scratch)
+{
+ struct c6x_frame frame;
+ rtx addr;
+ HOST_WIDE_INT offset;
+
+ c6x_compute_frame_layout (&frame);
+ /* If the return address register was not saved, just overwrite it;
+ otherwise store SOURCE into its stack slot. */
+ if (! c6x_save_reg (RETURN_ADDR_REGNO))
+ emit_move_insn (gen_rtx_REG (Pmode, RETURN_ADDR_REGNO), source);
+ else
+ {
+
+ /* Compute the address of the slot from the frame layout, relative
+ to the frame pointer if we have one, else the stack pointer. */
+ if (frame_pointer_needed)
+ {
+ addr = hard_frame_pointer_rtx;
+ offset = frame.b3_offset;
+ }
+ else
+ {
+ addr = stack_pointer_rtx;
+ offset = frame.to_allocate - frame.b3_offset;
+ }
+
+ /* TODO: Use base+offset loads where possible. */
+ if (offset)
+ {
+ HOST_WIDE_INT low = trunc_int_for_mode (offset, HImode);
+
+ /* Load the offset with a high/lo_sum pair when it does not
+ fit in 16 bits. */
+ emit_insn (gen_movsi_high (scratch, GEN_INT (low)));
+ if (low != offset)
+ emit_insn (gen_movsi_lo_sum (scratch, scratch, GEN_INT(offset)));
+ emit_insn (gen_addsi3 (scratch, addr, scratch));
+ addr = scratch;
+ }
+
+ emit_move_insn (gen_frame_mem (Pmode, addr), source);
+ }
+}
+
+/* We save pairs of registers using a DImode store. Describe the component
+ registers for DWARF generation code. */
+
+static rtx
+c6x_dwarf_register_span (rtx rtl)
+{
+ unsigned regno;
+ unsigned real_regno;
+ int nregs;
+ int i;
+ rtx p;
+
+ regno = REGNO (rtl);
+ nregs = HARD_REGNO_NREGS (regno, GET_MODE (rtl));
+ /* Single-register values need no special description. */
+ if (nregs == 1)
+ return NULL_RTX;
+
+ /* Return a PARALLEL listing each SImode component register, in
+ memory order (reversed for big-endian). */
+ p = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc(nregs));
+ for (i = 0; i < nregs; i++)
+ {
+ if (TARGET_BIG_ENDIAN)
+ real_regno = regno + nregs - (i + 1);
+ else
+ real_regno = regno + i;
+
+ XVECEXP (p, 0, i) = gen_rtx_REG (SImode, real_regno);
+ }
+
+ return p;
+}
+\f
+/* Codes for all the C6X builtins. */
+enum c6x_builtins
+{
+ C6X_BUILTIN_SADD,
+ C6X_BUILTIN_SSUB,
+ C6X_BUILTIN_ADD2,
+ C6X_BUILTIN_SUB2,
+ C6X_BUILTIN_ADD4,
+ C6X_BUILTIN_SUB4,
+ C6X_BUILTIN_SADD2,
+ C6X_BUILTIN_SSUB2,
+ C6X_BUILTIN_SADDU4,
+
+ C6X_BUILTIN_SMPY,
+ C6X_BUILTIN_SMPYH,
+ C6X_BUILTIN_SMPYHL,
+ C6X_BUILTIN_SMPYLH,
+ C6X_BUILTIN_MPY2,
+ C6X_BUILTIN_SMPY2,
+
+ C6X_BUILTIN_CLRR,
+ C6X_BUILTIN_EXTR,
+ C6X_BUILTIN_EXTRU,
+
+ C6X_BUILTIN_SSHL,
+ C6X_BUILTIN_SUBC,
+ C6X_BUILTIN_ABS,
+ C6X_BUILTIN_ABS2,
+ C6X_BUILTIN_AVG2,
+ C6X_BUILTIN_AVGU4,
+
+ /* Not a real builtin: the number of codes above. Must be last. */
+ C6X_BUILTIN_MAX
+};
+
+
+/* Decl for each builtin, indexed by c6x_builtins code; filled in by
+ def_builtin from c6x_init_builtins. */
+static GTY(()) tree c6x_builtin_decls[C6X_BUILTIN_MAX];
+
+/* Return the C6X builtin for CODE. */
+static tree
+c6x_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
+{
+ /* Out-of-range codes have no builtin decl. */
+ return code < C6X_BUILTIN_MAX ? c6x_builtin_decls[code] : error_mark_node;
+}
+
+/* Register a builtin named NAME with function type TYPE and function
+ code CODE, and record its decl in c6x_builtin_decls so that
+ c6x_builtin_decl can return it later. */
+#define def_builtin(NAME, TYPE, CODE) \
+do { \
+ tree bdecl; \
+ bdecl = add_builtin_function ((NAME), (TYPE), (CODE), BUILT_IN_MD, \
+ NULL, NULL_TREE); \
+ c6x_builtin_decls[CODE] = bdecl; \
+} while (0)
+
+/* Set up all builtin functions for this target. */
+static void
+c6x_init_builtins (void)
+{
+ /* Vector types used by the builtins; note V4QI uses an unsigned
+ element type. */
+ tree V4QI_type_node = build_vector_type (unsigned_intQI_type_node, 4);
+ tree V2HI_type_node = build_vector_type (intHI_type_node, 2);
+ tree V2SI_type_node = build_vector_type (intSI_type_node, 2);
+ /* Function types shared by several builtins below. */
+ tree int_ftype_int
+ = build_function_type_list (integer_type_node, integer_type_node,
+ NULL_TREE);
+ tree int_ftype_int_int
+ = build_function_type_list (integer_type_node, integer_type_node,
+ integer_type_node, NULL_TREE);
+ tree v2hi_ftype_v2hi
+ = build_function_type_list (V2HI_type_node, V2HI_type_node, NULL_TREE);
+ tree v4qi_ftype_v4qi_v4qi
+ = build_function_type_list (V4QI_type_node, V4QI_type_node,
+ V4QI_type_node, NULL_TREE);
+ tree v2hi_ftype_v2hi_v2hi
+ = build_function_type_list (V2HI_type_node, V2HI_type_node,
+ V2HI_type_node, NULL_TREE);
+ tree v2si_ftype_v2hi_v2hi
+ = build_function_type_list (V2SI_type_node, V2HI_type_node,
+ V2HI_type_node, NULL_TREE);
+
+ /* Addition/subtraction builtins. */
+ def_builtin ("__builtin_c6x_sadd", int_ftype_int_int,
+ C6X_BUILTIN_SADD);
+ def_builtin ("__builtin_c6x_ssub", int_ftype_int_int,
+ C6X_BUILTIN_SSUB);
+ def_builtin ("__builtin_c6x_add2", v2hi_ftype_v2hi_v2hi,
+ C6X_BUILTIN_ADD2);
+ def_builtin ("__builtin_c6x_sub2", v2hi_ftype_v2hi_v2hi,
+ C6X_BUILTIN_SUB2);
+ def_builtin ("__builtin_c6x_add4", v4qi_ftype_v4qi_v4qi,
+ C6X_BUILTIN_ADD4);
+ def_builtin ("__builtin_c6x_sub4", v4qi_ftype_v4qi_v4qi,
+ C6X_BUILTIN_SUB4);
+ def_builtin ("__builtin_c6x_mpy2", v2si_ftype_v2hi_v2hi,
+ C6X_BUILTIN_MPY2);
+ def_builtin ("__builtin_c6x_sadd2", v2hi_ftype_v2hi_v2hi,
+ C6X_BUILTIN_SADD2);
+ def_builtin ("__builtin_c6x_ssub2", v2hi_ftype_v2hi_v2hi,
+ C6X_BUILTIN_SSUB2);
+ def_builtin ("__builtin_c6x_saddu4", v4qi_ftype_v4qi_v4qi,
+ C6X_BUILTIN_SADDU4);
+ def_builtin ("__builtin_c6x_smpy2", v2si_ftype_v2hi_v2hi,
+ C6X_BUILTIN_SMPY2);
+
+ /* Multiplication builtins. */
+ def_builtin ("__builtin_c6x_smpy", int_ftype_int_int,
+ C6X_BUILTIN_SMPY);
+ def_builtin ("__builtin_c6x_smpyh", int_ftype_int_int,
+ C6X_BUILTIN_SMPYH);
+ def_builtin ("__builtin_c6x_smpyhl", int_ftype_int_int,
+ C6X_BUILTIN_SMPYHL);
+ def_builtin ("__builtin_c6x_smpylh", int_ftype_int_int,
+ C6X_BUILTIN_SMPYLH);
+
+ def_builtin ("__builtin_c6x_sshl", int_ftype_int_int,
+ C6X_BUILTIN_SSHL);
+ def_builtin ("__builtin_c6x_subc", int_ftype_int_int,
+ C6X_BUILTIN_SUBC);
+
+ def_builtin ("__builtin_c6x_avg2", v2hi_ftype_v2hi_v2hi,
+ C6X_BUILTIN_AVG2);
+ def_builtin ("__builtin_c6x_avgu4", v4qi_ftype_v4qi_v4qi,
+ C6X_BUILTIN_AVGU4);
+
+ /* Bit-field builtins. */
+ def_builtin ("__builtin_c6x_clrr", int_ftype_int_int,
+ C6X_BUILTIN_CLRR);
+ def_builtin ("__builtin_c6x_extr", int_ftype_int_int,
+ C6X_BUILTIN_EXTR);
+ def_builtin ("__builtin_c6x_extru", int_ftype_int_int,
+ C6X_BUILTIN_EXTRU);
+
+ def_builtin ("__builtin_c6x_abs", int_ftype_int, C6X_BUILTIN_ABS);
+ def_builtin ("__builtin_c6x_abs2", v2hi_ftype_v2hi, C6X_BUILTIN_ABS2);
+}
+
+
+/* Describes one builtin: the insn pattern used to expand it, its
+ user-visible name, and its function code. */
+struct builtin_description
+{
+ const enum insn_code icode;
+ const char *const name;
+ const enum c6x_builtins code;
+};
+
+/* Builtins taking two operands; scanned by c6x_expand_builtin. */
+static const struct builtin_description bdesc_2arg[] =
+{
+ { CODE_FOR_saddsi3, "__builtin_c6x_sadd", C6X_BUILTIN_SADD },
+ { CODE_FOR_ssubsi3, "__builtin_c6x_ssub", C6X_BUILTIN_SSUB },
+ { CODE_FOR_addv2hi3, "__builtin_c6x_add2", C6X_BUILTIN_ADD2 },
+ { CODE_FOR_subv2hi3, "__builtin_c6x_sub2", C6X_BUILTIN_SUB2 },
+ { CODE_FOR_addv4qi3, "__builtin_c6x_add4", C6X_BUILTIN_ADD4 },
+ { CODE_FOR_subv4qi3, "__builtin_c6x_sub4", C6X_BUILTIN_SUB4 },
+ { CODE_FOR_ss_addv2hi3, "__builtin_c6x_sadd2", C6X_BUILTIN_SADD2 },
+ { CODE_FOR_ss_subv2hi3, "__builtin_c6x_ssub2", C6X_BUILTIN_SSUB2 },
+ { CODE_FOR_us_addv4qi3, "__builtin_c6x_saddu4", C6X_BUILTIN_SADDU4 },
+
+ { CODE_FOR_subcsi3, "__builtin_c6x_subc", C6X_BUILTIN_SUBC },
+ { CODE_FOR_ss_ashlsi3, "__builtin_c6x_sshl", C6X_BUILTIN_SSHL },
+
+ { CODE_FOR_avgv2hi3, "__builtin_c6x_avg2", C6X_BUILTIN_AVG2 },
+ { CODE_FOR_uavgv4qi3, "__builtin_c6x_avgu4", C6X_BUILTIN_AVGU4 },
+
+ { CODE_FOR_mulhqsq3, "__builtin_c6x_smpy", C6X_BUILTIN_SMPY },
+ { CODE_FOR_mulhqsq3_hh, "__builtin_c6x_smpyh", C6X_BUILTIN_SMPYH },
+ { CODE_FOR_mulhqsq3_lh, "__builtin_c6x_smpylh", C6X_BUILTIN_SMPYLH },
+ { CODE_FOR_mulhqsq3_hl, "__builtin_c6x_smpyhl", C6X_BUILTIN_SMPYHL },
+
+ { CODE_FOR_mulv2hqv2sq3, "__builtin_c6x_smpy2", C6X_BUILTIN_SMPY2 },
+
+ { CODE_FOR_clrr, "__builtin_c6x_clrr", C6X_BUILTIN_CLRR },
+ { CODE_FOR_extr, "__builtin_c6x_extr", C6X_BUILTIN_EXTR },
+ { CODE_FOR_extru, "__builtin_c6x_extru", C6X_BUILTIN_EXTRU }
+};
+
+/* Builtins taking one operand; scanned by c6x_expand_builtin. */
+static const struct builtin_description bdesc_1arg[] =
+{
+ { CODE_FOR_ssabssi2, "__builtin_c6x_abs", C6X_BUILTIN_ABS },
+ { CODE_FOR_ssabsv2hi2, "__builtin_c6x_abs2", C6X_BUILTIN_ABS2 }
+};
+
+/* Errors in the source file can cause expand_expr to return const0_rtx
+ where we expect a vector. To avoid crashing, use one of the vector
+ clear instructions. */
+static rtx
+safe_vector_operand (rtx x, enum machine_mode mode)
+{
+ if (x != const0_rtx)
+ return x;
+ /* Zero an SImode register and reinterpret it in MODE via a lowpart
+ subreg. */
+ x = gen_reg_rtx (SImode);
+
+ emit_insn (gen_movsi (x, CONST0_RTX (SImode)));
+ return gen_lowpart (mode, x);
+}
+
+/* Subroutine of c6x_expand_builtin to take care of binop insns. MATCH_OP
+ is true if the insn's output operand is also an input, i.e. the first
+ input operand of the pattern matches the destination; in that case the
+ two source operands occupy the following operand slots. (The previous
+ mention of a MACFLAG parameter was stale text; no such parameter
+ exists.) */
+
+static rtx
+c6x_expand_binop_builtin (enum insn_code icode, tree exp, rtx target,
+ bool match_op)
+{
+ /* With a matched output operand, the input operands shift by one. */
+ int offs = match_op ? 1 : 0;
+ rtx pat;
+ tree arg0 = CALL_EXPR_ARG (exp, 0);
+ tree arg1 = CALL_EXPR_ARG (exp, 1);
+ rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, EXPAND_NORMAL);
+ rtx op1 = expand_expr (arg1, NULL_RTX, VOIDmode, EXPAND_NORMAL);
+ enum machine_mode op0mode = GET_MODE (op0);
+ enum machine_mode op1mode = GET_MODE (op1);
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode mode0 = insn_data[icode].operand[1 + offs].mode;
+ enum machine_mode mode1 = insn_data[icode].operand[2 + offs].mode;
+ rtx ret = target;
+
+ if (VECTOR_MODE_P (mode0))
+ op0 = safe_vector_operand (op0, mode0);
+ if (VECTOR_MODE_P (mode1))
+ op1 = safe_vector_operand (op1, mode1);
+
+ if (! target
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ {
+ /* For fixed-point result modes, allocate an integer-mode register
+ and use its lowpart as the insn's destination; RET keeps the
+ integer-mode register that we return to the caller. */
+ if (tmode == SQmode || tmode == V2SQmode)
+ {
+ ret = gen_reg_rtx (tmode == SQmode ? SImode : V2SImode);
+ target = gen_lowpart (tmode, ret);
+ }
+ else
+ target = gen_reg_rtx (tmode);
+ }
+
+ /* Convert integer-typed operands to the fixed-point modes the insn
+ pattern expects, using lowpart subregs. */
+ if ((op0mode == V2HImode || op0mode == SImode || op0mode == VOIDmode)
+ && (mode0 == V2HQmode || mode0 == HQmode || mode0 == SQmode))
+ {
+ op0mode = mode0;
+ op0 = gen_lowpart (mode0, op0);
+ }
+ if ((op1mode == V2HImode || op1mode == SImode || op1mode == VOIDmode)
+ && (mode1 == V2HQmode || mode1 == HQmode || mode1 == SQmode))
+ {
+ op1mode = mode1;
+ op1 = gen_lowpart (mode1, op1);
+ }
+ /* In case the insn wants input operands in modes different from
+ the result, abort. */
+ gcc_assert ((op0mode == mode0 || op0mode == VOIDmode)
+ && (op1mode == mode1 || op1mode == VOIDmode));
+
+ if (! (*insn_data[icode].operand[1 + offs].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+ if (! (*insn_data[icode].operand[2 + offs].predicate) (op1, mode1))
+ op1 = copy_to_mode_reg (mode1, op1);
+
+ /* A matched-operand pattern takes the destination twice. */
+ if (match_op)
+ pat = GEN_FCN (icode) (target, target, op0, op1);
+ else
+ pat = GEN_FCN (icode) (target, op0, op1);
+
+ if (! pat)
+ return 0;
+
+ emit_insn (pat);
+
+ return ret;
+}
+
+/* Subroutine of c6x_expand_builtin to take care of unop insns. Expand
+ the call EXP using pattern ICODE, placing the result in TARGET if that
+ is suitable; return the result rtx, or 0 if no insn could be
+ generated. */
+
+static rtx
+c6x_expand_unop_builtin (enum insn_code icode, tree exp,
+ rtx target)
+{
+ rtx pat;
+ tree arg0 = CALL_EXPR_ARG (exp, 0);
+ rtx op0 = expand_expr (arg0, NULL_RTX, VOIDmode, EXPAND_NORMAL);
+ enum machine_mode op0mode = GET_MODE (op0);
+ enum machine_mode tmode = insn_data[icode].operand[0].mode;
+ enum machine_mode mode0 = insn_data[icode].operand[1].mode;
+
+ if (! target
+ || GET_MODE (target) != tmode
+ || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
+ target = gen_reg_rtx (tmode);
+
+ if (VECTOR_MODE_P (mode0))
+ op0 = safe_vector_operand (op0, mode0);
+
+ /* Narrow an SImode operand when the pattern wants HImode. */
+ if (op0mode == SImode && mode0 == HImode)
+ {
+ op0mode = HImode;
+ op0 = gen_lowpart (HImode, op0);
+ }
+ gcc_assert (op0mode == mode0 || op0mode == VOIDmode);
+
+ if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
+ op0 = copy_to_mode_reg (mode0, op0);
+
+ pat = GEN_FCN (icode) (target, op0);
+ if (! pat)
+ return 0;
+ emit_insn (pat);
+ return target;
+}
+
+/* Expand an expression EXP that calls a built-in function,
+ with result going to TARGET if that's convenient
+ (and in mode MODE if that's convenient).
+ SUBTARGET may be used as the target for computing one of EXP's operands.
+ IGNORE is nonzero if the value is to be ignored. */
+
+static rtx
+c6x_expand_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
+ rtx subtarget ATTRIBUTE_UNUSED,
+ enum machine_mode mode ATTRIBUTE_UNUSED,
+ int ignore ATTRIBUTE_UNUSED)
+{
+ size_t i;
+ const struct builtin_description *d;
+ tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
+ unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
+
+ /* Look the code up in the two-operand table; only clrr's pattern has
+ a matched output operand. */
+ for (i = 0, d = bdesc_2arg; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
+ if (d->code == fcode)
+ return c6x_expand_binop_builtin (d->icode, exp, target,
+ fcode == C6X_BUILTIN_CLRR);
+
+ for (i = 0, d = bdesc_1arg; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
+ if (d->code == fcode)
+ return c6x_expand_unop_builtin (d->icode, exp, target);
+
+ /* Every registered builtin must appear in one of the tables. */
+ gcc_unreachable ();
+}
+\f
+/* Target Structure. */
+
+/* Initialize the GCC target structure. */
+/* Calling-convention hooks. */
+#undef TARGET_FUNCTION_ARG
+#define TARGET_FUNCTION_ARG c6x_function_arg
+#undef TARGET_FUNCTION_ARG_ADVANCE
+#define TARGET_FUNCTION_ARG_ADVANCE c6x_function_arg_advance
+#undef TARGET_FUNCTION_ARG_BOUNDARY
+#define TARGET_FUNCTION_ARG_BOUNDARY c6x_function_arg_boundary
+#undef TARGET_FUNCTION_ARG_ROUND_BOUNDARY
+#define TARGET_FUNCTION_ARG_ROUND_BOUNDARY \
+ c6x_function_arg_round_boundary
+#undef TARGET_FUNCTION_VALUE_REGNO_P
+#define TARGET_FUNCTION_VALUE_REGNO_P c6x_function_value_regno_p
+#undef TARGET_FUNCTION_VALUE
+#define TARGET_FUNCTION_VALUE c6x_function_value
+#undef TARGET_LIBCALL_VALUE
+#define TARGET_LIBCALL_VALUE c6x_libcall_value
+#undef TARGET_RETURN_IN_MEMORY
+#define TARGET_RETURN_IN_MEMORY c6x_return_in_memory
+#undef TARGET_RETURN_IN_MSB
+#define TARGET_RETURN_IN_MSB c6x_return_in_msb
+#undef TARGET_PASS_BY_REFERENCE
+#define TARGET_PASS_BY_REFERENCE c6x_pass_by_reference
+#undef TARGET_CALLEE_COPIES
+#define TARGET_CALLEE_COPIES c6x_callee_copies
+#undef TARGET_STRUCT_VALUE_RTX
+#define TARGET_STRUCT_VALUE_RTX c6x_struct_value_rtx
+#undef TARGET_FUNCTION_OK_FOR_SIBCALL
+#define TARGET_FUNCTION_OK_FOR_SIBCALL c6x_function_ok_for_sibcall
+
+#undef TARGET_ASM_OUTPUT_MI_THUNK
+#define TARGET_ASM_OUTPUT_MI_THUNK c6x_output_mi_thunk
+#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
+#define TARGET_ASM_CAN_OUTPUT_MI_THUNK c6x_can_output_mi_thunk
+
+#undef TARGET_BUILD_BUILTIN_VA_LIST
+#define TARGET_BUILD_BUILTIN_VA_LIST c6x_build_builtin_va_list
+
+#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
+#define TARGET_ASM_TRAMPOLINE_TEMPLATE c6x_asm_trampoline_template
+#undef TARGET_TRAMPOLINE_INIT
+#define TARGET_TRAMPOLINE_INIT c6x_initialize_trampoline
+
+#undef TARGET_LEGITIMATE_CONSTANT_P
+#define TARGET_LEGITIMATE_CONSTANT_P c6x_legitimate_constant_p
+#undef TARGET_LEGITIMATE_ADDRESS_P
+#define TARGET_LEGITIMATE_ADDRESS_P c6x_legitimate_address_p
+
+/* Section selection and small-data handling. */
+#undef TARGET_IN_SMALL_DATA_P
+#define TARGET_IN_SMALL_DATA_P c6x_in_small_data_p
+#undef TARGET_ASM_SELECT_RTX_SECTION
+#define TARGET_ASM_SELECT_RTX_SECTION c6x_select_rtx_section
+#undef TARGET_ASM_SELECT_SECTION
+#define TARGET_ASM_SELECT_SECTION c6x_elf_select_section
+#undef TARGET_ASM_UNIQUE_SECTION
+#define TARGET_ASM_UNIQUE_SECTION c6x_elf_unique_section
+#undef TARGET_SECTION_TYPE_FLAGS
+#define TARGET_SECTION_TYPE_FLAGS c6x_section_type_flags
+#undef TARGET_HAVE_SRODATA_SECTION
+#define TARGET_HAVE_SRODATA_SECTION true
+#undef TARGET_ASM_MERGEABLE_RODATA_PREFIX
+#define TARGET_ASM_MERGEABLE_RODATA_PREFIX ".const"
+
+#undef TARGET_OPTION_OVERRIDE
+#define TARGET_OPTION_OVERRIDE c6x_option_override
+#undef TARGET_CONDITIONAL_REGISTER_USAGE
+#define TARGET_CONDITIONAL_REGISTER_USAGE c6x_conditional_register_usage
+
+#undef TARGET_INIT_LIBFUNCS
+#define TARGET_INIT_LIBFUNCS c6x_init_libfuncs
+#undef TARGET_LIBFUNC_GNU_PREFIX
+#define TARGET_LIBFUNC_GNU_PREFIX true
+
+#undef TARGET_SCALAR_MODE_SUPPORTED_P
+#define TARGET_SCALAR_MODE_SUPPORTED_P c6x_scalar_mode_supported_p
+#undef TARGET_VECTOR_MODE_SUPPORTED_P
+#define TARGET_VECTOR_MODE_SUPPORTED_P c6x_vector_mode_supported_p
+#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
+#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE c6x_preferred_simd_mode
+
+#undef TARGET_RTX_COSTS
+#define TARGET_RTX_COSTS c6x_rtx_costs
+
+/* Instruction scheduling hooks. */
+#undef TARGET_SCHED_INIT
+#define TARGET_SCHED_INIT c6x_sched_init
+#undef TARGET_SCHED_SET_SCHED_FLAGS
+#define TARGET_SCHED_SET_SCHED_FLAGS c6x_set_sched_flags
+#undef TARGET_SCHED_ADJUST_COST
+#define TARGET_SCHED_ADJUST_COST c6x_adjust_cost
+#undef TARGET_SCHED_ISSUE_RATE
+#define TARGET_SCHED_ISSUE_RATE c6x_issue_rate
+#undef TARGET_SCHED_VARIABLE_ISSUE
+#define TARGET_SCHED_VARIABLE_ISSUE c6x_variable_issue
+#undef TARGET_SCHED_REORDER
+#define TARGET_SCHED_REORDER c6x_sched_reorder
+#undef TARGET_SCHED_REORDER2
+#define TARGET_SCHED_REORDER2 c6x_sched_reorder2
+#undef TARGET_SCHED_EXPOSED_PIPELINE
+#define TARGET_SCHED_EXPOSED_PIPELINE true
+
+#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
+#define TARGET_SCHED_ALLOC_SCHED_CONTEXT c6x_alloc_sched_context
+#undef TARGET_SCHED_INIT_SCHED_CONTEXT
+#define TARGET_SCHED_INIT_SCHED_CONTEXT c6x_init_sched_context
+#undef TARGET_SCHED_SET_SCHED_CONTEXT
+#define TARGET_SCHED_SET_SCHED_CONTEXT c6x_set_sched_context
+#undef TARGET_SCHED_FREE_SCHED_CONTEXT
+#define TARGET_SCHED_FREE_SCHED_CONTEXT c6x_free_sched_context
+
+#undef TARGET_CAN_ELIMINATE
+#define TARGET_CAN_ELIMINATE c6x_can_eliminate
+
+#undef TARGET_PREFERRED_RENAME_CLASS
+#define TARGET_PREFERRED_RENAME_CLASS c6x_preferred_rename_class
+
+#undef TARGET_MACHINE_DEPENDENT_REORG
+#define TARGET_MACHINE_DEPENDENT_REORG c6x_reorg
+
+#undef TARGET_ASM_FILE_START
+#define TARGET_ASM_FILE_START c6x_file_start
+
+#undef TARGET_PRINT_OPERAND
+#define TARGET_PRINT_OPERAND c6x_print_operand
+#undef TARGET_PRINT_OPERAND_ADDRESS
+#define TARGET_PRINT_OPERAND_ADDRESS c6x_print_operand_address
+#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
+#define TARGET_PRINT_OPERAND_PUNCT_VALID_P c6x_print_operand_punct_valid_p
+
+/* C6x unwinding tables use a different format for the typeinfo tables. */
+#undef TARGET_ASM_TTYPE
+#define TARGET_ASM_TTYPE c6x_output_ttype
+
+#undef TARGET_DWARF_REGISTER_SPAN
+#define TARGET_DWARF_REGISTER_SPAN c6x_dwarf_register_span
+
+#undef TARGET_INIT_BUILTINS
+#define TARGET_INIT_BUILTINS c6x_init_builtins
+#undef TARGET_EXPAND_BUILTIN
+#define TARGET_EXPAND_BUILTIN c6x_expand_builtin
+#undef TARGET_BUILTIN_DECL
+#define TARGET_BUILTIN_DECL c6x_builtin_decl
+
+/* The target structure, filled in with the hooks defined above. */
+struct gcc_target targetm = TARGET_INITIALIZER;
+
+#include "gt-c6x.h"
--- /dev/null
+/* Target Definitions for TI C6X.
+ Copyright (C) 2010, 2011 Free Software Foundation, Inc.
+ Contributed by Andrew Jenner <andrew@codesourcery.com>
+ Contributed by Bernd Schmidt <bernds@codesourcery.com>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_C6X_H
+#define GCC_C6X_H
+
+/* Feature bit definitions that enable specific insns. */
+#define C6X_INSNS_C62X 1
+#define C6X_INSNS_C64X 2
+#define C6X_INSNS_C64XP 4
+#define C6X_INSNS_C67X 8
+#define C6X_INSNS_C67XP 16
+#define C6X_INSNS_C674X 32
+#define C6X_INSNS_ATOMIC 64
+#define C6X_INSNS_ALL_CPU_BITS 127
+
+#define C6X_DEFAULT_INSN_MASK \
+ (C6X_INSNS_C62X | C6X_INSNS_C64X | C6X_INSNS_C64XP)
+
+/* A mask of allowed insn types, as defined above. */
+extern unsigned long c6x_insn_mask;
+
+/* Value of -march= */
+extern c6x_cpu_t c6x_arch;
+#define C6X_DEFAULT_ARCH C6X_CPU_C64XP
+
+/* True if the target has C64x instructions. */
+#define TARGET_INSNS_64 ((c6x_insn_mask & C6X_INSNS_C64X) != 0)
+/* True if the target has C64x+ instructions. */
+#define TARGET_INSNS_64PLUS ((c6x_insn_mask & C6X_INSNS_C64XP) != 0)
+/* True if the target has C67x instructions. */
+#define TARGET_INSNS_67 ((c6x_insn_mask & C6X_INSNS_C67X) != 0)
+/* True if the target has C67x+ instructions. */
+#define TARGET_INSNS_67PLUS ((c6x_insn_mask & C6X_INSNS_C67XP) != 0)
+
+/* True if the target supports doubleword loads. */
+#define TARGET_LDDW (TARGET_INSNS_64 || TARGET_INSNS_67)
+/* True if the target supports doubleword stores. */
+#define TARGET_STDW TARGET_INSNS_64
+/* True if the target supports the MPY32 family of instructions. */
+#define TARGET_MPY32 TARGET_INSNS_64PLUS
+/* True if the target has floating point hardware. */
+#define TARGET_FP TARGET_INSNS_67
+/* True if the target has C67x+ floating point extensions. */
+#define TARGET_FP_EXT TARGET_INSNS_67PLUS
+
+#define TARGET_DEFAULT 0
+
+/* Run-time Target. */
+
+#define TARGET_CPU_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_assert ("machine=tic6x"); \
+ builtin_assert ("cpu=tic6x"); \
+ builtin_define ("__TMS320C6X__"); \
+ builtin_define ("_TMS320C6X"); \
+ \
+ if (TARGET_DSBT) \
+ builtin_define ("__DSBT__"); \
+ \
+ if (TARGET_BIG_ENDIAN) \
+ builtin_define ("_BIG_ENDIAN"); \
+ else \
+ builtin_define ("_LITTLE_ENDIAN"); \
+ \
+ switch (c6x_arch) \
+ { \
+ case C6X_CPU_C62X: \
+ builtin_define ("_TMS320C6200"); \
+ break; \
+ \
+ case C6X_CPU_C64XP: \
+ builtin_define ("_TMS320C6400_PLUS"); \
+ /* ... fall through ... */ \
+ case C6X_CPU_C64X: \
+ builtin_define ("_TMS320C6400"); \
+ break; \
+ \
+ case C6X_CPU_C67XP: \
+ builtin_define ("_TMS320C6700_PLUS"); \
+ /* ... fall through ... */ \
+ case C6X_CPU_C67X: \
+ builtin_define ("_TMS320C6700"); \
+ break; \
+ \
+ case C6X_CPU_C674X: \
+ builtin_define ("_TMS320C6740"); \
+ builtin_define ("_TMS320C6700_PLUS"); \
+ builtin_define ("_TMS320C6700"); \
+ builtin_define ("_TMS320C6400_PLUS"); \
+ builtin_define ("_TMS320C6400"); \
+ break; \
+ } \
+ } while (0)
+
+#define OPTION_DEFAULT_SPECS \
+ {"arch", "%{!march=*:-march=%(VALUE)}" }
+
+/* Storage Layout. */
+
+#define BITS_BIG_ENDIAN 0
+#define BYTES_BIG_ENDIAN (TARGET_BIG_ENDIAN != 0)
+#define WORDS_BIG_ENDIAN (TARGET_BIG_ENDIAN != 0)
+
+#define REG_WORDS_BIG_ENDIAN 0
+
+#define UNITS_PER_WORD 4
+#define PARM_BOUNDARY 8
+#define STACK_BOUNDARY 64
+#define FUNCTION_BOUNDARY 32
+#define BIGGEST_ALIGNMENT 64
+#define STRICT_ALIGNMENT 1
+
+/* The ABI requires static arrays must be at least 8 byte aligned.
+ Really only externally visible arrays must be aligned this way, as
+ only those are directly visible from another compilation unit. But
+ we don't have that information available here. */
+#define DATA_ALIGNMENT(TYPE, ALIGN) \
+ (((ALIGN) < BITS_PER_UNIT * 8 && TREE_CODE (TYPE) == ARRAY_TYPE) \
+ ? BITS_PER_UNIT * 8 : (ALIGN))
+
+/* Type Layout. */
+
+#define DEFAULT_SIGNED_CHAR 1
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+/* Registers. */
+
+#define FIRST_PSEUDO_REGISTER 67
+#define FIXED_REGISTERS \
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, \
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \
+ 1, 1, 1}
+#define CALL_USED_REGISTERS \
+ { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, \
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, \
+ 1, 1, 1}
+
+/* This lists call-used non-predicate registers first, followed by call-used
+ registers, followed by predicate registers. We want to avoid allocating
+ the predicate registers for other uses as much as possible. */
+#define REG_ALLOC_ORDER \
+ { \
+ REG_A0, REG_A3, REG_A4, REG_A5, REG_A6, REG_A7, REG_A8, REG_A9, \
+ REG_A16, REG_A17, REG_A18, REG_A19, REG_A20, REG_A21, REG_A22, REG_A23, \
+ REG_A24, REG_A25, REG_A26, REG_A27, REG_A28, REG_A29, REG_A30, REG_A31, \
+ REG_B4, REG_B5, REG_B6, REG_B7, REG_B8, REG_B9, REG_B16, \
+ REG_B17, REG_B18, REG_B19, REG_B20, REG_B21, REG_B22, REG_B23, REG_B24, \
+ REG_B25, REG_B26, REG_B27, REG_B28, REG_B29, REG_B30, REG_B31, \
+ REG_A10, REG_A11, REG_A12, REG_A13, REG_A14, REG_A15, \
+ REG_B3, REG_B10, REG_B11, REG_B12, REG_B13, REG_B14, REG_B15, \
+ REG_A1, REG_A2, REG_B0, REG_B1, REG_B2, REG_ILC \
+ }
+
+#define HARD_REGNO_NREGS(regno, mode) \
+ ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) \
+ / UNITS_PER_WORD)
+
+#define HARD_REGNO_MODE_OK(reg, mode) (GET_MODE_SIZE (mode) <= UNITS_PER_WORD \
+ ? 1 : ((reg) & 1) == 0)
+
+#define MODES_TIEABLE_P(mode1, mode2) \
+ ((mode1) == (mode2) || \
+ (GET_MODE_SIZE (mode1) <= UNITS_PER_WORD && \
+ GET_MODE_SIZE (mode2) <= UNITS_PER_WORD))
+
+
+/* Register Classes. */
+
+enum reg_class
+ {
+ NO_REGS,
+ PREDICATE_A_REGS,
+ PREDICATE_B_REGS,
+ PREDICATE_REGS,
+ PICREG,
+ SPREG,
+ CALL_USED_B_REGS,
+ NONPREDICATE_A_REGS,
+ NONPREDICATE_B_REGS,
+ NONPREDICATE_REGS,
+ A_REGS,
+ B_REGS,
+ GENERAL_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+ };
+
+#define N_REG_CLASSES (int) LIM_REG_CLASSES
+
+#define REG_CLASS_NAMES { \
+ "NO_REGS", \
+ "PREDICATE_A_REGS", \
+ "PREDICATE_B_REGS", \
+ "PREDICATE_REGS", \
+ "PICREG", \
+ "SPREG", \
+ "CALL_USED_B_REGS", \
+ "NONPREDICATE_A_REGS", \
+ "NONPREDICATE_B_REGS", \
+ "NONPREDICATE_REGS", \
+ "A_REGS", \
+ "B_REGS", \
+ "GENERAL_REGS", \
+ "ALL_REGS" }
+
+#define REG_CLASS_CONTENTS \
+{ \
+ /* NO_REGS. */ \
+ { 0x00000000, 0x00000000, 0 }, \
+ /* PREDICATE_A_REGS. */ \
+ { 0x00000006, 0x00000000, 0 }, \
+ /* PREDICATE_B_REGS. */ \
+ { 0x00000000, 0x00000007, 0 }, \
+ /* PREDICATE_REGS. */ \
+ { 0x00000006, 0x00000007, 0 }, \
+ /* PICREG. */ \
+ { 0x00000000, 0x00004000, 0 }, \
+ /* SPREG. */ \
+ { 0x00000000, 0x00008000, 0 }, \
+ /* CALL_USED_B_REGS. */ \
+ { 0x00000000, 0xFFFF03FF, 0 }, \
+ /* NONPREDICATE_A_REGS. */ \
+ { 0xFFFFFFF9, 0x00000000, 0 }, \
+ /* NONPREDICATE_B_REGS. */ \
+ { 0x00000000, 0xFFFFFFF8, 0 }, \
+ /* NONPREDICATE_REGS. */ \
+ { 0xFFFFFFF9, 0xFFFFFFF8, 0 }, \
+ /* A_REGS. */ \
+ { 0xFFFFFFFF, 0x00000000, 3 }, \
+ /* B_REGS. */ \
+ { 0x00000000, 0xFFFFFFFF, 3 }, \
+ /* GENERAL_REGS. */ \
+ { 0xFFFFFFFF, 0xFFFFFFFF, 3 }, \
+ /* ALL_REGS. */ \
+ { 0xFFFFFFFF, 0xFFFFFFFF, 7 }, \
+}
+
+#define A_REGNO_P(N) ((N) <= REG_A31)
+#define B_REGNO_P(N) ((N) >= REG_B0 && (N) <= REG_B31)
+
+#define A_REG_P(X) (REG_P (X) && A_REGNO_P (REGNO (X)))
+#define CROSS_OPERANDS(X0,X1) \
+ (A_REG_P (X0) == A_REG_P (X1) ? CROSS_N : CROSS_Y)
+
+#define REGNO_REG_CLASS(reg) \
+ ((reg) >= REG_A1 && (reg) <= REG_A2 ? PREDICATE_A_REGS \
+ : (reg) == REG_A0 && TARGET_INSNS_64 ? PREDICATE_A_REGS \
+ : (reg) >= REG_B0 && (reg) <= REG_B2 ? PREDICATE_B_REGS \
+ : A_REGNO_P (reg) ? NONPREDICATE_A_REGS \
+ : call_used_regs[reg] ? CALL_USED_B_REGS : B_REGS)
+
+#define BASE_REG_CLASS ALL_REGS
+#define INDEX_REG_CLASS ALL_REGS
+
+#define REGNO_OK_FOR_BASE_STRICT_P(X) \
+ ((X) < FIRST_PSEUDO_REGISTER \
+ || (reg_renumber[X] >= 0 && reg_renumber[X] < FIRST_PSEUDO_REGISTER))
+#define REGNO_OK_FOR_BASE_NONSTRICT_P(X) 1
+
+#define REGNO_OK_FOR_INDEX_STRICT_P(X) \
+ ((X) < FIRST_PSEUDO_REGISTER \
+ || (reg_renumber[X] >= 0 && reg_renumber[X] < FIRST_PSEUDO_REGISTER))
+#define REGNO_OK_FOR_INDEX_NONSTRICT_P(X) 1
+
+#ifdef REG_OK_STRICT
+#define REGNO_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_STRICT_P (X)
+#define REGNO_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_STRICT_P (X)
+#else
+#define REGNO_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_NONSTRICT_P (X)
+#define REGNO_OK_FOR_INDEX_P(X) REGNO_OK_FOR_INDEX_NONSTRICT_P (X)
+#endif
+
+#define CLASS_MAX_NREGS(class, mode) \
+ ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD)
+
+#define REGNO_OK_FOR_INDIRECT_JUMP_P(REGNO, MODE) B_REGNO_P (REGNO)
+
+/* Stack and Calling. */
+
+/* SP points to 4 bytes below the first word of the frame. */
+#define STACK_POINTER_OFFSET 4
+/* Likewise for AP (which is the incoming stack pointer). */
+#define FIRST_PARM_OFFSET(fundecl) 4
+#define STARTING_FRAME_OFFSET 0
+#define FRAME_GROWS_DOWNWARD 1
+#define STACK_GROWS_DOWNWARD
+
+#define STACK_POINTER_REGNUM REG_B15
+#define HARD_FRAME_POINTER_REGNUM REG_A15
+/* These two always get eliminated in favour of the stack pointer
+ or the hard frame pointer. */
+#define FRAME_POINTER_REGNUM REG_FRAME
+#define ARG_POINTER_REGNUM REG_ARGP
+
+#define PIC_OFFSET_TABLE_REGNUM REG_B14
+
+/* We keep the stack pointer constant rather than using push/pop
+ instructions. */
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+/* Before the prologue, the return address is in the B3 register. */
+#define RETURN_ADDR_REGNO REG_B3
+#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, RETURN_ADDR_REGNO)
+
+#define RETURN_ADDR_RTX(COUNT, FRAME) c6x_return_addr_rtx (COUNT)
+
+#define INCOMING_FRAME_SP_OFFSET 0
+#define ARG_POINTER_CFA_OFFSET(fundecl) 0
+
+#define STATIC_CHAIN_REGNUM REG_A2
+
+struct c6x_args {
+ /* Number of arguments to pass in registers. */
+ int nregs;
+ /* Number of arguments passed in registers so far. */
+ int count;
+};
+
+#define CUMULATIVE_ARGS struct c6x_args
+
+#define INIT_CUMULATIVE_ARGS(cum, fntype, libname, fndecl, n_named_args) \
+ c6x_init_cumulative_args (&cum, fntype, libname, n_named_args)
+
+#define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \
+ (c6x_block_reg_pad_upward (MODE, TYPE, FIRST) ? upward : downward)
+
+#define FUNCTION_ARG_REGNO_P(r) \
+ (((r) >= REG_A4 && (r) <= REG_A13) || ((r) >= REG_B4 && (r) <= REG_B13))
+
+#define DEFAULT_PCC_STRUCT_RETURN 0
+
+#define FUNCTION_PROFILER(file, labelno) \
+ fatal_error ("profiling is not yet implemented for this architecture")
+
+
+/* Trampolines. */
+#define TRAMPOLINE_SIZE 32
+#define TRAMPOLINE_ALIGNMENT 256
+\f
+#define ELIMINABLE_REGS \
+{{ ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \
+ { FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ { FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}} \
+
+/* Define the offset between two registers, one to be eliminated, and the other
+ its replacement, at the start of a routine. */
+
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+ ((OFFSET) = c6x_initial_elimination_offset ((FROM), (TO)))
+\f
+/* Addressing Modes. */
+
+#define CONSTANT_ADDRESS_P(x) (CONSTANT_P(x) && GET_CODE(x) != CONST_DOUBLE)
+#define MAX_REGS_PER_ADDRESS 2
+
+#define HAVE_PRE_DECREMENT 1
+#define HAVE_POST_DECREMENT 1
+#define HAVE_PRE_INCREMENT 1
+#define HAVE_POST_INCREMENT 1
+
+/* Register forms are available, but due to scaling we currently don't
+ support them. */
+#define HAVE_PRE_MODIFY_DISP 1
+#define HAVE_POST_MODIFY_DISP 1
+
+#define LEGITIMATE_PIC_OPERAND_P(X) \
+ (!symbolic_operand (X, SImode))
+\f
+struct GTY(()) machine_function
+{
+ /* True if we expanded a sibling call. */
+ int contains_sibcall;
+};
+\f
+/* Costs. */
+#define NO_FUNCTION_CSE 1
+
+#define SLOW_BYTE_ACCESS 0
+
+#define BRANCH_COST(speed_p, predictable_p) 6
+
+\f
+/* Model costs for the vectorizer. */
+
+/* Cost of conditional branch. */
+#ifndef TARG_COND_BRANCH_COST
+#define TARG_COND_BRANCH_COST 6
+#endif
+
+/* Cost of any scalar operation, excluding load and store. */
+#ifndef TARG_SCALAR_STMT_COST
+#define TARG_SCALAR_STMT_COST 1
+#endif
+
+/* Cost of scalar load. */
+#undef TARG_SCALAR_LOAD_COST
+#define TARG_SCALAR_LOAD_COST 2 /* load + rotate */
+
+/* Cost of scalar store. */
+#undef TARG_SCALAR_STORE_COST
+#define TARG_SCALAR_STORE_COST 10
+
+/* Cost of any vector operation, excluding load, store,
+ or vector to scalar operation. */
+#undef TARG_VEC_STMT_COST
+#define TARG_VEC_STMT_COST 1
+
+/* Cost of vector to scalar operation. */
+#undef TARG_VEC_TO_SCALAR_COST
+#define TARG_VEC_TO_SCALAR_COST 1
+
+/* Cost of scalar to vector operation. */
+#undef TARG_SCALAR_TO_VEC_COST
+#define TARG_SCALAR_TO_VEC_COST 1
+
+/* Cost of aligned vector load. */
+#undef TARG_VEC_LOAD_COST
+#define TARG_VEC_LOAD_COST 1
+
+/* Cost of misaligned vector load. */
+#undef TARG_VEC_UNALIGNED_LOAD_COST
+#define TARG_VEC_UNALIGNED_LOAD_COST 2
+
+/* Cost of vector store. */
+#undef TARG_VEC_STORE_COST
+#define TARG_VEC_STORE_COST 1
+
+/* Cost of vector permutation. */
+#ifndef TARG_VEC_PERMUTE_COST
+#define TARG_VEC_PERMUTE_COST 1
+#endif
+
+/* Exception handling. */
+#define TARGET_EXTRA_CFI_SECTION(unwind) ((unwind) ? ".c6xabi.exidx" : NULL)
+/* ttype entries (the only interesting data references used) are
+ sb-relative got-indirect (aka .ehtype). */
+#define ASM_PREFERRED_EH_DATA_FORMAT(code, data) \
+ (((code) == 0 && (data) == 1) ? (DW_EH_PE_datarel | DW_EH_PE_indirect) \
+ : DW_EH_PE_absptr)
+
+/* This should be the same as the definition in elfos.h, plus the call
+ to output special unwinding directives. */
+#undef ASM_DECLARE_FUNCTION_NAME
+#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \
+ do \
+ { \
+ c6x_output_file_unwind (FILE); \
+ ASM_OUTPUT_TYPE_DIRECTIVE (FILE, NAME, "function"); \
+ ASM_DECLARE_RESULT (FILE, DECL_RESULT (DECL)); \
+ ASM_OUTPUT_LABEL (FILE, NAME); \
+ } \
+ while (0)
+
+/* This should be the same as the definition in elfos.h, plus the call
+ to output special unwinding directives. */
+#undef ASM_DECLARE_FUNCTION_SIZE
+#define ASM_DECLARE_FUNCTION_SIZE(STREAM, NAME, DECL) \
+ c6x_function_end (STREAM, NAME)
+
+/* Arbitrarily choose A4/A5. */
+#define EH_RETURN_DATA_REGNO(N) (((N) < 2) ? (N) + 4 : INVALID_REGNUM)
+
+/* The register that holds the return address in exception handlers. */
+#define C6X_EH_STACKADJ_REGNUM 3
+#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (SImode, C6X_EH_STACKADJ_REGNUM)
+
+
+/* Assembler Format. */
+
+#define DWARF2_ASM_LINE_DEBUG_INFO 1
+
+#undef ASM_APP_ON
+#define ASM_APP_ON "\t; #APP \n"
+#undef ASM_APP_OFF
+#define ASM_APP_OFF "\t; #NO_APP \n"
+
+#define ASM_OUTPUT_COMMON(stream, name, size, rounded)
+#define ASM_OUTPUT_LOCAL(stream, name, size, rounded)
+
+#define GLOBAL_ASM_OP "\t.global\t"
+
+#define REGISTER_NAMES \
+ { \
+ "A0", "A1", "A2", "A3", "A4", "A5", "A6", "A7", \
+ "A8", "A9", "A10", "A11", "A12", "A13", "A14", "A15", \
+ "A16", "A17", "A18", "A19", "A20", "A21", "A22", "A23", \
+ "A24", "A25", "A26", "A27", "A28", "A29", "A30", "A31", \
+ "B0", "B1", "B2", "B3", "B4", "B5", "B6", "B7", \
+ "B8", "B9", "B10", "B11", "B12", "B13", "B14", "B15", \
+ "B16", "B17", "B18", "B19", "B20", "B21", "B22", "B23", \
+ "B24", "B25", "B26", "B27", "B28", "B29", "B30", "B31", \
+ "FP", "ARGP", "ILC" }
+
+#define DBX_REGISTER_NUMBER(N) (dbx_register_map[(N)])
+
+extern int const dbx_register_map[FIRST_PSEUDO_REGISTER];
+
+#define FINAL_PRESCAN_INSN c6x_final_prescan_insn
+
+#define TEXT_SECTION_ASM_OP ".text;"
+#define DATA_SECTION_ASM_OP ".data;"
+
+#define ASM_OUTPUT_ALIGN(stream, power) \
+ do \
+ { \
+ if (power) \
+ fprintf ((stream), "\t.align\t%d\n", power); \
+ } \
+ while (0)
+
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+do { char __buf[256]; \
+ fprintf (FILE, "\t.long\t"); \
+ ASM_GENERATE_INTERNAL_LABEL (__buf, "L", VALUE); \
+ assemble_name (FILE, __buf); \
+ fputc ('\n', FILE); \
+ } while (0)
+
+/* Determine whether EXP (an expression or a decl) should be placed
+ into one of the small data sections. */
+#define PLACE_IN_SDATA_P(EXP) \
+ (c6x_sdata_mode == C6X_SDATA_NONE ? false \
+ : c6x_sdata_mode == C6X_SDATA_ALL ? true \
+ : !AGGREGATE_TYPE_P (TREE_TYPE (EXP)))
+
+#define SCOMMON_ASM_OP "\t.scomm\t"
+
+/* Output a common symbol, choosing .scomm for small-data candidates
+   and the ordinary COMMON_ASM_OP otherwise.  ALIGN arrives in bits. */
+#undef ASM_OUTPUT_ALIGNED_DECL_COMMON
+#define ASM_OUTPUT_ALIGNED_DECL_COMMON(FILE, DECL, NAME, SIZE, ALIGN) \
+ do \
+ { \
+ if (DECL != NULL && PLACE_IN_SDATA_P (DECL)) \
+ fprintf ((FILE), "%s", SCOMMON_ASM_OP); \
+ else \
+ fprintf ((FILE), "%s", COMMON_ASM_OP); \
+ assemble_name ((FILE), (NAME)); \
+ fprintf ((FILE), ",%u,%u\n", (int)(SIZE), (ALIGN) / BITS_PER_UNIT);\
+ } \
+ while (0)
+
+/* This says how to output assembler code to declare an
+ uninitialized internal linkage data object.  Small-data objects
+ go to .sbss, everything else to .bss; a zero-sized object still
+ reserves one byte so its label is unique. */
+
+#undef ASM_OUTPUT_ALIGNED_DECL_LOCAL
+#define ASM_OUTPUT_ALIGNED_DECL_LOCAL(FILE, DECL, NAME, SIZE, ALIGN) \
+do { \
+ if (PLACE_IN_SDATA_P (DECL)) \
+ switch_to_section (sbss_section); \
+ else \
+ switch_to_section (bss_section); \
+ ASM_OUTPUT_TYPE_DIRECTIVE (FILE, NAME, "object"); \
+ if (!flag_inhibit_size_directive) \
+ ASM_OUTPUT_SIZE_DIRECTIVE (FILE, NAME, SIZE); \
+ ASM_OUTPUT_ALIGN ((FILE), exact_log2((ALIGN) / BITS_PER_UNIT)); \
+ ASM_OUTPUT_LABEL(FILE, NAME); \
+ ASM_OUTPUT_SKIP((FILE), (SIZE) ? (SIZE) : 1); \
+} while (0)
+
+/* With -fpic, jump tables use PC-relative entries and live in the text
+   section so their addresses need no relocation. */
+#define CASE_VECTOR_PC_RELATIVE flag_pic
+#define JUMP_TABLES_IN_TEXT_SECTION flag_pic
+
+/* Align jump tables to 32 bytes in text (fetch-packet alignment),
+   4 bytes otherwise; values are log2. */
+#define ADDR_VEC_ALIGN(VEC) (JUMP_TABLES_IN_TEXT_SECTION ? 5 : 2)
+
+/* This is how to output an element of a case-vector that is relative. */
+#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \
+ do { char buf[100]; \
+ fputs ("\t.long ", FILE); \
+ ASM_GENERATE_INTERNAL_LABEL (buf, "L", VALUE); \
+ assemble_name (FILE, buf); \
+ putc ('-', FILE); \
+ ASM_GENERATE_INTERNAL_LABEL (buf, "L", REL); \
+ assemble_name (FILE, buf); \
+ putc ('\n', FILE); \
+ } while (0)
+
+/* Misc. */
+
+#define CASE_VECTOR_MODE SImode
+#define MOVE_MAX 4
+#define MOVE_RATIO(SPEED) 4
+#define TRULY_NOOP_TRUNCATION(outprec, inprec) 1
+/* CLZ of zero is well-defined on this target and yields 32. */
+#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) ((VALUE) = 32, 1)
+#define Pmode SImode
+#define FUNCTION_MODE QImode
+
+/* Records the -fpic setting at startup; see the .c file for its use. */
+extern int c6x_initial_flag_pic;
+
+#endif /* GCC_C6X_H */
--- /dev/null
+;; Machine description for TI C6X.
+;; Copyright (C) 2010, 2011 Free Software Foundation, Inc.
+;; Contributed by Andrew Jenner <andrew@codesourcery.com>
+;; Contributed by Bernd Schmidt <bernds@codesourcery.com>
+;; Contributed by CodeSourcery.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+
+;; Register names
+
+;; Hard register numbers: A0-A31 are 0-31 and B0-B31 are 32-63.  B15
+;; doubles as the stack pointer (REG_SP).  Registers 64-66 are the
+;; frame pointer, argument pointer and the inner loop counter.
+(define_constants
+ [(REG_A0 0)
+ (REG_A1 1)
+ (REG_A2 2)
+ (REG_A3 3)
+ (REG_A4 4)
+ (REG_A5 5)
+ (REG_A6 6)
+ (REG_A7 7)
+ (REG_A8 8)
+ (REG_A9 9)
+ (REG_A10 10)
+ (REG_A11 11)
+ (REG_A12 12)
+ (REG_A13 13)
+ (REG_A14 14)
+ (REG_A15 15)
+ (REG_A16 16)
+ (REG_A17 17)
+ (REG_A18 18)
+ (REG_A19 19)
+ (REG_A20 20)
+ (REG_A21 21)
+ (REG_A22 22)
+ (REG_A23 23)
+ (REG_A24 24)
+ (REG_A25 25)
+ (REG_A26 26)
+ (REG_A27 27)
+ (REG_A28 28)
+ (REG_A29 29)
+ (REG_A30 30)
+ (REG_A31 31)
+ (REG_B0 32)
+ (REG_B1 33)
+ (REG_B2 34)
+ (REG_B3 35)
+ (REG_B4 36)
+ (REG_B5 37)
+ (REG_B6 38)
+ (REG_B7 39)
+ (REG_B8 40)
+ (REG_B9 41)
+ (REG_B10 42)
+ (REG_B11 43)
+ (REG_B12 44)
+ (REG_B13 45)
+ (REG_B14 46)
+ (REG_SP 47)
+ (REG_B15 47)
+ (REG_B16 48)
+ (REG_B17 49)
+ (REG_B18 50)
+ (REG_B19 51)
+ (REG_B20 52)
+ (REG_B21 53)
+ (REG_B22 54)
+ (REG_B23 55)
+ (REG_B24 56)
+ (REG_B25 57)
+ (REG_B26 58)
+ (REG_B27 59)
+ (REG_B28 60)
+ (REG_B29 61)
+ (REG_B30 62)
+ (REG_B31 63)
+ (REG_FRAME 64)
+ (REG_ARGP 65)
+ (REG_ILC 66)])
+
+;; Opaque operation markers used by the patterns in this file.  The
+;; REAL_*/SHADOW pairs are used by the machine reorg pass when splitting
+;; multi-cycle operations.
+(define_c_enum "unspec" [
+ UNSPEC_NOP
+ UNSPEC_RCP
+ UNSPEC_MISALIGNED_ACCESS
+ UNSPEC_ADDKPC
+ UNSPEC_SETUP_DSBT
+ UNSPEC_LOAD_GOT
+ UNSPEC_LOAD_SDATA
+ UNSPEC_BITREV
+ UNSPEC_GOTOFF
+ UNSPEC_MVILC
+ UNSPEC_REAL_JUMP
+ UNSPEC_REAL_LOAD
+ UNSPEC_REAL_MULT
+ UNSPEC_JUMP_SHADOW
+ UNSPEC_LOAD_SHADOW
+ UNSPEC_MULT_SHADOW
+ UNSPEC_EPILOGUE_BARRIER
+ UNSPEC_ATOMIC
+ UNSPEC_CLR
+ UNSPEC_EXT
+ UNSPEC_EXTU
+ UNSPEC_SUBC
+ UNSPEC_AVG
+])
+
+;; Volatile unspecs: operations with side effects that must not be
+;; moved or deleted by the optimizers.
+(define_c_enum "unspecv" [
+ UNSPECV_BLOCKAGE
+ UNSPECV_SPLOOP
+ UNSPECV_SPKERNEL
+ UNSPECV_EH_RETURN
+ UNSPECV_CAS
+])
+
+;; -------------------------------------------------------------------------
+;; Instruction attributes
+;; -------------------------------------------------------------------------
+
+;; The CPU variant being compiled for, taken from the c6x_arch variable.
+(define_attr "cpu"
+ "c62x,c64x,c64xp,c67x,c67xp,c674x"
+ (const (symbol_ref "(enum attr_cpu)c6x_arch")))
+
+;; Define a type for each insn which is used in the scheduling description.
+;; These correspond to the types defined in chapter 4 of the C674x manual.
+(define_attr "type"
+ "unknown,single,mpy2,store,storen,mpy4,load,loadn,branch,call,callp,dp2,fp4,
+ intdp,cmpdp,adddp,mpy,mpyi,mpyid,mpydp,mpyspdp,mpysp2dp,spkernel,sploop,
+ mvilc,blockage,shadow,load_shadow,mult_shadow,atomic"
+ (const_string "single"))
+
+;; The register file used by an instruction's destination register.
+;; The function destreg_file computes this; instructions can override the
+;; attribute if they aren't a single_set.
+;; For stores the value is derived from the stored operand (operand 1)
+;; instead of the destination.
+(define_attr "dest_regfile"
+ "unknown,any,a,b"
+ (cond [(eq_attr "type" "single,load,mpy2,mpy4,dp2,fp4,intdp,cmpdp,adddp,mpy,mpyi,mpyid,mpydp,mpyspdp,mpysp2dp")
+ (cond [(match_operand 0 "a_register" "") (const_string "a")
+ (match_operand 0 "b_register" "") (const_string "b")]
+ (const_string "unknown"))
+ (eq_attr "type" "store")
+ (cond [(match_operand 1 "a_register" "") (const_string "a")
+ (match_operand 1 "b_register" "") (const_string "b")]
+ (const_string "unknown"))]
+ (const_string "unknown")))
+
+;; The register file of the address register in a load or store.
+(define_attr "addr_regfile"
+ "unknown,a,b"
+ (const_string "unknown"))
+
+;; Whether the insn uses a cross path (reads an operand from the
+;; opposite register file).
+(define_attr "cross"
+ "n,y"
+ (const_string "n"))
+
+(define_attr "has_shadow"
+ "n,y"
+ (const_string "n"))
+
+;; The number of cycles the instruction takes to finish. Any cycles above
+;; the first are delay slots.
+(define_attr "cycles" ""
+ (cond [(eq_attr "type" "branch,call") (const_int 6)
+ (eq_attr "type" "load,loadn") (const_int 5)
+ (eq_attr "type" "dp2") (const_int 2)
+ (eq_attr "type" "mpy2") (const_int 2)
+ (eq_attr "type" "mpy4") (const_int 4)
+ (eq_attr "type" "fp4") (const_int 4)
+ (eq_attr "type" "mvilc") (const_int 4)
+ (eq_attr "type" "cmpdp") (const_int 2)
+ (eq_attr "type" "intdp") (const_int 5)
+ (eq_attr "type" "adddp") (const_int 7)
+ (eq_attr "type" "mpydp") (const_int 10)
+ (eq_attr "type" "mpyi") (const_int 9)
+ (eq_attr "type" "mpyid") (const_int 10)
+ (eq_attr "type" "mpyspdp") (const_int 7)
+ (eq_attr "type" "mpysp2dp") (const_int 5)]
+ (const_int 1)))
+
+;; Most insns can be conditionally executed; patterns that cannot
+;; override this with "no".
+(define_attr "predicable" "no,yes"
+ (const_string "yes"))
+
+(define_attr "enabled" "no,yes"
+ (const_string "yes"))
+
+;; Specify which units can be used by a given instruction. Normally,
+;; dest_regfile is used to select between the two halves of the machine.
+;; D_ADDR is for load/store instructions; they use the D unit and use
+;; addr_regfile to choose between D1 and D2.
+
+(define_attr "units62"
+ "unknown,d,d_addr,l,m,s,dl,ds,dls,ls"
+ (const_string "unknown"))
+
+(define_attr "units64"
+ "unknown,d,d_addr,l,m,s,dl,ds,dls,ls"
+ (const_string "unknown"))
+
+;; The later ISA variants default to the value of an earlier one, so
+;; patterns only need to override where the unit set actually differs.
+(define_attr "units64p"
+ "unknown,d,d_addr,l,m,s,dl,ds,dls,ls"
+ (attr "units64"))
+
+(define_attr "units67"
+ "unknown,d,d_addr,l,m,s,dl,ds,dls,ls"
+ (attr "units62"))
+
+(define_attr "units67p"
+ "unknown,d,d_addr,l,m,s,dl,ds,dls,ls"
+ (attr "units67"))
+
+(define_attr "units674"
+ "unknown,d,d_addr,l,m,s,dl,ds,dls,ls"
+ (attr "units64"))
+
+;; Select the per-ISA units attribute matching the cpu attribute.
+(define_attr "units"
+ "unknown,d,d_addr,l,m,s,dl,ds,dls,ls"
+ (cond [(eq_attr "cpu" "c62x")
+ (attr "units62")
+ (eq_attr "cpu" "c67x")
+ (attr "units67")
+ (eq_attr "cpu" "c67xp")
+ (attr "units67p")
+ (eq_attr "cpu" "c64x")
+ (attr "units64")
+ (eq_attr "cpu" "c64xp")
+ (attr "units64p")
+ (eq_attr "cpu" "c674x")
+ (attr "units674")
+ ]
+ (const_string "unknown")))
+
+;; Separate automata for each side's functional units, multipliers and
+;; address paths, plus one for branch constraints.  Units suffixed "w"
+;; model the write-back stage.
+(define_automaton "c6x_1,c6x_w1,c6x_2,c6x_w2,c6x_m1,c6x_m2,c6x_t1,c6x_t2,c6x_branch")
+(automata_option "ndfa")
+
+(define_cpu_unit "d1,l1,s1" "c6x_1")
+(define_cpu_unit "x1" "c6x_1")
+(define_cpu_unit "l1w,s1w" "c6x_w1")
+(define_cpu_unit "m1" "c6x_m1")
+(define_cpu_unit "m1w" "c6x_m1")
+(define_cpu_unit "t1" "c6x_t1")
+(define_cpu_unit "d2,l2,s2" "c6x_2")
+(define_cpu_unit "x2" "c6x_2")
+(define_cpu_unit "l2w,s2w" "c6x_w2")
+(define_cpu_unit "m2" "c6x_m2")
+(define_cpu_unit "m2w" "c6x_m2")
+(define_cpu_unit "t2" "c6x_t2")
+
+;; There can be up to two branches in one cycle (on the .s1 and .s2
+;; units), but some instructions must not be scheduled in parallel
+;; with a branch. We model this by reserving either br0 or br1 for a
+;; normal branch, and both of them for an insn such as callp.
+;; Another constraint is that two branches may only execute in parallel
+;; if one uses an offset, and the other a register. We can distinguish
+;; these by the dest_regfile attribute; it is "any" iff the branch uses
+;; an offset. br0 is reserved for these, while br1 is reserved for
+;; branches using a register.
+(define_cpu_unit "br0,br1" "c6x_branch")
+
+(include "c6x-sched.md")
+
+;; Some reservations which aren't generated from c6x-sched.md.in
+
+;; PC-relative branch on .s1 (dest_regfile "any", no cross path).
+(define_insn_reservation "branch_s1any" 6
+ (and (eq_attr "type" "branch")
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "any"))))
+ "s1+s1w+br0")
+
+;; For calls, we also reserve the units needed in the following cycles
+;; to load the return address. There are two options; using addkpc or
+;; mvkh/mvkl. The code in c6x_reorg knows whether to use one of these
+;; or whether to use callp. The actual insns are emitted only after
+;; the final scheduling pass is complete.
+;; We always reserve S2 for PC-relative call insns, since that allows
+;; us to turn them into callp insns later on.
+(define_insn_reservation "call_addkpc_s1any" 6
+ (and (eq_attr "type" "call")
+ (and (ne (symbol_ref "TARGET_INSNS_64") (const_int 0))
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "any")))))
+ "s2+s2w+br0,s2+s2w+br0+br1")
+
+;; Without addkpc the return address needs mvkl/mvkh, occupying S2 for
+;; two further cycles.
+(define_insn_reservation "call_mvk_s1any" 6
+ (and (eq_attr "type" "call")
+ (and (eq (symbol_ref "TARGET_INSNS_64") (const_int 0))
+ (and (eq_attr "cross" "n")
+ (and (eq_attr "units" "s")
+ (eq_attr "dest_regfile" "any")))))
+ "s2+s2w+br0,s2+s2w,s2+s2w")
+
+(define_reservation "all" "s1+s2+d1+d2+l1+l2+m1+m2")
+
+;; callp blocks the whole machine for its five delay slots.
+(define_insn_reservation "callp_s1" 1
+ (and (eq_attr "type" "callp") (eq_attr "dest_regfile" "a"))
+ "s1+s1w,all*5")
+
+(define_insn_reservation "callp_s2" 1
+ (and (eq_attr "type" "callp") (eq_attr "dest_regfile" "b"))
+ "s2+s2w,all*5")
+
+;; Constraints
+
+(include "constraints.md")
+
+;; Predicates
+
+(include "predicates.md")
+
+;; General predication pattern.
+;; Any insn may be predicated on a predicate register compared
+;; (eq or ne) against zero.
+
+(define_cond_exec
+ [(match_operator 0 "eqne_operator"
+ [(match_operand 1 "predicate_register" "AB")
+ (const_int 0)])]
+ ""
+ "")
+
+;; -------------------------------------------------------------------------
+;; NOP instruction
+;; -------------------------------------------------------------------------
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "nop")
+
+;; A multi-cycle nop; operand 0 is the cycle count.
+(define_insn "nop_count"
+ [(unspec [(match_operand 0 "const_int_operand" "n")] UNSPEC_NOP)]
+ ""
+ "%|%.\\tnop\\t%0")
+
+;; -------------------------------------------------------------------------
+;; Move instructions
+;; -------------------------------------------------------------------------
+
+;; Iterators grouping modes by size and kind: VM variants include the
+;; vector modes of the same width, M32 covers everything 32 bits or less.
+(define_mode_iterator QIHIM [QI HI])
+(define_mode_iterator SIDIM [SI DI])
+(define_mode_iterator SIDIVM [SI DI V2HI V4QI])
+(define_mode_iterator VEC4M [V2HI V4QI])
+(define_mode_iterator VEC8M [V2SI V4HI V8QI])
+(define_mode_iterator SISFVM [SI SF V2HI V4QI])
+(define_mode_iterator DIDFM [DI DF])
+(define_mode_iterator DIDFVM [DI DF V2SI V4HI V8QI])
+(define_mode_iterator SFDFM [SF DF])
+(define_mode_iterator M32 [QI HI SI SF V2HI V4QI])
+
+;; The C6X LO_SUM and HIGH are backwards - HIGH sets the low bits, and
+;; LO_SUM adds in the high bits. Fortunately these are opaque operations
+;; so this does not matter.
+(define_insn "movsi_lo_sum"
+ [(set (match_operand:SI 0 "register_operand" "=ab")
+ (lo_sum:SI (match_operand:SI 1 "register_operand" "0")
+ (match_operand:SI 2 "const_int_or_symbolic_operand" "i")))]
+ "reload_completed"
+ "%|%.\\tmvkh\\t%$\\t%2, %0"
+ [(set_attr "units" "s")])
+
+(define_insn "movsi_high"
+ [(set (match_operand:SI 0 "register_operand" "=ab")
+ (high:SI (match_operand:SI 1 "const_int_or_symbolic_operand" "i")))]
+ "reload_completed"
+ "%|%.\\tmvkl\\t%$\\t%1, %0"
+ [(set_attr "units" "s")])
+
+;; As above, but loading the GOT offset of a symbol (-fPIC only).
+(define_insn "movsi_gotoff_lo_sum"
+ [(set (match_operand:SI 0 "register_operand" "=ab")
+ (lo_sum:SI (match_operand:SI 1 "register_operand" "0")
+ (unspec:SI [(match_operand:SI 2 "symbolic_operand" "S2")]
+ UNSPEC_GOTOFF)))]
+ "flag_pic == 2"
+ "%|%.\\tmvkh\\t%$\\t$dpr_got%2, %0"
+ [(set_attr "units" "s")])
+
+(define_insn "movsi_gotoff_high"
+ [(set (match_operand:SI 0 "register_operand" "=ab")
+ (high:SI (unspec:SI [(match_operand:SI 1 "symbolic_operand" "S2")]
+ UNSPEC_GOTOFF)))]
+ "flag_pic == 2"
+ "%|%.\\tmvkl\\t%$\\t$dpr_got%1, %0"
+ [(set_attr "units" "s")])
+
+;; Normally we'd represent this as a normal load insn, but we can't currently
+;; represent the addressing mode.
+(define_insn "load_got_gotoff"
+ [(set (match_operand:SI 0 "register_operand" "=a,b")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "Z,Z")
+ (match_operand:SI 2 "register_operand" "b,b")]
+ UNSPEC_GOTOFF))]
+ "flag_pic == 2"
+ "%|%.\\tldw\\t%$\\t*+%1[%2], %0"
+ [(set_attr "type" "load")
+ (set_attr "units" "d_addr")
+ (set_attr "dest_regfile" "a,b")
+ (set_attr "addr_regfile" "b")])
+
+;; mvklh: replace the high half of a register, leaving the low 16 bits.
+(define_insn "*movstricthi_high"
+ [(set (match_operand:SI 0 "register_operand" "+ab")
+ (ior:SI (and:SI (match_dup 0) (const_int 65535))
+ (ashift:SI (match_operand:SI 1 "const_int_operand" "IuB")
+ (const_int 16))))]
+ "reload_completed"
+ "%|%.\\tmvklh\\t%$\\t%1, %0"
+ [(set_attr "units" "s")])
+
+;; Break up SImode loads of immediate operands.
+;; Constants that don't fit the signed-16-bit mvk range become an
+;; mvk of the low half followed by an mvklh of the high half.
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "const_int_operand" ""))]
+ "reload_completed
+ && !satisfies_constraint_IsB (operands[1])"
+ [(set (match_dup 0) (match_dup 2))
+ (set (match_dup 0) (ior:SI (and:SI (match_dup 0) (const_int 65535))
+ (ashift:SI (match_dup 3) (const_int 16))))]
+{
+ HOST_WIDE_INT val = INTVAL (operands[1]);
+ operands[2] = GEN_INT (trunc_int_for_mode (val, HImode));
+ operands[3] = GEN_INT ((val >> 16) & 65535);
+})
+
+;; A 32-bit vector constant becomes a single SImode constant load; the
+;; elements are packed into one word respecting the target endianness.
+(define_split
+ [(set (match_operand:VEC4M 0 "register_operand" "")
+ (match_operand:VEC4M 1 "const_vector_operand" ""))]
+ "reload_completed"
+ [(set (match_dup 2) (match_dup 3))]
+{
+ unsigned HOST_WIDE_INT mask, val;
+ enum machine_mode inner_mode = GET_MODE_INNER (<MODE>mode);
+ int i;
+
+ val = 0;
+ mask = GET_MODE_MASK (inner_mode);
+ if (TARGET_BIG_ENDIAN)
+ {
+ for (i = 0; i < GET_MODE_NUNITS (<MODE>mode); i++)
+ {
+ val <<= GET_MODE_BITSIZE (inner_mode);
+ val |= INTVAL (CONST_VECTOR_ELT (operands[1], i)) & mask;
+ }
+ }
+ else
+ {
+ /* Little-endian: accumulate elements in reverse so element 0 ends
+ up in the low-order bits. */
+ i = GET_MODE_NUNITS (<MODE>mode);
+ while (i-- > 0)
+ {
+ val <<= GET_MODE_BITSIZE (inner_mode);
+ val |= INTVAL (CONST_VECTOR_ELT (operands[1], i)) & mask;
+ }
+ }
+ operands[2] = gen_rtx_REG (SImode, REGNO (operands[0]));
+ operands[3] = GEN_INT (trunc_int_for_mode (val, SImode));
+})
+
+;; A 64-bit vector constant becomes two SImode constant loads, one per
+;; word half; val[0]/val[1] collect the low/high word respecting
+;; endianness.
+(define_split
+ [(set (match_operand:VEC8M 0 "register_operand" "")
+ (match_operand:VEC8M 1 "const_vector_operand" ""))]
+ "reload_completed"
+ [(set (match_dup 2) (match_dup 3))
+ (set (match_dup 4) (match_dup 5))]
+{
+ unsigned HOST_WIDE_INT mask;
+ unsigned HOST_WIDE_INT val[2];
+ rtx lo_half, hi_half;
+ enum machine_mode inner_mode = GET_MODE_INNER (<MODE>mode);
+ int i, j;
+
+ split_di (operands, 1, &lo_half, &hi_half);
+
+ val[0] = val[1] = 0;
+ mask = GET_MODE_MASK (inner_mode);
+ if (TARGET_BIG_ENDIAN)
+ {
+ for (i = 0, j = 1; i < GET_MODE_NUNITS (<MODE>mode); i++)
+ {
+ if (i * 2 == GET_MODE_NUNITS (<MODE>mode))
+ j--;
+ val[j] <<= GET_MODE_BITSIZE (inner_mode);
+ val[j] |= INTVAL (CONST_VECTOR_ELT (operands[1], i)) & mask;
+ }
+ }
+ else
+ {
+ i = GET_MODE_NUNITS (<MODE>mode);
+ j = 1;
+ while (i-- > 0)
+ {
+ val[j] <<= GET_MODE_BITSIZE (inner_mode);
+ val[j] |= INTVAL (CONST_VECTOR_ELT (operands[1], i)) & mask;
+ if (i * 2 == GET_MODE_NUNITS (<MODE>mode))
+ j--;
+ }
+ }
+ operands[2] = lo_half;
+ operands[3] = GEN_INT (trunc_int_for_mode (val[0], SImode));
+ operands[4] = hi_half;
+ operands[5] = GEN_INT (trunc_int_for_mode (val[1], SImode));
+})
+
+;; Load an SF constant via its 32-bit image: mvk of the low half plus
+;; mvklh of the high half, or a single mvk when the image fits 16 bits.
+(define_split
+ [(set (match_operand:SF 0 "register_operand" "")
+ (match_operand:SF 1 "immediate_operand" ""))]
+ "reload_completed"
+ [(set (match_dup 2) (match_dup 3))
+ (set (match_dup 2) (ior:SI (and:SI (match_dup 2) (const_int 65535))
+ (ashift:SI (match_dup 4) (const_int 16))))]
+{
+ long values;
+ REAL_VALUE_TYPE value;
+
+ gcc_assert (GET_CODE (operands[1]) == CONST_DOUBLE);
+
+ REAL_VALUE_FROM_CONST_DOUBLE (value, operands[1]);
+ REAL_VALUE_TO_TARGET_SINGLE (value, values);
+
+ operands[2] = gen_rtx_REG (SImode, true_regnum (operands[0]));
+ operands[3] = GEN_INT (trunc_int_for_mode (values, HImode));
+ if (values >= -32768 && values < 32768)
+ {
+ /* The whole image fits a signed 16-bit mvk; emit just that move. */
+ emit_move_insn (operands[2], operands[3]);
+ DONE;
+ }
+ operands[4] = GEN_INT ((values >> 16) & 65535);
+})
+
+;; Split a symbolic address into the mvkl/mvkh pair, except for
+;; small-data symbols on C64x+ which can be formed in one insn.
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "symbolic_operand" ""))]
+ "reload_completed
+ && (!TARGET_INSNS_64PLUS
+ || !sdata_symbolic_operand (operands[1], SImode))"
+ [(set (match_dup 0) (high:SI (match_dup 1)))
+ (set (match_dup 0) (lo_sum:SI (match_dup 0) (match_dup 1)))]
+ "")
+
+;; Normally, we represent the load of an sdata address as a normal
+;; move of a SYMBOL_REF. In DSBT mode, B14 is not constant, so we
+;; should show the dependency.
+(define_insn "load_sdata_pic"
+ [(set (match_operand:SI 0 "register_operand" "=a,b")
+ (plus:SI (match_operand:SI 1 "pic_register_operand" "Z,Z")
+ (unspec:SI [(match_operand:SI 2 "sdata_symbolic_operand" "S0,S0")]
+ UNSPEC_LOAD_SDATA)))]
+ "flag_pic"
+ "@
+ %|%.\\tadda%D2\\t%$\\t%1, %2, %0
+ %|%.\\tadda%D2\\t%$\\t%1, %2, %0"
+ [(set_attr "units" "d")
+ (set_attr "cross" "y,n")
+ (set_attr "predicable" "no")])
+
+;; Move instruction patterns
+
+;; Maps a mode to the ld/st mnemonic suffix of the matching width.
+(define_mode_attr LDST_SUFFIX [(QI "b") (HI "h")
+ (SI "w") (SF "w") (V2HI "w") (V4QI "w")
+ (DI "dw") (V2SI "dw") (V4HI "dw") (V8QI "dw")])
+
+;; QImode/HImode moves: register moves, small constants (mvk), and
+;; loads/stores in all four addr/dest register-file combinations.
+(define_insn "mov<mode>_insn"
+ [(set (match_operand:QIHIM 0 "nonimmediate_operand"
+ "=a,b, a, b, ab, ab,a,?a, b,?b, Q, R, R, Q")
+ (match_operand:QIHIM 1 "general_operand"
+ "a,b,?b,?a,Is5,IsB,Q, R, R, Q, a,?a, b,?b"))]
+ "GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) == REG"
+ "@
+ %|%.\\tmv\\t%$\\t%1, %0
+ %|%.\\tmv\\t%$\\t%1, %0
+ %|%.\\tmv\\t%$\\t%1, %0
+ %|%.\\tmv\\t%$\\t%1, %0
+ %|%.\\tmvk\\t%$\\t%1, %0
+ %|%.\\tmvk\\t%$\\t%1, %0
+ %|%.\\tld<LDST_SUFFIX>\\t%$\\t%1, %0
+ %|%.\\tld<LDST_SUFFIX>\\t%$\\t%1, %0
+ %|%.\\tld<LDST_SUFFIX>\\t%$\\t%1, %0
+ %|%.\\tld<LDST_SUFFIX>\\t%$\\t%1, %0
+ %|%.\\tst<LDST_SUFFIX>\\t%$\\t%1, %0
+ %|%.\\tst<LDST_SUFFIX>\\t%$\\t%1, %0
+ %|%.\\tst<LDST_SUFFIX>\\t%$\\t%1, %0
+ %|%.\\tst<LDST_SUFFIX>\\t%$\\t%1, %0"
+ [(set_attr "type" "*,*,*,*,*,*,load,load,load,load,store,store,store,store")
+ (set_attr "units62" "dls,dls,ls,ls,s,s,d_addr,d_addr,d_addr,d_addr,d_addr,d_addr,d_addr,d_addr")
+ (set_attr "units64" "dls,dls,ls,ls,dl,s,d_addr,d_addr,d_addr,d_addr,d_addr,d_addr,d_addr,d_addr")
+ (set_attr "addr_regfile" "*,*,*,*,*,*,a,b,b,a,a,b,b,a")
+ (set_attr "dest_regfile" "*,*,*,*,*,*,a,a,b,b,a,a,b,b")
+ (set_attr "cross" "n,n,y,y,n,n,n,y,n,y,n,y,n,y")])
+
+;; 32-bit (SI/SF/vector) moves; additionally handles sdata addresses
+;; via adda (alternatives 6-7) and general immediates which are split
+;; later (alternative 8, "#").
+(define_insn "mov<mode>_insn"
+ [(set (match_operand:SISFVM 0 "nonimmediate_operand"
+ "=a,b, a, b, ab, ab,a,b,ab,a,?a, b,?b, Q, R, R, Q")
+ (match_operand:SISFVM 1 "general_operand"
+ "a,b,?b,?a,Is5,IsB,S0,S0,Si,Q, R, R, Q, a,?a, b,?b"))]
+ "(GET_CODE (operands[0]) != MEM || GET_CODE (operands[1]) == REG
+ || (GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))))"
+ "@
+ %|%.\\tmv\\t%$\\t%1, %0
+ %|%.\\tmv\\t%$\\t%1, %0
+ %|%.\\tmv\\t%$\\t%1, %0
+ %|%.\\tmv\\t%$\\t%1, %0
+ %|%.\\tmvk\\t%$\\t%1, %0
+ %|%.\\tmvk\\t%$\\t%1, %0
+ %|%.\\tadda%D1\\t%$\\tB14, %1, %0
+ %|%.\\tadda%D1\\t%$\\tB14, %1, %0
+ #
+ %|%.\\tldw\\t%$\\t%1, %0
+ %|%.\\tldw\\t%$\\t%1, %0
+ %|%.\\tldw\\t%$\\t%1, %0
+ %|%.\\tldw\\t%$\\t%1, %0
+ %|%.\\tstw\\t%$\\t%1, %0
+ %|%.\\tstw\\t%$\\t%1, %0
+ %|%.\\tstw\\t%$\\t%1, %0
+ %|%.\\tstw\\t%$\\t%1, %0"
+ [(set_attr "type" "*,*,*,*,*,*,*,*,*,load,load,load,load,store,store,store,store")
+ (set_attr "units62" "dls,dls,ls,ls,s,s,d,d,*,d_addr,d_addr,d_addr,d_addr,d_addr,d_addr,d_addr,d_addr")
+ (set_attr "units64" "dls,dls,ls,ls,dl,s,d,d,*,d_addr,d_addr,d_addr,d_addr,d_addr,d_addr,d_addr,d_addr")
+ (set_attr "addr_regfile" "*,*,*,*,*,*,*,*,*,a,b,b,a,a,b,b,a")
+ (set_attr "dest_regfile" "*,*,*,*,*,*,*,*,*,a,a,b,b,a,a,b,b")
+ (set_attr "cross" "n,n,y,y,n,n,y,n,*,n,y,n,y,n,y,n,y")
+ (set_attr "predicable" "yes,yes,yes,yes,yes,yes,no,no,yes,yes,yes,yes,yes,yes,yes,yes,yes")])
+
+;; 64-bit moves: lddw/stdw where available, dmv on C64x+ when both
+;; registers are in the same file, otherwise split into two SImode
+;; moves ("#").
+(define_insn "*mov<mode>_insn"
+ [(set (match_operand:DIDFVM 0 "nonimmediate_operand"
+ "=a,b, a, b,ab,a,?a, b,?b, Q, R, R, Q")
+ (match_operand:DIDFVM 1 "general_operand"
+ "a,b,?b,?a,iF,Q, R, R, Q, a,?a, b,?b"))]
+ "(!MEM_P (operands[0]) || REG_P (operands[1])
+ || (GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))))"
+{
+ if (MEM_P (operands[1]) && TARGET_LDDW)
+ return "%|%.\\tlddw\\t%$\\t%1, %0";
+ if (MEM_P (operands[0]) && TARGET_STDW)
+ return "%|%.\\tstdw\\t%$\\t%1, %0";
+ if (TARGET_INSNS_64PLUS && REG_P (operands[0]) && REG_P (operands[1])
+ && A_REGNO_P (REGNO (operands[0])) == A_REGNO_P (REGNO (operands[1])))
+ return "%|%.\\tdmv\\t%$\\t%P1, %p1, %0";
+ return "#";
+}
+ [(set_attr "units" "s,s,*,*,*,d_addr,d_addr,d_addr,d_addr,d_addr,d_addr,d_addr,d_addr")
+ (set_attr "addr_regfile" "*,*,*,*,*,a,b,b,a,a,b,b,a")
+ (set_attr "dest_regfile" "*,*,*,*,*,a,a,b,b,a,a,b,b")
+ (set_attr "type" "*,*,*,*,*,load,load,load,load,store,store,store,store")
+ (set_attr "cross" "n,n,y,y,*,n,y,n,y,n,y,n,y")])
+
+;; Split the 64-bit moves the insn above returned "#" for into two
+;; word moves.
+(define_split
+ [(set (match_operand:DIDFVM 0 "nonimmediate_operand" "")
+ (match_operand:DIDFVM 1 "general_operand" ""))]
+ "reload_completed
+ && !((MEM_P (operands[0]) && TARGET_STDW)
+ || (MEM_P (operands[1]) && TARGET_LDDW))
+ && !const_vector_operand (operands[1], <MODE>mode)
+ && !(TARGET_INSNS_64PLUS && REG_P (operands[0]) && REG_P (operands[1])
+ && A_REGNO_P (REGNO (operands[0])) == A_REGNO_P (REGNO (operands[1])))"
+ [(set (match_dup 2) (match_dup 3))
+ (set (match_dup 4) (match_dup 5))]
+{
+ rtx lo_half[2], hi_half[2];
+ split_di (operands, 2, lo_half, hi_half);
+
+ /* We can't have overlap for a register-register move, but if
+ memory is involved, we have to make sure we don't clobber the
+ address. */
+ if (reg_overlap_mentioned_p (lo_half[0], hi_half[1]))
+ {
+ operands[2] = hi_half[0];
+ operands[3] = hi_half[1];
+ operands[4] = lo_half[0];
+ operands[5] = lo_half[1];
+ }
+ else
+ {
+ operands[2] = lo_half[0];
+ operands[3] = lo_half[1];
+ operands[4] = hi_half[0];
+ operands[5] = hi_half[1];
+ }
+})
+
+;; "Real" loads used by the machine reorg pass: operand 0 encodes the
+;; eventual destination register as a constant, and a *_shadow insn is
+;; placed at the cycle where the result becomes available.
+(define_insn "real_load<mode>"
+ [(unspec [(match_operand 0 "const_int_operand" "JA,JA,JB,JB")
+ (match_operand:M32 1 "memory_operand" "Q,R,R,Q")]
+ UNSPEC_REAL_LOAD)]
+ ""
+ "%|%.\\tld<LDST_SUFFIX>\\t%$\\t%1, %k0"
+ [(set_attr "type" "load")
+ (set_attr "units" "d_addr")
+ (set_attr "addr_regfile" "a,b,b,a")
+ (set_attr "dest_regfile" "a,a,b,b")
+ (set_attr "cross" "n,y,n,y")])
+
+(define_insn "real_load<mode>"
+ [(unspec [(match_operand 0 "const_int_operand" "JA,JA,JB,JB")
+ (match_operand:DIDFVM 1 "memory_operand" "Q,R,R,Q")]
+ UNSPEC_REAL_LOAD)]
+ "TARGET_LDDW"
+ "%|%.\\tlddw\\t%$\\t%1, %K0"
+ [(set_attr "type" "load")
+ (set_attr "units" "d_addr")
+ (set_attr "addr_regfile" "a,b,b,a")
+ (set_attr "dest_regfile" "a,a,b,b")
+ (set_attr "cross" "n,y,n,y")])
+
+;; Emits no code; marks the point where a previously issued real_load
+;; writes its destination register.
+(define_insn "load_shadow"
+ [(set (match_operand 0 "register_operand" "=ab")
+ (unspec [(pc)] UNSPEC_LOAD_SHADOW))]
+ ""
+ ";; load to %0 occurs"
+ [(set_attr "type" "load_shadow")])
+
+(define_insn "mult_shadow"
+ [(set (match_operand 0 "register_operand" "=ab")
+ (unspec [(pc)] UNSPEC_MULT_SHADOW))]
+ ""
+ ";; multiplication occurs and stores to %0"
+ [(set_attr "type" "mult_shadow")])
+
+
+(define_mode_iterator MOV [QI HI SI SF DI DF V2HI V4QI V2SI V4HI V8QI])
+
+;; General move expander; expand_move in the .c file legitimizes the
+;; operands and emits the move itself when it returns true.
+(define_expand "mov<mode>"
+ [(set (match_operand:MOV 0 "nonimmediate_operand" "")
+ (match_operand:MOV 1 "general_operand" ""))]
+ ""
+{
+ if (expand_move (operands, <MODE>mode))
+ DONE;
+})
+
+;; Misaligned access (ldn/stn family, C64x and up).  Store destinations
+;; are handled by the dedicated _store pattern below.
+(define_expand "movmisalign<mode>"
+ [(set (match_operand:SIDIVM 0 "nonimmediate_operand" "")
+ (unspec:SIDIVM [(match_operand:SIDIVM 1 "nonimmediate_operand" "")]
+ UNSPEC_MISALIGNED_ACCESS))]
+ "TARGET_INSNS_64"
+{
+ if (memory_operand (operands[0], <MODE>mode))
+ {
+ emit_insn (gen_movmisalign<mode>_store (operands[0], operands[1]));
+ DONE;
+ }
+})
+
+;; Misaligned store.  The "W" alternative covers addresses stn can't
+;; encode; it is split after reload to move the address into the
+;; scratch register first (unless it turns out to be aligned).
+(define_insn_and_split "movmisalign<mode>_store"
+ [(set (match_operand:SIDIVM 0 "memory_operand" "=W,Q,T,Q,T")
+ (unspec:SIDIVM [(match_operand:SIDIVM 1 "register_operand" "r,a,b,b,a")]
+ UNSPEC_MISALIGNED_ACCESS))
+ (clobber (match_scratch:SI 2 "=r,X,X,X,X"))]
+ "TARGET_INSNS_64"
+ "@
+ #
+ %|%.\\tstn<LDST_SUFFIX>\\t%$\\t%1, %0
+ %|%.\\tstn<LDST_SUFFIX>\\t%$\\t%1, %0
+ %|%.\\tstn<LDST_SUFFIX>\\t%$\\t%1, %0
+ %|%.\\tstn<LDST_SUFFIX>\\t%$\\t%1, %0"
+ "&& reload_completed && satisfies_constraint_W (operands[0])"
+ [(parallel
+ [(set (match_dup 3) (unspec:SIDIVM [(match_dup 1)] UNSPEC_MISALIGNED_ACCESS))
+ (clobber (match_dup 4))])]
+{
+ rtx addr = XEXP (operands[0], 0);
+ rtx tmpreg = operands[2];
+
+ if (GET_CODE (addr) == PLUS && XEXP (addr, 0) == stack_pointer_rtx
+ && GET_CODE (XEXP (addr, 1)) == CONST_INT)
+ {
+ /* Stack slots at an offset that is a multiple of the access size
+ are in fact aligned; use a plain store. */
+ unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
+ val &= GET_MODE_SIZE (<MODE>mode) - 1;
+ if (val == 0)
+ {
+ emit_move_insn (operands[0], operands[1]);
+ DONE;
+ }
+ }
+ operands[3] = change_address (operands[0], <MODE>mode, tmpreg);
+ emit_move_insn (tmpreg, addr);
+ operands[4] = gen_rtx_SCRATCH (SImode);
+}
+ [(set_attr "type" "storen")
+ (set_attr "units" "d_addr")
+ (set_attr "addr_regfile" "*,a,b,a,b")
+ (set_attr "dest_regfile" "*,a,b,b,a")
+ (set_attr "cross" "*,n,n,y,y")])
+
+;; Misaligned load; mirrors movmisalign<mode>_store, but the address
+;; can be moved into (a word of) the destination itself, so no scratch
+;; register is needed.
+(define_insn_and_split "movmisalign<mode>_load"
+ [(set (match_operand:SIDIVM 0 "register_operand" "=ab,a,b,b,a")
+ (unspec:SIDIVM [(match_operand:SIDIVM 1 "memory_operand" "W,Q,T,Q,T")]
+ UNSPEC_MISALIGNED_ACCESS))]
+ "TARGET_INSNS_64"
+ "@
+ #
+ %|%.\\tldn<LDST_SUFFIX>\\t%$\\t%1, %0
+ %|%.\\tldn<LDST_SUFFIX>\\t%$\\t%1, %0
+ %|%.\\tldn<LDST_SUFFIX>\\t%$\\t%1, %0
+ %|%.\\tldn<LDST_SUFFIX>\\t%$\\t%1, %0"
+ "&& reload_completed && satisfies_constraint_W (operands[1])"
+ [(set (match_dup 0) (unspec:SIDIVM [(match_dup 2)] UNSPEC_MISALIGNED_ACCESS))]
+{
+ rtx addr = XEXP (operands[1], 0);
+ rtx tmpreg = (GET_MODE (operands[0]) == SImode ? operands[0]
+ : operand_subword_force (operands[0], 0, DImode));
+
+ if (GET_CODE (addr) == PLUS && XEXP (addr, 0) == stack_pointer_rtx
+ && GET_CODE (XEXP (addr, 1)) == CONST_INT)
+ {
+ /* Aligned stack slot: a plain load suffices. */
+ unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
+ val &= GET_MODE_SIZE (<MODE>mode) - 1;
+ if (val == 0)
+ {
+ emit_move_insn (operands[0], operands[1]);
+ DONE;
+ }
+ }
+ operands[2] = change_address (operands[1], <MODE>mode, tmpreg);
+ emit_move_insn (tmpreg, addr);
+}
+ [(set_attr "type" "loadn")
+ (set_attr "units" "d_addr")
+ (set_attr "addr_regfile" "*,a,b,a,b")
+ (set_attr "dest_regfile" "*,a,b,b,a")
+ (set_attr "cross" "*,n,n,y,y")])
+
+;;
+
+;; -------------------------------------------------------------------------
+;; Extensions/extractions
+;; -------------------------------------------------------------------------
+
+;; Code iterators and the attributes that map them to mnemonic pieces
+;; ("u" suffix for unsigned forms, "z" for zero_extract names).
+(define_code_iterator any_extract [zero_extract sign_extract])
+(define_code_iterator any_ext [zero_extend sign_extend])
+
+(define_code_attr ext_name [(zero_extend "zero_extend") (sign_extend "sign_extend")])
+
+(define_code_attr u [(zero_extend "u") (sign_extend "")])
+
+(define_code_attr z [(zero_extract "z") (sign_extract "")])
+(define_code_attr zu [(zero_extract "u") (sign_extract "")])
+
+;; Shift amount that isolates a QI/HI value within a 32-bit register.
+(define_mode_attr ext_shift [(QI "24") (HI "16")])
+
+;; QI/HI to SI extension: ext/extu on registers, or a widening load
+;; (ldb/ldh and unsigned variants) straight from memory.
+(define_insn "<ext_name><mode>si2"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,a,?a, b,?b")
+ (any_ext: SI (match_operand:QIHIM 1 "nonimmediate_operand" "a,b,Q, R, R, Q")))]
+ ""
+ "@
+ %|%.\\text<u>\\t%$\\t%1, <ext_shift>, <ext_shift>, %0
+ %|%.\\text<u>\\t%$\\t%1, <ext_shift>, <ext_shift>, %0
+ %|%.\\tld<LDST_SUFFIX><u>\\t%$\\t%1, %0
+ %|%.\\tld<LDST_SUFFIX><u>\\t%$\\t%1, %0
+ %|%.\\tld<LDST_SUFFIX><u>\\t%$\\t%1, %0
+ %|%.\\tld<LDST_SUFFIX><u>\\t%$\\t%1, %0"
+ [(set_attr "type" "*,*,load,load,load,load")
+ (set_attr "units" "s,s,d_addr,d_addr,d_addr,d_addr")
+ (set_attr "addr_regfile" "*,*,a,b,b,a")
+ (set_attr "dest_regfile" "*,*,a,a,b,b")
+ (set_attr "cross" "n,n,n,y,n,y")])
+
+;; Bit-field extract with constant width (operand 2) and position
+;; (operand 3).  The ext/extu shift amounts are computed from them:
+;; left-shift by 32-pos-len, then (arithmetic or logical) right-shift
+;; by 32-len.
+(define_insn "*ext<z>v_const"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=a,b")
+ (any_extract:SI (match_operand:SI 1 "register_operand" "a,b")
+ (match_operand:SI 2 "const_int_operand" "n,n")
+ (match_operand:SI 3 "const_int_operand" "n,n")))]
+ "INTVAL (operands[3]) >= 0
+ && INTVAL (operands[2]) + INTVAL (operands[3]) <= 32"
+{
+ int pos = INTVAL (operands[3]);
+ int len = INTVAL (operands[2]);
+ rtx xop[4];
+ xop[0] = operands[0];
+ xop[1] = operands[1];
+ xop[2] = GEN_INT (32 - pos - len);
+ xop[3] = GEN_INT (32 - len);
+
+ output_asm_insn ("%|%.\\text<zu>\\t%$\\t%1, %2, %3, %0", xop);
+ return "";
+}
+ [(set_attr "units" "s")
+ (set_attr "cross" "n")])
+
+;; NOTE(review): this FAILs when operands[2] (the width) is negative,
+;; while the insn above requires operands[3] (the position) >= 0 —
+;; verify the two conditions agree.
+(define_expand "ext<z>v"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (any_extract:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")
+ (match_operand:SI 3 "const_int_operand" "")))]
+ ""
+{
+ if (INTVAL (operands[2]) < 0
+ || INTVAL (operands[2]) + INTVAL (operands[3]) > 32)
+ FAIL;
+})
+
+;; Reorg-pass form of the widening load; like real_load<mode> the
+;; destination register is encoded in constant operand 0.
+(define_insn "real_<ext_name><mode>"
+ [(unspec [(match_operand 0 "const_int_operand" "JA,JA,JB,JB")
+ (any_ext:SI (match_operand:QIHIM 1 "memory_operand" "Q,R,R,Q"))]
+ UNSPEC_REAL_LOAD)]
+ ""
+ "%|%.\\tld<LDST_SUFFIX><u>\\t%$\\t%1, %k0"
+ [(set_attr "type" "load")
+ (set_attr "units" "d_addr")
+ (set_attr "addr_regfile" "a,b,b,a")
+ (set_attr "dest_regfile" "a,a,b,b")
+ (set_attr "cross" "n,y,n,y")])
+
+;; clr builtin: clear a bit field.  With a constant field spec the
+;; low 5 bits give one bound and bits 5-9 the other; with a register
+;; spec the hardware decodes the field itself.
+(define_insn "clrr"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,a,b")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "0,0,0,0")
+ (match_operand:SI 2 "register_operand" "a,b,?b,?a")
+ (match_operand:SI 3 "reg_or_const_int_operand" "ai,bi,a,b")]
+ UNSPEC_CLR))]
+ ""
+{
+ if (CONST_INT_P (operands[2]))
+ {
+ rtx xops[4];
+ int v1 = INTVAL (operands[2]);
+ int v2 = (v1 >> 5) & 0x1f;
+ v1 &= 0x1f;
+ xops[0] = operands[0];
+ xops[1] = operands[1];
+ xops[2] = GEN_INT (v1);
+ xops[3] = GEN_INT (v2);
+ output_asm_insn ("%|%.\\tclr\\t%$\\t%1, %3, %2, %0", xops);
+ return "";
+ }
+ return "%|%.\\tclr\\t%$\\t%2, %3, %0";
+}
+ [(set_attr "units" "s")
+ (set_attr "cross" "n,n,y,y")])
+
+;; ext builtin: signed bit-field extract.  Constant field specs are
+;; decoded as in clrr; the register form passes the spec through.
+(define_insn "extr"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,a,b")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "a,b,?b,?a")
+ (match_operand:SI 2 "reg_or_const_int_operand" "ai,bi,a,b")]
+ UNSPEC_EXT))]
+ ""
+{
+ if (CONST_INT_P (operands[2]))
+ {
+ rtx xops[4];
+ int v1 = INTVAL (operands[2]);
+ int v2 = (v1 >> 5) & 0x1f;
+ v1 &= 0x1f;
+ xops[0] = operands[0];
+ xops[1] = operands[1];
+ xops[2] = GEN_INT (v1);
+ xops[3] = GEN_INT (v2);
+ output_asm_insn ("%|%.\\text\\t%$\\t%1, %3, %2, %0", xops);
+ return "";
+ }
+ return "%|%.\\text\\t%$\\t%1, %2, %0";
+}
+ [(set_attr "units" "s")
+ (set_attr "cross" "n,n,y,y")])
+
+;; extu builtin: unsigned bit-field extract; mirrors "extr" above.
+(define_insn "extru"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,a,b")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "a,b,?b,?a")
+ (match_operand:SI 2 "reg_or_const_int_operand" "ai,bi,a,b")]
+ UNSPEC_EXTU))]
+ ""
+{
+ if (CONST_INT_P (operands[2]))
+ {
+ rtx xops[4];
+ int v1 = INTVAL (operands[2]);
+ int v2 = (v1 >> 5) & 0x1f;
+ v1 &= 0x1f;
+ xops[0] = operands[0];
+ xops[1] = operands[1];
+ xops[2] = GEN_INT (v1);
+ xops[3] = GEN_INT (v2);
+ output_asm_insn ("%|%.\\textu\\t%$\\t%1, %3, %2, %0", xops);
+ return "";
+ }
+ return "%|%.\\textu\\t%$\\t%1, %2, %0";
+}
+ [(set_attr "units" "s")
+ ;; The constraint alternatives are identical to "extr" and "clrr"
+ ;; (same-file in alternatives 0-1, cross path in 2-3), so the cross
+ ;; attribute must match theirs; the previous "n,y,n,y" mismarked
+ ;; alternatives 1 and 2.
+ (set_attr "cross" "n,n,y,y")])
+
+;; -------------------------------------------------------------------------
+;; Compare instructions
+;; -------------------------------------------------------------------------
+
+;; Signed compare (eq/lt/gt).  The unit side follows the destination, so
+;; "cross" is computed dynamically from operands 0 and 2.
+(define_insn "scmpsi_insn"
+ [(set (match_operand:SI 0 "register_operand" "=ab,a,b,a,b")
+ (match_operator:SI 1 "eqltgt_operator"
+ [(match_operand:SI 2 "register_operand" "ab,a,b,?b,?a")
+ (match_operand:SI 3 "reg_or_scst5_operand" "Is5,aIs5,bIs5,aIs5,bIs5")]))]
+ ""
+ "%|%.\\tcmp%C1\\t%$\\t%3, %2, %0"
+ [(set_attr "units" "l")
+ (set (attr "cross")
+ (symbol_ref "CROSS_OPERANDS (operands[0], operands[2])"))])
+
+;; Unsigned compare; TARGET_INSNS_64 devices accept a 5-bit unsigned
+;; constant (Iu5), older ones only 4-bit (Iu4) -- hence the two variants.
+(define_insn "*ucmpsi_insn_64"
+ [(set (match_operand:SI 0 "register_operand" "=ab,a,b,a,b")
+ (match_operator:SI 1 "ltugtu_operator"
+ [(match_operand:SI 2 "register_operand" "ab,a,b,?b,?a")
+ (match_operand:SI 3 "reg_or_ucst5_operand" "Iu5,aIu5,bIu5,aIu5,bIu5")]))]
+ "TARGET_INSNS_64"
+ "%|%.\\tcmp%C1\\t%$\\t%3, %2, %0"
+ [(set_attr "units" "l")
+ (set (attr "cross")
+ (symbol_ref "CROSS_OPERANDS (operands[0], operands[2])"))])
+
+(define_insn "*ucmpsi_insn"
+ [(set (match_operand:SI 0 "register_operand" "=ab,a,b,a,b")
+ (match_operator:SI 1 "ltugtu_operator"
+ [(match_operand:SI 2 "register_operand" "ab,a,b,?b,?a")
+ (match_operand:SI 3 "reg_or_ucst4_operand" "Iu4,aIu4,bIu4,aIu4,bIu4")]))]
+ "!TARGET_INSNS_64"
+ "%|%.\\tcmp%C1\\t%$\\t%3, %2, %0"
+ [(set_attr "units" "l")
+ (set (attr "cross")
+ (symbol_ref "CROSS_OPERANDS (operands[0], operands[2])"))])
+
+;; Iterator for compares combined with a previous predicate value:
+;; eq -> AND-style (execute if %4 zero is false), ne -> OR-style.
+(define_code_iterator andior_eqne [eq ne])
+(define_code_attr andior_name [(eq "and") (ne "ior")])
+(define_code_attr andior_condmod [(eq "") (ne "!")])
+
+;; Predicated signed compare: keep operand 4 unless the condition selects
+;; the new compare result.  Not further predicable -- the predicate slot
+;; is already in use ([<andior_condmod>%4]).
+(define_insn "*scmpsi_<andior_name>_insn"
+ [(set (match_operand:SI 0 "register_operand" "=A,B,A,B")
+ (if_then_else:SI
+ (andior_eqne:SI (match_operand:SI 4 "register_operand" "0,0,0,0")
+ (const_int 0))
+ (match_dup 4)
+ (match_operator:SI 1 "eqltgt_operator"
+ [(match_operand:SI 2 "register_operand" "a,b,?b,?a")
+ (match_operand:SI 3 "reg_or_scst5_operand" "aIs5,bIs5,aIs5,bIs5")])))]
+ ""
+ "%|[<andior_condmod>%4]\\tcmp%C1\\t%$\\t%3, %2, %0"
+ [(set_attr "units" "l")
+ (set_attr "cross" "n,n,y,y")
+ (set_attr "predicable" "no")])
+
+;; Predicated unsigned compares, again split by constant width.
+(define_insn "*ucmpsi_<andior_name>_insn_64"
+ [(set (match_operand:SI 0 "register_operand" "=A,B,A,B")
+ (if_then_else:SI
+ (andior_eqne:SI (match_operand:SI 4 "register_operand" "0,0,0,0")
+ (const_int 0))
+ (match_dup 4)
+ (match_operator:SI 1 "ltugtu_operator"
+ [(match_operand:SI 2 "register_operand" "a,b,?b,?a")
+ (match_operand:SI 3 "reg_or_ucst5_operand" "aIu5,bIu5,aIu5,bIu5")])))]
+ "TARGET_INSNS_64"
+ "%|[<andior_condmod>%4]\\tcmp%C1\\t%$\\t%3, %2, %0"
+ [(set_attr "units" "l")
+ (set_attr "cross" "n,n,y,y")
+ (set_attr "predicable" "no")])
+
+(define_insn "*ucmpsi_<andior_name>_insn"
+ [(set (match_operand:SI 0 "register_operand" "=A,B,A,B")
+ (if_then_else:SI
+ (andior_eqne:SI (match_operand:SI 4 "register_operand" "0,0,0,0")
+ (const_int 0))
+ (match_dup 4)
+ (match_operator:SI 1 "ltugtu_operator"
+ [(match_operand:SI 2 "register_operand" "a,b,?b,?a")
+ (match_operand:SI 3 "reg_or_ucst4_operand" "aIu4,bIu4,aIu4,bIu4")])))]
+ "!TARGET_INSNS_64"
+ "%|[<andior_condmod>%4]\\tcmp%C1\\t%$\\t%3, %2, %0"
+ [(set_attr "units" "l")
+ (set_attr "cross" "n,n,y,y")
+ (set_attr "predicable" "no")])
+
+;; Expander for combined compares; forces the constant into a register
+;; when it is out of range for the comparison's immediate form.
+(define_expand "cmpsi_<andior_name>"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (if_then_else:SI
+ (andior_eqne:SI (match_operand:SI 4 "register_operand" "0,0,0,0")
+ (const_int 0))
+ (match_dup 4)
+ (match_operator:SI 1 "c6x_comparison_operator"
+ [(match_operand:SI 2 "register_operand" "")
+ (match_operand:SI 3 "reg_or_const_int_operand" "")])))]
+ ""
+{
+ if (c6x_force_op_for_comparison_p (GET_CODE (operands[1]), operands[3]))
+ operands[3] = force_reg (SImode, operands[3]);
+})
+
+;; Floating-point compares (require hardware FP).
+(define_insn "*cmpsf_insn"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,a,b")
+ (match_operator:SI 1 "eqltgt_operator"
+ [(match_operand:SF 2 "register_operand" "a,b,a,b")
+ (match_operand:SF 3 "register_operand" "a,b,?b,?a")]))]
+ "TARGET_FP"
+ "%|%.\\tcmp%c1sp\\t%$\\t%2, %3, %0"
+ [(set_attr "units" "s")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "*cmpdf_insn"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,a,b")
+ (match_operator:SI 1 "eqltgt_operator"
+ [(match_operand:DF 2 "register_operand" "a,b,a,b")
+ (match_operand:DF 3 "register_operand" "a,b,?b,?a")]))]
+ "TARGET_FP"
+ "%|%.\\tcmp%c1dp\\t%$\\t%2, %3, %0"
+ [(set_attr "type" "cmpdp")
+ (set_attr "units" "s")
+ (set_attr "cross" "n,n,y,y")])
+
+;; Predicate-combining FP compares, analogous to the integer versions.
+(define_expand "cmp<mode>_<andior_name>"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (if_then_else:SI
+ (andior_eqne:SI (match_operand:SI 4 "register_operand" "0,0,0,0")
+ (const_int 0))
+ (match_dup 4)
+ (match_operator:SI 1 "eqltgt_operator"
+ [(match_operand:SFDFM 2 "register_operand" "")
+ (match_operand:SFDFM 3 "register_operand" "")])))]
+ "TARGET_FP")
+
+(define_insn "*cmpsf_<andior_name>_insn"
+ [(set (match_operand:SI 0 "register_operand" "=A,B,A,B")
+ (if_then_else:SI
+ (andior_eqne:SI (match_operand:SI 4 "register_operand" "0,0,0,0")
+ (const_int 0))
+ (match_dup 4)
+ (match_operator:SI 1 "eqltgt_operator"
+ [(match_operand:SF 2 "register_operand" "a,b,a,b")
+ (match_operand:SF 3 "register_operand" "a,b,?b,?a")])))]
+ "TARGET_FP"
+ "%|[<andior_condmod>%4]\\tcmp%c1sp\\t%$\\t%2, %3, %0"
+ [(set_attr "units" "s")
+ (set_attr "cross" "n,n,y,y")
+ (set_attr "predicable" "no")])
+
+;; reload_reg_class_lower will ensure that two-word reloads are allocated first,
+;; which could exhaust the predicate registers if we used just "a" and "b"
+;; constraints on operands 2 and 3.
+(define_insn "*cmpdf_<andior_name>_insn"
+ [(set (match_operand:SI 0 "register_operand" "=A,B,A,B")
+ (if_then_else:SI
+ (andior_eqne:SI (match_operand:SI 4 "register_operand" "0,0,0,0")
+ (const_int 0))
+ (match_dup 4)
+ (match_operator:SI 1 "eqltgt_operator"
+ [(match_operand:DF 2 "register_operand" "Da,Db,Da,Db")
+ (match_operand:DF 3 "register_operand" "Da,Db,?Db,?Da")])))]
+ "TARGET_FP"
+ "%|[<andior_condmod>%4]\\tcmp%c1dp\\t%$\\t%2, %3, %0"
+ [(set_attr "type" "cmpdp")
+ (set_attr "units" "s")
+ (set_attr "cross" "n,n,y,y")
+ (set_attr "predicable" "no")])
+
+;; Split OR/AND of two comparison results into a compare followed by a
+;; predicated compare; only valid when operand 0 does not overlap the
+;; second comparison's inputs, since it is written first.
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ior:SI (match_operand 1 "c6x_any_comparison_operand" "")
+ (match_operand 2 "c6x_any_comparison_operand" "")))]
+ "!reg_overlap_mentioned_p (operands[0], operands[2])"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0)
+ (if_then_else:SI (ne:SI (match_dup 0) (const_int 0))
+ (match_dup 0)
+ (match_dup 2)))])
+
+(define_split
+ [(set (match_operand:SI 0 "register_operand" "")
+ (and:SI (match_operand 1 "c6x_any_comparison_operand" "")
+ (match_operand 2 "c6x_any_comparison_operand" "")))]
+ "!reg_overlap_mentioned_p (operands[0], operands[2])"
+ [(set (match_dup 0) (match_dup 1))
+ (set (match_dup 0)
+ (if_then_else:SI (eq:SI (match_dup 0) (const_int 0))
+ (match_dup 0)
+ (match_dup 2)))])
+
+
+;; -------------------------------------------------------------------------
+;; setcc instructions
+;; -------------------------------------------------------------------------
+
+;; Store-condition expander.  Comparisons the hardware cannot do directly
+;; are synthesized by computing the reverse condition and then testing the
+;; result for equality with zero.
+(define_expand "cstoresi4"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operator:SI 1 "comparison_operator"
+ [(match_operand:SI 2 "register_operand" "")
+ (match_operand:SI 3 "reg_or_ucst4_operand" "")]))]
+ ""
+{
+ if (!c6x_comparison_operator (operands[1], SImode))
+ {
+ rtx tmpreg = gen_reg_rtx (SImode);
+ rtx t = gen_rtx_fmt_ee (reverse_condition (GET_CODE (operands[1])),
+ SImode, operands[2], operands[3]);
+ emit_insn (gen_rtx_SET (VOIDmode, tmpreg, t));
+ /* Invert: result = (reversed-compare == 0).  */
+ emit_insn (gen_scmpsi_insn (operands[0],
+ gen_rtx_fmt_ee (EQ, SImode, tmpreg, const0_rtx),
+ tmpreg, const0_rtx));
+ DONE;
+ }
+})
+
+;; -------------------------------------------------------------------------
+;; Jump instructions
+;; -------------------------------------------------------------------------
+
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:SI 0 "register_operand" "a,b"))]
+ ""
+ "%|%.\\tb\\t%$\\t%0"
+ [(set_attr "type" "branch")
+ (set_attr "units" "s")
+ ;; branch executes on the B side; an A-file operand needs the cross path.
+ (set_attr "cross" "y,n")
+ (set_attr "dest_regfile" "b")])
+
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "%|%.\\tb\\t%$\\t%l0"
+ [(set_attr "type" "branch")
+ (set_attr "units" "s")
+ (set_attr "dest_regfile" "any")])
+
+;; Plain tablejump is used except for PIC on TARGET_INSNS_64, where
+;; "casesi" below takes over.
+(define_expand "tablejump"
+ [(parallel [(set (pc) (match_operand:SI 0 "register_operand" ""))
+ (use (label_ref (match_operand 1 "" "")))])]
+ "!flag_pic || !TARGET_INSNS_64"
+{
+})
+
+(define_insn "*tablejump_internal"
+ [(set (pc) (match_operand:SI 0 "register_operand" "b"))
+ (use (label_ref (match_operand 1 "" "")))]
+ "!flag_pic || !TARGET_INSNS_64"
+ "%|\\tb\\t%$\\t%0"
+ [(set_attr "type" "branch")
+ (set_attr "predicable" "no")
+ (set_attr "units" "s")
+ (set_attr "dest_regfile" "b")])
+
+;; Implement switch statements when generating PIC code. Switches are
+;; implemented by `tablejump' when not using -fpic.
+
+;; Emit code here to do the range checking and make the index zero based.
+;; operand 0 is the index
+;; operand 1 is the lower bound
+;; operand 2 is the range of indices (highest - lowest + 1)
+;; operand 3 is the label that precedes the table itself
+;; operand 4 is the fall through label
+
+(define_expand "casesi"
+ [(use (match_operand:SI 0 "register_operand" ""))
+ (use (match_operand:SI 1 "const_int_operand" ""))
+ (use (match_operand:SI 2 "const_int_operand" ""))
+ (use (match_operand 3 "" ""))
+ (use (match_operand 4 "" ""))]
+ "flag_pic && TARGET_INSNS_64"
+{
+ rtx indx;
+ rtx low = operands[1];
+ rtx range = operands[2];
+ rtx table = operands[3];
+ rtx fail = operands[4];
+
+ gcc_assert (GET_CODE (operands[1]) == CONST_INT);
+ gcc_assert (GET_CODE (operands[2]) == CONST_INT);
+
+ if (!reg_or_ucst4_operand (range, SImode))
+ range = force_reg (SImode, range);
+
+ /* If low bound is 0, we don't have to subtract it. */
+ if (INTVAL (operands[1]) == 0)
+ indx = operands[0];
+ else
+ {
+ rtx offset = GEN_INT (-INTVAL (low));
+ indx = gen_reg_rtx (SImode);
+ if (!addsi_operand (offset, SImode))
+ offset = force_reg (SImode, offset);
+ emit_insn (gen_addsi3 (indx, operands[0], offset));
+ }
+ /* Out-of-range indices (unsigned compare) fall through.  */
+ emit_cmp_and_jump_insns (indx, range, GTU, NULL_RTX, SImode, 1, fail);
+
+ emit_jump_insn (gen_casesi_internal (indx, table));
+ DONE;
+})
+
+;; This is the only instance in this file where a pattern emits more than
+;; one instruction. The concern here is that the addkpc insn could otherwise
+;; be scheduled too far away from the label. A tablejump always ends an
+;; extended basic block, so it shouldn't happen that the scheduler places
+;; something in the delay slots.
+(define_insn "casesi_internal"
+ [(set (pc)
+ (mem:SI (plus:SI (mult:SI (match_operand:SI 0 "register_operand" "b")
+ (const_int 4))
+ (label_ref (match_operand 1 "" "")))))
+ (clobber (match_scratch:SI 2 "=&b"))
+ (clobber (match_scratch:SI 3 "=b"))]
+ "flag_pic && TARGET_INSNS_64"
+ "addkpc\t.s2\t%l1,%2, 0\n\t\tldw\t.d2t2\t*+%2[%0], %3\n\t\tnop\t\t4\n\t\tadd\t.l2\t%2, %3, %3\n\t\tb\t.s2\t%3"
+ [(set_attr "type" "branch")
+ (set_attr "predicable" "no")
+ (set_attr "dest_regfile" "b")])
+
+;; Conditional-branch expanders: canonicalize the comparison via
+;; c6x_expand_compare, which may emit a separate compare insn and rewrite
+;; the operator to test its result.  Integer version first.
+(define_expand "cbranch<mode>4"
+ [(set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(match_operand:SIDIM 1 "register_operand" "")
+ (match_operand:SIDIM 2 "reg_or_const_int_operand" "")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ ""
+{
+ rtx t = c6x_expand_compare (operands[0], VOIDmode);
+ operands[0] = t;
+ operands[1] = XEXP (t, 0);
+ operands[2] = XEXP (t, 1);
+})
+
+;; Floating-point version (distinct modes, so no name clash with the above).
+(define_expand "cbranch<mode>4"
+ [(set (pc)
+ (if_then_else (match_operator 0 "c6x_fp_comparison_operator"
+ [(match_operand:SFDFM 1 "register_operand" "")
+ (match_operand:SFDFM 2 "register_operand" "")])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ ""
+{
+ rtx t = c6x_expand_compare (operands[0], VOIDmode);
+ operands[0] = t;
+ operands[1] = XEXP (t, 0);
+ operands[2] = XEXP (t, 1);
+})
+
+;; Branch if predicate register true (%J0) / false (%j0).
+(define_insn "br_true"
+ [(set (pc)
+ (if_then_else (match_operator 0 "predicate_operator"
+ [(match_operand:SI 1 "register_operand" "AB")
+ (const_int 0)])
+ (label_ref (match_operand 2 "" ""))
+ (pc)))]
+ ""
+ "%|[%J0]\\tb\\t%$\\t%l2"
+ [(set_attr "type" "branch")
+ (set_attr "predicable" "no")
+ (set_attr "units" "s")
+ (set_attr "dest_regfile" "any")])
+
+(define_insn "br_false"
+ [(set (pc)
+ (if_then_else (match_operator 0 "predicate_operator"
+ [(match_operand:SI 1 "register_operand" "AB")
+ (const_int 0)])
+ (pc)
+ (label_ref (match_operand 2 "" ""))))]
+ ""
+ "%|[%j0]\\tb\\t%$\\t%l2"
+ [(set_attr "type" "branch")
+ (set_attr "predicable" "no")
+ (set_attr "units" "s")
+ (set_attr "dest_regfile" "any")])
+
+;; Simple epilogue: only usable when no frame and no saved registers.
+(define_expand "return"
+ [(parallel
+ [(return)
+ (use (reg:SI REG_B3))])]
+ "reload_completed && get_frame_size () == 0 && c6x_nsaved_regs () == 0")
+
+;; We can't expand this before we know where the link register is stored.
+(define_insn_and_split "eh_return"
+ [(unspec_volatile [(match_operand:SI 0 "register_operand" "ab")]
+ UNSPECV_EH_RETURN)
+ (clobber (match_scratch:SI 1 "=&ab"))]
+ ""
+ "#"
+ "&& reload_completed"
+ [(const_int 0)]
+ "
+ {
+ c6x_set_return_address (operands[0], operands[1]);
+ DONE;
+ }"
+)
+
+;; -------------------------------------------------------------------------
+;; Delayed-branch real jumps and shadows
+;; -------------------------------------------------------------------------
+;; The "real_*" insns issue the branch; a matching "*_shadow" insn marks
+;; the cycle where control transfer actually takes effect.
+
+(define_insn "real_jump"
+ [(unspec [(match_operand 0 "c6x_jump_operand" "a,b,s") (const_int 0)]
+ UNSPEC_REAL_JUMP)]
+ ""
+{
+ if (GET_CODE (operands[0]) == LABEL_REF)
+ return "%|%.\\tb\\t%$\\t%l0";
+ return "%|%.\\tb\\t%$\\t%0";
+}
+ [(set_attr "type" "branch")
+ (set_attr "has_shadow" "y")
+ (set_attr "units" "s")
+ (set_attr "cross" "y,n,n")
+ (set_attr "dest_regfile" "b,b,any")])
+
+(define_insn "real_call"
+ [(unspec [(match_operand 0 "c6x_call_operand" "a,b,S1") (const_int 1)]
+ UNSPEC_REAL_JUMP)
+ (clobber (reg:SI REG_B3))]
+ ""
+ "%|%.\\tcall\\t%$\\t%0"
+ [(set_attr "type" "call")
+ (set_attr "has_shadow" "y")
+ (set_attr "predicable" "no")
+ (set_attr "units" "s")
+ (set_attr "cross" "y,n,n")
+ (set_attr "dest_regfile" "b,b,any")])
+
+(define_insn "real_ret"
+ [(unspec [(match_operand 0 "register_operand" "a,b") (const_int 2)]
+ UNSPEC_REAL_JUMP)]
+ ""
+ "%|%.\\tret\\t%$\\t%0"
+ [(set_attr "type" "branch")
+ (set_attr "has_shadow" "y")
+ (set_attr "units" "s")
+ (set_attr "cross" "y,n")
+ (set_attr "dest_regfile" "b")])
+
+;; Shadow insns emit only a comment; they exist for the scheduler.
+(define_insn "indirect_jump_shadow"
+ [(set (pc) (unspec [(pc)] UNSPEC_JUMP_SHADOW))]
+ ""
+ ";; indirect jump occurs"
+ [(set_attr "type" "shadow")])
+
+;; Operand 0 may be a PARALLEL which isn't handled by output_operand, so
+;; we don't try to print it.
+(define_insn "indirect_call_value_shadow"
+ [(set (match_operand 0 "" "")
+ (call (unspec [(pc)] UNSPEC_JUMP_SHADOW)
+ (const_int 0)))]
+ ""
+ ";; indirect call occurs, with return value"
+ [(set_attr "type" "shadow")])
+
+(define_insn "indirect_sibcall_shadow"
+ [(call (unspec [(pc)] UNSPEC_JUMP_SHADOW)
+ (const_int 0))]
+ "SIBLING_CALL_P (insn)"
+ ";; indirect sibcall occurs"
+ [(set_attr "type" "shadow")])
+
+(define_insn "indirect_call_shadow"
+ [(call (unspec [(pc)] UNSPEC_JUMP_SHADOW)
+ (const_int 0))]
+ ""
+ ";; indirect call occurs"
+ [(set_attr "type" "shadow")])
+
+(define_insn "call_value_shadow"
+ [(set (match_operand 0 "" "")
+ (call (unspec [(match_operand 1 "" "")] UNSPEC_JUMP_SHADOW)
+ (const_int 0)))]
+ ""
+ ";; call to %1 occurs, with return value"
+ [(set_attr "type" "shadow")])
+
+(define_insn "call_shadow"
+ [(call (unspec [(match_operand 0 "" "")] UNSPEC_JUMP_SHADOW)
+ (const_int 0))]
+ "!SIBLING_CALL_P (insn)"
+ ";; call to %0 occurs"
+ [(set_attr "type" "shadow")])
+
+(define_insn "sibcall_shadow"
+ [(call (unspec [(match_operand 0 "" "")] UNSPEC_JUMP_SHADOW)
+ (const_int 0))]
+ "SIBLING_CALL_P (insn)"
+ ";; sibcall to %0 occurs"
+ [(set_attr "type" "shadow")])
+
+(define_insn "jump_shadow"
+ [(set (pc) (unspec [(match_operand 0 "" "")] UNSPEC_JUMP_SHADOW))]
+ ""
+ ";; jump to %0 occurs"
+ [(set_attr "type" "shadow")])
+
+(define_insn "condjump_shadow"
+ [(set (pc)
+ (if_then_else (eq (unspec [(const_int 0)] UNSPEC_JUMP_SHADOW)
+ (const_int 0))
+ (match_operand 0 "" "")
+ (pc)))]
+ ""
+ ";; condjump to %0 occurs"
+ [(set_attr "type" "shadow")])
+
+(define_insn "return_shadow"
+ [(unspec [(const_int 0)] UNSPEC_JUMP_SHADOW)
+ (return)]
+ ""
+ ";; return occurs"
+ [(set_attr "type" "shadow")])
+
+;; -------------------------------------------------------------------------
+;; Add instructions
+;; -------------------------------------------------------------------------
+
+;; SImode add.  The C body picks among ADD/SUB/ADDAB/ADDAH/ADDAW/ADDAD/ADDK
+;; depending on the constant value and whether the insn landed on a D unit;
+;; the addab/addah/addaw forms with large offsets are only valid off the
+;; stack pointer (issp) on TARGET_INSNS_64PLUS, and not under a predicate.
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "register_operand"
+ "=a ,b , a, b, a, b, a, b, ab, a, b, a, b,ab")
+ (plus:SI (match_operand:SI 1 "register_operand"
+ "%a ,b , a, b, b, a, b, a, 0, a, b, z, z,0")
+ (match_operand:SI 2 "addsi_operand"
+ "aIs5,bIs5,?b,?a,?a,?b,?aIs5,?bIs5,I5x,I5x,I5x,Iux,Iux,IsB")))]
+ ""
+{
+ if (CONSTANT_P (operands[2]))
+ {
+ HOST_WIDE_INT val = INTVAL (operands[2]);
+
+ if (c6x_get_unit_specifier (insn) == 'd')
+ {
+ bool issp = (TARGET_INSNS_64PLUS
+ && operands[1] == stack_pointer_rtx
+ && GET_CODE (PATTERN (insn)) != COND_EXEC);
+
+ if (get_attr_cross (insn) == CROSS_N)
+ {
+ if (satisfies_constraint_Iu5 (operands[2]))
+ return "%|%.\\tadd\\t%$\\t%1, %2, %0";
+ else if (satisfies_constraint_In5 (operands[2]))
+ return "%|%.\\tsub\\t%$\\t%1, %n2, %0";
+ }
+
+ if (issp && val > 0 && val < 32768)
+ {
+ return "%|%.\\taddab\\t%$\\t%1, %2, %0";
+ }
+ /* Even offsets can use addah/subah (half-word scaling, %r2).  */
+ if ((val & 1) == 0 && ((val >= -62 && val <= 62)
+ || (issp && val > 0 && val < 65536)))
+ {
+ if (val < 0)
+ return "%|%.\\tsubah\\t%$\\t%1, %r2, %0";
+ else
+ return "%|%.\\taddah\\t%$\\t%1, %r2, %0";
+ }
+ /* Multiples of 4 can use addaw/subaw (word scaling, %R2).  */
+ else if ((val & 3) == 0 && ((val >= -124 && val <= 124)
+ || (issp && val > 0 && val < 131072)))
+ {
+ if (val < 0)
+ return "%|%.\\tsubaw\\t%$\\t%1, %R2, %0";
+ else
+ return "%|%.\\taddaw\\t%$\\t%1, %R2, %0";
+ }
+ /* Positive multiples of 8 up to 248 can use addad (scaled by 8).  */
+ else if ((val & 7) == 0 && val > 0 && val <= 248)
+ {
+ rtx xop[3];
+ xop[0] = operands[0];
+ xop[1] = operands[1];
+ xop[2] = GEN_INT (val >> 3);
+ output_asm_insn ("%|%.\\taddad\\t%$\\t%1, %2, %0", xop);
+ return "";
+ }
+ }
+ else
+ {
+ if (satisfies_constraint_Is5 (operands[2]))
+ return "%|%.\\tadd\\t%$\\t%2, %1, %0";
+ }
+ /* ADDK writes into its source; requires the tied-operand alternatives.  */
+ gcc_assert (rtx_equal_p (operands[0], operands[1]));
+ return "%|%.\\taddk\\t%$\\t%2, %0";
+ }
+ if (which_alternative == 4 || which_alternative == 5)
+ return "%|%.\\tadd\\t%$\\t%2, %1, %0";
+ else
+ return "%|%.\\tadd\\t%$\\t%1, %2, %0";
+}
+ [(set_attr "units62" "dls,dls,ls,ls,ls,ls,ls,ls,s,d,d,*,*,s")
+ (set_attr "units67" "dls,dls,ls,ls,ls,ls,ls,ls,ds,d,d,*,*,s")
+ (set_attr "units64" "dls,dls,dls,dls,dls,dls,ls,ls,ds,d,d,d,d,s")
+ (set_attr "cross" "n,n,y,y,y,y,y,y,n,n,n,y,n,n")
+ (set_attr "predicable" "yes,yes,yes,yes,yes,yes,yes,yes,yes,yes,yes,no,no,yes")])
+
+;; SImode subtract; the constant may appear as the minuend (scst5).
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,a,b,a,b")
+ (minus:SI (match_operand:SI 1 "reg_or_scst5_operand" "a,b,aIs5,bIs5,bIs5,aIs5")
+ (match_operand:SI 2 "register_operand" "a,b,a,b,?a,?b")))]
+ ""
+ "%|%.\\tsub\\t%$\\t%1, %2, %0"
+ [(set_attr "units62" "dls,dls,ls,ls,l,l")
+ (set_attr "units64" "dls,dls,ls,ls,ls,ls")
+ (set_attr "cross" "n,n,n,n,y,y")])
+
+;; Scaled add/subtract via the D-unit address-arithmetic instructions.
+(define_insn "*addshiftsi"
+ [(set (match_operand:SI 0 "register_operand" "=a,b")
+ (plus:SI (mult:SI (match_operand:SI 2 "register_operand" "a,b")
+ (match_operand:SI 3 "adda_scale_operand" "n,n"))
+ (match_operand:SI 1 "register_operand" "a,b")))]
+ ""
+ "%|%.\\tadda%d3\\t%$\\t%1, %2, %0"
+ [(set_attr "units" "d")])
+
+(define_insn "*subshiftsi"
+ [(set (match_operand:SI 0 "register_operand" "=a,b")
+ (minus:SI (match_operand:SI 1 "register_operand" "a,b")
+ (mult:SI (match_operand:SI 2 "register_operand" "a,b")
+ (match_operand:SI 3 "suba_scale_operand" "n,n"))))]
+ ""
+ "%|%.\\tsuba%d3\\t%$\\t%1, %2, %0"
+ [(set_attr "units" "d")])
+
+;; ADDU: 32+32 -> 40-bit add; used below to capture the carry for adddi3.
+(define_insn "addsidi3_widen"
+ [(set (match_operand:DI 0 "register_operand" "=a,b,a,b")
+ (plus:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "%a,b,a,b"))
+ (zero_extend:DI (match_operand:SI 2 "register_operand" "a,b,?b,?a"))))]
+ ""
+ "%|%.\\taddu\\t%$\\t%1, %2, %0"
+ [(set_attr "units" "l")
+ (set_attr "cross" "n,n,y,y")])
+
+;; DImode add: widen-add the low words (carry lands in the high word of
+;; tmp), then accumulate both high words on top of it.  A temporary is
+;; used when the destination overlaps the high input words.
+(define_expand "adddi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (plus:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:DI 2 "register_operand" "")))]
+ ""
+{
+ rtx tmp;
+ rtx lo_half[3], hi_half[3];
+ split_di (operands + 1, 2, lo_half + 1, hi_half + 1);
+ if (reg_overlap_mentioned_p (operands[0], hi_half[1])
+ || reg_overlap_mentioned_p (operands[0], hi_half[2]))
+ tmp = gen_reg_rtx (DImode);
+ else
+ tmp = operands[0];
+ split_di (&tmp, 1, lo_half, hi_half);
+ emit_insn (gen_addsidi3_widen (tmp, lo_half[1], lo_half[2]));
+ emit_insn (gen_addsi3 (hi_half[0], copy_rtx (hi_half[0]), hi_half[1]));
+ emit_insn (gen_addsi3 (copy_rtx (hi_half[0]),
+ copy_rtx (hi_half[0]), hi_half[2]));
+ if (tmp != operands[0])
+ emit_move_insn (operands[0], tmp);
+ DONE;
+})
+
+;; Floating-point add/subtract (hardware FP only); unit availability
+;; differs per sub-architecture, hence the units67/units67p/units674 attrs.
+(define_insn "addsf3"
+ [(set (match_operand:SF 0 "register_operand" "=a,b,a,b")
+ (plus:SF (match_operand:SF 1 "register_operand" "%a,b,a,b")
+ (match_operand:SF 2 "register_operand" "a,b,?b,?a")))]
+ "TARGET_FP"
+ "%|%.\\taddsp\\t%$\\t%1, %2, %0"
+ [(set_attr "type" "fp4")
+ (set_attr "units67" "l")
+ (set_attr "units67p" "ls")
+ (set_attr "units674" "ls")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "adddf3"
+ [(set (match_operand:DF 0 "register_operand" "=a,b,a,b")
+ (plus:DF (match_operand:DF 1 "register_operand" "%a,b,a,b")
+ (match_operand:DF 2 "register_operand" "a,b,?b,?a")))]
+ "TARGET_FP"
+ "%|%.\\tadddp\\t%$\\t%1, %2, %0"
+ [(set_attr "type" "adddp")
+ (set_attr "units67" "l")
+ (set_attr "units67p" "ls")
+ (set_attr "units674" "ls")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "subsf3"
+ [(set (match_operand:SF 0 "register_operand" "=a,b, a, b, a, b")
+ (minus:SF (match_operand:SF 1 "register_operand" "a,b, b, a, a, b")
+ (match_operand:SF 2 "register_operand" "a,b,?a,?b,?b,?a")))]
+ "TARGET_FP"
+ "%|%.\\tsubsp\\t%$\\t%1, %2, %0"
+ [(set_attr "type" "fp4")
+ (set_attr "units67" "l")
+ (set_attr "units67p" "ls")
+ (set_attr "units674" "ls")
+ (set_attr "cross" "n,n,y,y,y,y")])
+
+(define_insn "subdf3"
+ [(set (match_operand:DF 0 "register_operand" "=a,b, a, b, a, b")
+ (minus:DF (match_operand:DF 1 "register_operand" "a,b, b, a, a, b")
+ (match_operand:DF 2 "register_operand" "a,b,?a,?b,?b,?a")))]
+ "TARGET_FP"
+ "%|%.\\tsubdp\\t%$\\t%1, %2, %0"
+ [(set_attr "type" "adddp")
+ (set_attr "units67" "l")
+ (set_attr "units67p" "ls")
+ (set_attr "units674" "ls")
+ (set_attr "cross" "n,n,y,y,y,y")])
+
+;; -------------------------------------------------------------------------
+;; Logical instructions
+;; -------------------------------------------------------------------------
+
+;; AND; a constant that clears a contiguous field (Jc) uses CLR instead.
+(define_insn "andsi3"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,a,b,a,b")
+ (and:SI (match_operand:SI 1 "register_operand" "%a,b,b,a,a,b")
+ (match_operand:SI 2 "andsi_operand" "aIs5,bIs5,?aIs5,?bIs5,aJc,bJc")))]
+ ""
+{
+ if (which_alternative < 4)
+ return "%|%.\\tand\\t%$\\t%2, %1, %0";
+ else
+ return "%|%.\\tclr\\t%$\\t%1, %f2, %F2, %0";
+}
+ [(set_attr "units62" "ls,ls,ls,ls,s,s")
+ (set_attr "units64" "dls,dls,dls,dls,s,s")
+ (set_attr "cross" "n,n,y,y,n,n")])
+
+;; IOR; a constant that sets a contiguous field (Js) uses SET instead.
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,a,b,a,b")
+ (ior:SI (match_operand:SI 1 "register_operand" "%a,b,b,a,a,b")
+ (match_operand:SI 2 "iorsi_operand" "aIs5,bIs5,?aIs5,?bIs5,aJs,bJs")))]
+ ""
+{
+ if (which_alternative < 4)
+ return "%|%.\\tor\\t%$\\t%2, %1, %0";
+ else
+ return "%|%.\\tset\\t%$\\t%1, %s2, %S2, %0";
+}
+ [(set_attr "units62" "ls,ls,ls,ls,s,s")
+ (set_attr "units64" "dls,dls,dls,dls,s,s")
+ (set_attr "cross" "n,n,y,y,n,n")])
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,a,b")
+ (xor:SI (match_operand:SI 1 "register_operand" "%a,b,b,a")
+ (match_operand:SI 2 "reg_or_scst5_operand" "aIs5,bIs5,?aIs5,?bIs5")))]
+ ""
+ "%|%.\\txor\\t%$\\t%2, %1, %0"
+ [(set_attr "units62" "ls")
+ (set_attr "units64" "dls")
+ (set_attr "cross" "n,n,y,y")])
+
+;; -------------------------------------------------------------------------
+;; Conversions
+;; -------------------------------------------------------------------------
+
+(define_insn "extendsfdf2"
+ [(set (match_operand:DF 0 "register_operand" "=a,b,a,b")
+ (float_extend:DF (match_operand:SF 1 "register_operand" "a,b,?b,?a")))]
+ "TARGET_FP"
+ "%|%.\\tspdp\\t%$\\t%1,%0"
+ [(set_attr "type" "dp2")
+ (set_attr "units" "s")
+ (set_attr "cross" "n,n,y,y")])
+
+;; DP->SP truncation: no cross-path alternatives (DF source must be on
+;; the same side as the destination).
+(define_insn "truncdfsf2"
+ [(set (match_operand:SF 0 "register_operand" "=a,b")
+ (float_truncate:SF (match_operand:DF 1 "register_operand" "a,b")))]
+ "TARGET_FP"
+ "%|%.\\tdpsp\\t%$\\t%1,%0"
+ [(set_attr "type" "fp4")
+ (set_attr "units" "l")
+ (set_attr "cross" "n")])
+
+;;;; Convert between signed integer types and floating point.
+(define_insn "floatsisf2"
+ [(set (match_operand:SF 0 "register_operand" "=a,b,a,b")
+ (float:SF (match_operand:SI 1 "register_operand" "a,b,?b,?a")))]
+ "TARGET_FP"
+ "%|%.\\tintsp\\t%$\\t%1,%0"
+ [(set_attr "type" "fp4")
+ (set_attr "units" "l")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "floatunssisf2"
+ [(set (match_operand:SF 0 "register_operand" "=a,b,a,b")
+ (unsigned_float:SF (match_operand:SI 1 "register_operand" "a,b,?b,?a")))]
+ "TARGET_FP"
+ "%|%.\\tintspu\\t%$\\t%1,%0"
+ [(set_attr "type" "fp4")
+ (set_attr "units" "l")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "floatsidf2"
+ [(set (match_operand:DF 0 "register_operand" "=a,b,a,b")
+ (float:DF (match_operand:SI 1 "register_operand" "a,b,?b,?a")))]
+ "TARGET_FP"
+ "%|%.\\tintdp\\t%$\\t%1,%0"
+ [(set_attr "type" "intdp")
+ (set_attr "units" "l")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "floatunssidf2"
+ [(set (match_operand:DF 0 "register_operand" "=a,b,a,b")
+ (unsigned_float:DF (match_operand:SI 1 "register_operand" "a,b,?b,?a")))]
+ "TARGET_FP"
+ "%|%.\\tintdpu\\t%$\\t%1,%0"
+ [(set_attr "type" "intdp")
+ (set_attr "units" "l")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "fix_truncsfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,a,b")
+ (fix:SI (match_operand:SF 1 "register_operand" "a,b,?b,?a")))]
+ "TARGET_FP"
+ "%|%.\\tsptrunc\\t%$\\t%1,%0"
+ [(set_attr "type" "fp4")
+ (set_attr "units" "l")
+ (set_attr "cross" "n,n,y,y")])
+
+;; DP->int truncation: like truncdfsf2, no cross-path alternatives.
+(define_insn "fix_truncdfsi2"
+ [(set (match_operand:SI 0 "register_operand" "=a,b")
+ (fix:SI (match_operand:DF 1 "register_operand" "a,b")))]
+ "TARGET_FP"
+ "%|%.\\tdptrunc\\t%$\\t%1,%0"
+ [(set_attr "type" "fp4")
+ (set_attr "units" "l")
+ (set_attr "cross" "n")])
+
+;; -------------------------------------------------------------------------
+;; Saturating arithmetic
+;; -------------------------------------------------------------------------
+
+;; SADD: constant forms (Is5) are restricted to the L unit.
+(define_insn "saddsi3"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,a,b,a,b,a,b")
+ (ss_plus:SI (match_operand:SI 1 "register_operand" "a,b,?b,?a,a,b,?b,?a")
+ (match_operand:SI 2 "reg_or_const_int_operand" "a,b,a,b,aIs5,bIs5,aIs5,bIs5")))]
+ ""
+ "%|%.\\tsadd\\t%$\\t%2, %1, %0"
+ [(set_attr "units" "ls,ls,ls,ls,l,l,l,l")
+ (set_attr "cross" "n,n,y,y,n,n,y,y")])
+
+(define_insn "ssubsi3"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,a,b")
+ (ss_minus:SI (match_operand:SI 1 "reg_or_scst5_operand" "aIs5,bIs5,?bIs5,?aIs5")
+ (match_operand:SI 2 "register_operand" "a,b,a,b")))]
+ ""
+ "%|%.\\tssub\\t%$\\t%1, %2, %0"
+ [(set_attr "units" "l")
+ (set_attr "cross" "n,n,y,y")])
+
+;; SUBC: conditional subtract step, exposed via an unspec for the builtin.
+(define_insn "subcsi3"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,a,b")
+ (unspec:SI
+ [(match_operand:SI 1 "register_operand" "a,b,a,b")
+ (match_operand:SI 2 "register_operand" "a,b,?b,?a")]
+ UNSPEC_SUBC))]
+ ""
+ "%|%.\\tsubc\\t%$\\t%1, %2, %0"
+ [(set_attr "units" "l")
+ (set_attr "cross" "n,n,y,y")])
+
+;; -------------------------------------------------------------------------
+;; Call instructions
+;; -------------------------------------------------------------------------
+;; All four standard call expanders defer to c6x_expand_call; sibcalls
+;; additionally record their presence for the prologue/epilogue code.
+
+(define_expand "call"
+ [(match_operand 0 "" "")]
+ ""
+{
+ c6x_expand_call (NULL_RTX, operands[0], false);
+ DONE;
+})
+
+(define_expand "call_value"
+ [(match_operand 0 "" "")
+ (match_operand 1 "" "")]
+ ""
+{
+ c6x_expand_call (operands[0], operands[1], false);
+ DONE;
+})
+
+(define_expand "sibcall"
+ [(match_operand 0 "" "")]
+ ""
+{
+ c6x_expand_call (NULL_RTX, operands[0], true);
+ cfun->machine->contains_sibcall = true;
+ DONE;
+})
+
+(define_expand "sibcall_value"
+ [(match_operand 0 "" "")
+ (match_operand 1 "" "")]
+ ""
+{
+ c6x_expand_call (operands[0], operands[1], true);
+ cfun->machine->contains_sibcall = true;
+ DONE;
+})
+
+(define_insn "call_internal"
+ [(call (mem (match_operand:SI 0 "c6x_call_operand" "S1,a,b"))
+ (const_int 0))]
+ "!SIBLING_CALL_P (insn)"
+ "%|%.\\tcall\\t%$\\t%0"
+ [(set_attr "type" "call")
+ (set_attr "predicable" "no")
+ (set_attr "units" "s")
+ (set_attr "dest_regfile" "any,b,b")
+ (set_attr "cross" "n,y,n")])
+
+(define_insn "call_value_internal"
+ [(set (match_operand 0 "" "")
+ (call (mem (match_operand:SI 1 "c6x_call_operand" "S1,a,b"))
+ (const_int 0)))]
+ ""
+ "%|%.\\tcall\\t%$\\t%1"
+ [(set_attr "type" "call")
+ (set_attr "predicable" "no")
+ (set_attr "units" "s")
+ (set_attr "dest_regfile" "any,b,b")
+ (set_attr "cross" "n,y,n")])
+
+;; Sibcalls branch rather than call, so B3 is not clobbered here.
+(define_insn "sibcall_internal"
+ [(call (mem (match_operand:SI 0 "c6x_call_operand" "S1,C"))
+ (const_int 0))]
+ "SIBLING_CALL_P (insn)"
+ "%|%.\\tb\\t%$\\t%0"
+ [(set_attr "type" "branch")
+ (set_attr "predicable" "no")
+ (set_attr "units" "s")
+ (set_attr "dest_regfile" "any,b")])
+
+;; CALLP: call with built-in delay-slot padding (the 6-cycle UNSPEC_NOP).
+(define_insn "callp"
+ [(call (mem (match_operand:SI 0 "c6x_call_operand" "S1"))
+ (const_int 0))
+ (unspec [(const_int 6)] UNSPEC_NOP)]
+ "!SIBLING_CALL_P (insn)"
+ "%|%.\\tcallp\\t%$\\t%0, B3"
+ [(set_attr "type" "callp")
+ (set_attr "predicable" "no")
+ (set_attr "units" "s")
+ (set_attr "dest_regfile" "b")
+ (set_attr "cross" "n")])
+
+(define_insn "callp_value"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (call (mem (match_operand:SI 1 "c6x_call_operand" "S1"))
+ (const_int 0)))
+ (unspec [(const_int 6)] UNSPEC_NOP)]
+ "!SIBLING_CALL_P (insn)"
+ "%|%.\\tcallp\\t%$\\t%1, B3"
+ [(set_attr "type" "callp")
+ (set_attr "predicable" "no")
+ (set_attr "units" "s")
+ (set_attr "dest_regfile" "b")
+ (set_attr "cross" "n")])
+
+(define_insn "return_internal"
+ [(return)
+ (use (match_operand:SI 0 "register_operand" "b"))]
+ "reload_completed"
+ "%|%.\\tret\\t%$\\t%0"
+ [(set_attr "type" "branch")
+ (set_attr "units" "s")
+ (set_attr "dest_regfile" "b")])
+
+;; ADDKPC: load a PC-relative label address, with %2 padding NOP cycles.
+(define_insn "addkpc"
+ [(set (match_operand:SI 0 "register_operand" "=b")
+ (unspec:SI [(match_operand 1 "" "")] UNSPEC_ADDKPC))
+ (unspec [(match_operand 2 "const_int_operand" "n")] UNSPEC_NOP)]
+ "TARGET_INSNS_64"
+ "%|%.\\taddkpc\\t%$\\t%l1, %0, %2"
+ [(set_attr "units" "s")
+ (set_attr "dest_regfile" "b")])
+
+;; -------------------------------------------------------------------------
+;; Unary operations
+;; -------------------------------------------------------------------------
+
+;; The four alternatives in these patterns cover the A and B register
+;; files with and without a cross path: alternatives where source and
+;; destination files differ are marked "cross" "y" and discouraged
+;; with "?" in the constraint.
+
+(define_insn "negsi2"
+ [(set (match_operand:SI 0 "register_operand" "=a, a, b, b")
+ (neg:SI (match_operand:SI 1 "register_operand" "a,?b, b,?a")))]
+ ""
+ "%|%.\\tneg\\t%$\\t%1, %0"
+ [(set_attr "units" "ls")
+ (set_attr "cross" "n,y,n,y")])
+
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "register_operand" "=a, a, b, b")
+ (not:SI (match_operand:SI 1 "register_operand" "a,?b, b,?a")))]
+ ""
+ "%|%.\\tnot\\t%$\\t%1, %0"
+ [(set_attr "units" "ls")
+ (set_attr "cross" "n,y,n,y")])
+
+;; Count leading redundant sign bits: the NORM instruction.
+(define_insn "clrsbsi2"
+ [(set (match_operand:SI 0 "register_operand" "=a, a, b, b")
+ (clrsb:SI (match_operand:SI 1 "register_operand" "a,?b, b,?a")))]
+ ""
+ "%|%.\\tnorm\\t%$\\t%1, %0"
+ [(set_attr "units" "l")
+ (set_attr "cross" "n,y,n,y")])
+
+;; Count leading zeros via LMBD (leftmost bit detect) searching for a 1.
+(define_insn "clzsi2"
+ [(set (match_operand:SI 0 "register_operand" "=a, a, b, b")
+ (clz:SI (match_operand:SI 1 "register_operand" "a,?b, b,?a")))]
+ ""
+ "%|%.\\tlmbd\\t%$\\t1, %1, %0"
+ [(set_attr "units" "l")
+ (set_attr "cross" "n,y,n,y")])
+
+;; bitrevsi2 is defined in c6x-mult.md.in.
+
+;; Count trailing zeros: bit-reverse the input, then count leading zeros.
+(define_expand "ctzsi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (ctz:SI (match_operand:SI 1 "register_operand" "")))]
+ "TARGET_INSNS_64"
+{
+ rtx tmpreg = gen_reg_rtx (SImode);
+ emit_insn (gen_bitrevsi2 (tmpreg, operands[1]));
+ emit_insn (gen_clzsi2 (operands[0], tmpreg));
+ DONE;
+})
+
+;; 64-bit count trailing zeros: bit-reverse each half into the opposite
+;; half of a DImode temporary (which reverses the whole 64-bit value),
+;; then use a DImode clz on the result.
+(define_expand "ctzdi2"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (ctz:SI (match_operand:DI 1 "register_operand" "")))]
+ "TARGET_INSNS_64"
+{
+ rtx tmpreg = gen_reg_rtx (DImode);
+ rtx out;
+ emit_insn (gen_bitrevsi2 (gen_highpart (SImode, tmpreg),
+ gen_lowpart (SImode, operands[1])));
+ emit_insn (gen_bitrevsi2 (gen_lowpart (SImode, tmpreg),
+ gen_highpart (SImode, operands[1])));
+ out = expand_unop (DImode, clz_optab, tmpreg, operands[0], 1);
+ if (!rtx_equal_p (out, operands[0]))
+ emit_move_insn (operands[0], out);
+ DONE;
+})
+
+;; Saturating absolute value (ABS saturates INT_MIN to INT_MAX).
+(define_insn "ssabssi2"
+ [(set (match_operand:SI 0 "register_operand" "=a, a, b, b")
+ (ss_abs:SI (match_operand:SI 1 "register_operand" "a,?b, b,?a")))]
+ ""
+ "%|%.\\tabs\\t%$\\t%1, %0"
+ [(set_attr "units" "l")
+ (set_attr "cross" "n,y,n,y")])
+
+;; -------------------------------------------------------------------------
+;; Shift instructions
+;; -------------------------------------------------------------------------
+
+;; Code iterators covering plain and saturating left shifts plus both
+;; right shifts; <shift_code> names the pattern, <shift_insn> the
+;; assembler mnemonic.
+(define_code_iterator any_shift [ss_ashift ashift ashiftrt lshiftrt])
+(define_code_iterator any_rshift [ashiftrt lshiftrt])
+(define_code_attr shift_code [(ss_ashift "ss_ashl") (ashift "ashl")
+ (ashiftrt "ashr") (lshiftrt "lshr")])
+(define_code_attr shift_insn [(ss_ashift "sshl") (ashift "shl")
+ (ashiftrt "shr") (lshiftrt "shru")])
+
+;; SImode shifts: the count is either a register or a 5-bit unsigned
+;; constant (Iu5).
+(define_insn "<shift_code>si3"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,a,b")
+ (any_shift:SI (match_operand:SI 1 "register_operand" "a,b,?b,?a")
+ (match_operand:SI 2 "reg_or_ucst5_operand" "aIu5,bIu5,aIu5,bIu5")))]
+ ""
+ "%|%.\\t<shift_insn>\\t%$\\t%1, %2, %0"
+ [(set_attr "units" "s")
+ (set_attr "cross" "n,n,y,y")])
+
+;; See c6x-mult.md.in for the rotlsi3 pattern.
+
+;; Rotate a 64-bit value right by exactly 16, using DPACKX2 on the two
+;; halves (%P1/%p1 select the high/low parts -- c6x print modifiers).
+(define_insn "rotrdi3_16"
+ [(set (match_operand:DI 0 "register_operand" "=a,b")
+ (rotatert:DI (match_operand:DI 1 "register_operand" "a,b")
+ (const_int 16)))]
+ "TARGET_INSNS_64PLUS"
+ "%|%.\\tdpackx2\\t%$\\t%P1, %p1, %0"
+ [(set_attr "units" "l")
+ (set_attr "cross" "n")])
+
+;; SHLMB: shift left and merge byte -- (op1 << 8) | (op2 >> 24).
+(define_insn "shlmbsi3"
+ [(set (match_operand:SI 0 "register_operand" "=a,b,a,b")
+ (ior:SI (ashift:SI (match_operand:SI 1 "register_operand" "a,b,?b,?a")
+ (const_int 8))
+ (lshiftrt:SI (match_operand:SI 2 "register_operand" "a,b,a,b")
+ (const_int 24))))]
+ "TARGET_INSNS_64"
+ "%|%.\\tshlmb\\t%$\\t%2, %1, %0"
+ [(set_attr "units" "ls")
+ (set_attr "cross" "n,n,y,y")])
+
+;; 64-bit left shift: only the by-8 case is handled here, as
+;; SHLMB(hi1, lo1) for the high word plus an SImode shift for the low
+;; word; a temporary avoids clobbering lo1 when it overlaps hi0.
+;; All other counts FAIL and fall back to the generic expansion.
+(define_expand "ashldi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (ashift:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))]
+ "TARGET_INSNS_64"
+{
+ if (CONST_INT_P (operands[2]) && INTVAL (operands[2]) == 8)
+ {
+ rtx lo0, lo1, hi0, hi1, tmp;
+ lo0 = gen_lowpart (SImode, operands[0]);
+ hi0 = gen_highpart (SImode, operands[0]);
+ lo1 = gen_lowpart (SImode, operands[1]);
+ hi1 = gen_highpart (SImode, operands[1]);
+ if (reg_overlap_mentioned_p (hi0, lo1))
+ tmp = gen_reg_rtx (SImode);
+ else
+ tmp = hi0;
+ emit_insn (gen_shlmbsi3 (tmp, hi1, lo1));
+ emit_insn (gen_ashlsi3 (lo0, lo1, operands[2]));
+ if (tmp != hi0)
+ emit_move_insn (hi0, tmp);
+ DONE;
+ }
+ FAIL;
+})
+
+;; 64-bit rotate right: only the by-16 case (rotrdi3_16) is supported.
+(define_expand "rotrdi3"
+ [(set (match_operand:DI 0 "register_operand" "")
+ (rotatert:DI (match_operand:DI 1 "register_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")))]
+ "TARGET_INSNS_64PLUS"
+{
+ if (CONST_INT_P (operands[2]) && INTVAL (operands[2]) == 16)
+ {
+ emit_insn (gen_rotrdi3_16 (operands[0], operands[1]));
+ DONE;
+ }
+ FAIL;
+})
+
+;; SWAP4 swaps the bytes within each halfword.
+(define_insn "bswapv2hi2"
+ [(set (match_operand:V2HI 0 "register_operand" "=a,b,a,b")
+ (bswap:V2HI (match_operand:V2HI 1 "register_operand" "a,b,?b,?a")))]
+ "TARGET_INSNS_64"
+ "%|%.\\tswap4\\t%$\\t%1, %0"
+ [(set_attr "units" "l")
+ (set_attr "cross" "n,n,y,y")])
+
+;; 32-bit byte swap: rotate by 16 to swap the halfwords, then SWAP4 to
+;; swap the bytes within each halfword.
+(define_expand "bswapsi2"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (bswap:SI (match_operand:SI 1 "register_operand" "")))]
+ "TARGET_INSNS_64"
+{
+ rtx tmpreg = gen_reg_rtx (SImode);
+ rtx tmpv2 = gen_lowpart (V2HImode, tmpreg);
+ rtx op0v2 = gen_lowpart (V2HImode, operands[0]);
+ emit_insn (gen_rotlsi3 (tmpreg, operands[1], GEN_INT (16)));
+ emit_insn (gen_bswapv2hi2 (op0v2, tmpv2));
+ DONE;
+})
+
+;; -------------------------------------------------------------------------
+;; Division
+;; -------------------------------------------------------------------------
+
+;; Each pattern below represents a call to a __c6xabi_* helper routine:
+;; the dividend lives in A4, the divisor in B4, and the result comes
+;; back in A4 (quotient or remainder) and, for the divrem variants, A5
+;; (remainder).  The clobber list of each pattern spells out exactly
+;; which registers that particular helper may destroy -- NOTE(review):
+;; the lists differ between helpers (e.g. remu clobbers A7 where divi
+;; clobbers A0); presumably each matches its helper's register usage in
+;; libgcc -- confirm against the C6000 EABI helper implementations.
+;; The *_indcall variants take the helper's address in a B-file
+;; register operand instead of calling it directly, for use under
+;; TARGET_LONG_CALLS.
+
+(define_insn "divsi3_insn"
+ [(set (reg:SI REG_A4) (div:SI (reg:SI REG_A4) (reg:SI REG_B4)))
+ (clobber (reg:SI REG_A0))
+ (clobber (reg:SI REG_A1))
+ (clobber (reg:SI REG_A2))
+ (clobber (reg:SI REG_A6))
+ (clobber (reg:SI REG_B0))
+ (clobber (reg:SI REG_B1))
+ (clobber (reg:SI REG_B2))
+ (clobber (reg:SI REG_B3))
+ (clobber (reg:SI REG_B4))
+ (clobber (reg:SI REG_B5))
+ (clobber (reg:SI REG_B30))
+ (clobber (reg:SI REG_B31))]
+ ""
+ "%|%.\\tcall\\t%$\\t__c6xabi_divi"
+ [(set_attr "type" "call")
+ (set_attr "dest_regfile" "any")
+ (set_attr "units" "s")
+ (set_attr "cross" "n")])
+
+(define_insn "divsi3_insn_indcall"
+ [(set (reg:SI REG_A4) (div:SI (reg:SI REG_A4) (reg:SI REG_B4)))
+ (use (match_operand:SI 0 "register_operand" "b"))
+ (clobber (reg:SI REG_A0))
+ (clobber (reg:SI REG_A1))
+ (clobber (reg:SI REG_A2))
+ (clobber (reg:SI REG_A6))
+ (clobber (reg:SI REG_B0))
+ (clobber (reg:SI REG_B1))
+ (clobber (reg:SI REG_B2))
+ (clobber (reg:SI REG_B3))
+ (clobber (reg:SI REG_B4))
+ (clobber (reg:SI REG_B5))
+ (clobber (reg:SI REG_B30))
+ (clobber (reg:SI REG_B31))]
+ ""
+ "%|%.\\tcall\\t%$\\t%0"
+ [(set_attr "type" "call")
+ (set_attr "dest_regfile" "any")
+ (set_attr "units" "s")
+ (set_attr "cross" "n")])
+
+(define_insn "udivsi3_insn"
+ [(set (reg:SI REG_A4) (udiv:SI (reg:SI REG_A4) (reg:SI REG_B4)))
+ (clobber (reg:SI REG_A0))
+ (clobber (reg:SI REG_A1))
+ (clobber (reg:SI REG_A2))
+ (clobber (reg:SI REG_A6))
+ (clobber (reg:SI REG_B0))
+ (clobber (reg:SI REG_B1))
+ (clobber (reg:SI REG_B2))
+ (clobber (reg:SI REG_B3))
+ (clobber (reg:SI REG_B4))
+ (clobber (reg:SI REG_B30))
+ (clobber (reg:SI REG_B31))]
+ ""
+ "%|%.\\tcall\\t%$\\t__c6xabi_divu"
+ [(set_attr "type" "call")
+ (set_attr "dest_regfile" "any")
+ (set_attr "units" "s")
+ (set_attr "cross" "n")])
+
+(define_insn "udivsi3_insn_indcall"
+ [(set (reg:SI REG_A4) (udiv:SI (reg:SI REG_A4) (reg:SI REG_B4)))
+ (use (match_operand:SI 0 "register_operand" "b"))
+ (clobber (reg:SI REG_A0))
+ (clobber (reg:SI REG_A1))
+ (clobber (reg:SI REG_A2))
+ (clobber (reg:SI REG_A6))
+ (clobber (reg:SI REG_B0))
+ (clobber (reg:SI REG_B1))
+ (clobber (reg:SI REG_B2))
+ (clobber (reg:SI REG_B3))
+ (clobber (reg:SI REG_B4))
+ (clobber (reg:SI REG_B30))
+ (clobber (reg:SI REG_B31))]
+ ""
+ "%|%.\\tcall\\t%$\\t%0"
+ [(set_attr "type" "call")
+ (set_attr "dest_regfile" "any")
+ (set_attr "units" "s")
+ (set_attr "cross" "n")])
+
+(define_insn "modsi3_insn"
+ [(set (reg:SI REG_A4) (mod:SI (reg:SI REG_A4) (reg:SI REG_B4)))
+ (clobber (reg:SI REG_A1))
+ (clobber (reg:SI REG_A2))
+ (clobber (reg:SI REG_A5))
+ (clobber (reg:SI REG_A6))
+ (clobber (reg:SI REG_B0))
+ (clobber (reg:SI REG_B1))
+ (clobber (reg:SI REG_B2))
+ (clobber (reg:SI REG_B3))
+ (clobber (reg:SI REG_B4))
+ (clobber (reg:SI REG_B30))
+ (clobber (reg:SI REG_B31))]
+ ""
+ "%|%.\\tcall\\t%$\\t__c6xabi_remi"
+ [(set_attr "type" "call")
+ (set_attr "dest_regfile" "any")
+ (set_attr "units" "s")
+ (set_attr "cross" "n")])
+
+(define_insn "modsi3_insn_indcall"
+ [(set (reg:SI REG_A4) (mod:SI (reg:SI REG_A4) (reg:SI REG_B4)))
+ (use (match_operand:SI 0 "register_operand" "b"))
+ (clobber (reg:SI REG_A1))
+ (clobber (reg:SI REG_A2))
+ (clobber (reg:SI REG_A5))
+ (clobber (reg:SI REG_A6))
+ (clobber (reg:SI REG_B0))
+ (clobber (reg:SI REG_B1))
+ (clobber (reg:SI REG_B2))
+ (clobber (reg:SI REG_B3))
+ (clobber (reg:SI REG_B4))
+ (clobber (reg:SI REG_B30))
+ (clobber (reg:SI REG_B31))]
+ ""
+ "%|%.\\tcall\\t%$\\t%0"
+ [(set_attr "type" "call")
+ (set_attr "dest_regfile" "any")
+ (set_attr "units" "s")
+ (set_attr "cross" "n")])
+
+(define_insn "divmodsi4_insn"
+ [(set (reg:SI REG_A4) (div:SI (reg:SI REG_A4) (reg:SI REG_B4)))
+ (set (reg:SI REG_A5) (mod:SI (reg:SI REG_A4) (reg:SI REG_B4)))
+ (clobber (reg:SI REG_A1))
+ (clobber (reg:SI REG_A2))
+ (clobber (reg:SI REG_A6))
+ (clobber (reg:SI REG_B0))
+ (clobber (reg:SI REG_B1))
+ (clobber (reg:SI REG_B2))
+ (clobber (reg:SI REG_B3))
+ (clobber (reg:SI REG_B4))
+ (clobber (reg:SI REG_B30))
+ (clobber (reg:SI REG_B31))]
+ ""
+ "%|%.\\tcall\\t%$\\t__c6xabi_divremi"
+ [(set_attr "type" "call")
+ (set_attr "dest_regfile" "any")
+ (set_attr "units" "s")
+ (set_attr "cross" "n")])
+
+(define_insn "divmodsi4_insn_indcall"
+ [(set (reg:SI REG_A4) (div:SI (reg:SI REG_A4) (reg:SI REG_B4)))
+ (set (reg:SI REG_A5) (mod:SI (reg:SI REG_A4) (reg:SI REG_B4)))
+ (use (match_operand:SI 0 "register_operand" "b"))
+ (clobber (reg:SI REG_A1))
+ (clobber (reg:SI REG_A2))
+ (clobber (reg:SI REG_A5))
+ (clobber (reg:SI REG_A6))
+ (clobber (reg:SI REG_B0))
+ (clobber (reg:SI REG_B1))
+ (clobber (reg:SI REG_B2))
+ (clobber (reg:SI REG_B3))
+ (clobber (reg:SI REG_B4))
+ (clobber (reg:SI REG_B30))
+ (clobber (reg:SI REG_B31))]
+ ""
+ "%|%.\\tcall\\t%$\\t%0"
+ [(set_attr "type" "call")
+ (set_attr "dest_regfile" "any")
+ (set_attr "units" "s")
+ (set_attr "cross" "n")])
+
+(define_insn "umodsi3_insn"
+ [(set (reg:SI REG_A4) (umod:SI (reg:SI REG_A4) (reg:SI REG_B4)))
+ (clobber (reg:SI REG_A1))
+ (clobber (reg:SI REG_A5))
+ (clobber (reg:SI REG_A7))
+ (clobber (reg:SI REG_B0))
+ (clobber (reg:SI REG_B1))
+ (clobber (reg:SI REG_B2))
+ (clobber (reg:SI REG_B3))
+ (clobber (reg:SI REG_B4))
+ (clobber (reg:SI REG_B30))
+ (clobber (reg:SI REG_B31))]
+ ""
+ "%|%.\\tcall\\t%$\\t__c6xabi_remu"
+ [(set_attr "type" "call")
+ (set_attr "dest_regfile" "any")
+ (set_attr "units" "s")
+ (set_attr "cross" "n")])
+
+(define_insn "umodsi3_insn_indcall"
+ [(set (reg:SI REG_A4) (umod:SI (reg:SI REG_A4) (reg:SI REG_B4)))
+ (use (match_operand:SI 0 "register_operand" "b"))
+ (clobber (reg:SI REG_A1))
+ (clobber (reg:SI REG_A5))
+ (clobber (reg:SI REG_A7))
+ (clobber (reg:SI REG_B0))
+ (clobber (reg:SI REG_B1))
+ (clobber (reg:SI REG_B2))
+ (clobber (reg:SI REG_B3))
+ (clobber (reg:SI REG_B4))
+ (clobber (reg:SI REG_B30))
+ (clobber (reg:SI REG_B31))]
+ ""
+ "%|%.\\tcall\\t%$\\t%0"
+ [(set_attr "type" "call")
+ (set_attr "dest_regfile" "any")
+ (set_attr "units" "s")
+ (set_attr "cross" "n")])
+
+(define_insn "udivmodsi4_insn"
+ [(set (reg:SI REG_A4) (udiv:SI (reg:SI REG_A4) (reg:SI REG_B4)))
+ (set (reg:SI REG_A5) (umod:SI (reg:SI REG_A4) (reg:SI REG_B4)))
+ (clobber (reg:SI REG_A0))
+ (clobber (reg:SI REG_A1))
+ (clobber (reg:SI REG_A2))
+ (clobber (reg:SI REG_A6))
+ (clobber (reg:SI REG_B0))
+ (clobber (reg:SI REG_B1))
+ (clobber (reg:SI REG_B2))
+ (clobber (reg:SI REG_B3))
+ (clobber (reg:SI REG_B4))
+ (clobber (reg:SI REG_B30))
+ (clobber (reg:SI REG_B31))]
+ ""
+ "%|%.\\tcall\\t%$\\t__c6xabi_divremu"
+ [(set_attr "type" "call")
+ (set_attr "dest_regfile" "any")
+ (set_attr "units" "s")
+ (set_attr "cross" "n")])
+
+(define_insn "udivmodsi4_insn_indcall"
+ [(set (reg:SI REG_A4) (udiv:SI (reg:SI REG_A4) (reg:SI REG_B4)))
+ (set (reg:SI REG_A5) (umod:SI (reg:SI REG_A4) (reg:SI REG_B4)))
+ (use (match_operand:SI 0 "register_operand" "b"))
+ (clobber (reg:SI REG_A0))
+ (clobber (reg:SI REG_A1))
+ (clobber (reg:SI REG_A2))
+ (clobber (reg:SI REG_A6))
+ (clobber (reg:SI REG_B0))
+ (clobber (reg:SI REG_B1))
+ (clobber (reg:SI REG_B2))
+ (clobber (reg:SI REG_B3))
+ (clobber (reg:SI REG_B4))
+ (clobber (reg:SI REG_B30))
+ (clobber (reg:SI REG_B31))]
+ ""
+ "%|%.\\tcall\\t%$\\t%0"
+ [(set_attr "type" "call")
+ (set_attr "dest_regfile" "any")
+ (set_attr "units" "s")
+ (set_attr "cross" "n")])
+
+;; Expand a combined signed division/modulus into a helper call.  The
+;; inputs are moved into A4/B4, then REG_UNUSED notes on the two
+;; outputs select the cheapest helper: __c6xabi_divi if the remainder
+;; is dead, __c6xabi_remi if the quotient is dead, and the combined
+;; __c6xabi_divremi otherwise (quotient in A4, remainder in A5).
+;; Under TARGET_LONG_CALLS the helper address is first loaded into a
+;; register for an indirect call -- a fresh pseudo before reload, or
+;; the clobbered A6 afterwards (no new pseudos can be made then).
+(define_insn_and_split "divmodsi4"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (div:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "register_operand" "")))
+ (set (match_operand:SI 3 "register_operand" "")
+ (mod:SI (match_dup 1) (match_dup 2)))
+ (clobber (reg:SI REG_A0))
+ (clobber (reg:SI REG_A1))
+ (clobber (reg:SI REG_A2))
+ (clobber (reg:SI REG_A4))
+ (clobber (reg:SI REG_A5))
+ (clobber (reg:SI REG_A6))
+ (clobber (reg:SI REG_B0))
+ (clobber (reg:SI REG_B1))
+ (clobber (reg:SI REG_B2))
+ (clobber (reg:SI REG_B3))
+ (clobber (reg:SI REG_B4))
+ (clobber (reg:SI REG_B5))
+ (clobber (reg:SI REG_B30))
+ (clobber (reg:SI REG_B31))]
+ ""
+ "#"
+ ""
+ [(const_int 0)]
+{
+ rtx reg = NULL_RTX;
+
+ if (TARGET_LONG_CALLS)
+ {
+ if (reload_completed)
+ reg = gen_rtx_REG (SImode, REG_A6);
+ else
+ reg = gen_reg_rtx (SImode);
+ }
+ emit_move_insn (gen_rtx_REG (SImode, REG_A4), operands[1]);
+ emit_move_insn (gen_rtx_REG (SImode, REG_B4), operands[2]);
+ if (find_reg_note (curr_insn, REG_UNUSED, operands[3]))
+ {
+ if (TARGET_LONG_CALLS)
+ {
+ emit_move_insn (reg, optab_libfunc (sdiv_optab, SImode));
+ emit_insn (gen_divsi3_insn_indcall (reg));
+ }
+ else
+ emit_insn (gen_divsi3_insn ());
+ emit_move_insn (operands[0], gen_rtx_REG (SImode, REG_A4));
+ }
+ else if (find_reg_note (curr_insn, REG_UNUSED, operands[0]))
+ {
+ if (TARGET_LONG_CALLS)
+ {
+ emit_move_insn (reg, optab_libfunc (smod_optab, SImode));
+ emit_insn (gen_modsi3_insn_indcall (reg));
+ }
+ else
+ emit_insn (gen_modsi3_insn ());
+ emit_move_insn (operands[3], gen_rtx_REG (SImode, REG_A4));
+ }
+ else
+ {
+ if (TARGET_LONG_CALLS)
+ {
+ emit_move_insn (reg, optab_libfunc (sdivmod_optab, SImode));
+ emit_insn (gen_divmodsi4_insn_indcall (reg));
+ }
+ else
+ emit_insn (gen_divmodsi4_insn ());
+ emit_move_insn (operands[0], gen_rtx_REG (SImode, REG_A4));
+ emit_move_insn (operands[3], gen_rtx_REG (SImode, REG_A5));
+ }
+ DONE;
+})
+
+;; Unsigned counterpart of divmodsi4: same structure, dispatching to
+;; __c6xabi_divu / __c6xabi_remu / __c6xabi_divremu via the udiv/umod/
+;; udivmod optab libfuncs depending on which outputs are REG_UNUSED.
+(define_insn_and_split "udivmodsi4"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (udiv:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "register_operand" "")))
+ (set (match_operand:SI 3 "register_operand" "")
+ (umod:SI (match_dup 1) (match_dup 2)))
+ (clobber (reg:SI REG_A0))
+ (clobber (reg:SI REG_A1))
+ (clobber (reg:SI REG_A2))
+ (clobber (reg:SI REG_A4))
+ (clobber (reg:SI REG_A5))
+ (clobber (reg:SI REG_A6))
+ (clobber (reg:SI REG_A7))
+ (clobber (reg:SI REG_B0))
+ (clobber (reg:SI REG_B1))
+ (clobber (reg:SI REG_B2))
+ (clobber (reg:SI REG_B3))
+ (clobber (reg:SI REG_B4))
+ (clobber (reg:SI REG_B30))
+ (clobber (reg:SI REG_B31))]
+ ""
+ "#"
+ ""
+ [(const_int 0)]
+{
+ rtx reg = NULL_RTX;
+
+ if (TARGET_LONG_CALLS)
+ {
+ if (reload_completed)
+ reg = gen_rtx_REG (SImode, REG_A6);
+ else
+ reg = gen_reg_rtx (SImode);
+ }
+
+ emit_move_insn (gen_rtx_REG (SImode, REG_A4), operands[1]);
+ emit_move_insn (gen_rtx_REG (SImode, REG_B4), operands[2]);
+ if (find_reg_note (curr_insn, REG_UNUSED, operands[3]))
+ {
+ if (TARGET_LONG_CALLS)
+ {
+ emit_move_insn (reg, optab_libfunc (udiv_optab, SImode));
+ emit_insn (gen_udivsi3_insn_indcall (reg));
+ }
+ else
+ emit_insn (gen_udivsi3_insn ());
+ emit_move_insn (operands[0], gen_rtx_REG (SImode, REG_A4));
+ }
+ else if (find_reg_note (curr_insn, REG_UNUSED, operands[0]))
+ {
+ if (TARGET_LONG_CALLS)
+ {
+ emit_move_insn (reg, optab_libfunc (umod_optab, SImode));
+ emit_insn (gen_umodsi3_insn_indcall (reg));
+ }
+ else
+ emit_insn (gen_umodsi3_insn ());
+ emit_move_insn (operands[3], gen_rtx_REG (SImode, REG_A4));
+ }
+ else
+ {
+ if (TARGET_LONG_CALLS)
+ {
+ emit_move_insn (reg, optab_libfunc (udivmod_optab, SImode));
+ emit_insn (gen_udivmodsi4_insn_indcall (reg));
+ }
+ else
+ emit_insn (gen_udivmodsi4_insn ());
+ emit_move_insn (operands[0], gen_rtx_REG (SImode, REG_A4));
+ emit_move_insn (operands[3], gen_rtx_REG (SImode, REG_A5));
+ }
+ DONE;
+})
+
+;; -------------------------------------------------------------------------
+;; Multiplication
+;; See c6x-mult.md.in for define_insn patterns.
+;; -------------------------------------------------------------------------
+
+;; 16x16->32 signed multiply.  A constant second operand is routed to
+;; the mulhisi3_const pattern (in c6x-mult.md.in) rather than being
+;; forced into a register.
+(define_expand "mulhisi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (mult:SI (sign_extend:SI (match_operand:HI 1 "register_operand" ""))
+ (sign_extend:SI (match_operand:HI 2 "reg_or_scst5_operand" ""))))]
+ ""
+{
+ if (CONSTANT_P (operands[2]))
+ {
+ emit_insn (gen_mulhisi3_const (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+})
+
+;; Unsigned-by-signed 16x16->32 multiply; same constant dispatch.
+(define_expand "usmulhisi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (mult:SI (zero_extend:SI (match_operand:HI 1 "register_operand" ""))
+ (sign_extend:SI (match_operand:HI 2 "reg_or_scst5_operand" ""))))]
+ ""
+{
+ if (CONSTANT_P (operands[2]))
+ {
+ emit_insn (gen_usmulhisi3_const (operands[0], operands[1], operands[2]));
+ DONE;
+ }
+})
+
+;; 32x32->32 multiply.  Without hardware 32-bit multiply
+;; (!TARGET_MPY32), synthesize the low 32 bits of the product from
+;; three 16x16 multiplies using the identity in the comment below
+;; (N = 2^16); with MPY32 the template stands and matches an insn
+;; from c6x-mult.md.in.
+(define_expand "mulsi3"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (mult:SI (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "register_operand" "")))]
+ ""
+{
+ if (!TARGET_MPY32)
+ {
+ rtx lo1 = gen_lowpart (HImode, operands[1]);
+ rtx lo2 = gen_lowpart (HImode, operands[2]);
+ /* (N * AH + AL) * (N * BH + BL)
+ = N*(AH * BL + BH * AL) + AL*BL */
+ rtx tmp1 = gen_reg_rtx (SImode);
+ rtx tmp2 = gen_reg_rtx (SImode);
+ rtx tmp3 = gen_reg_rtx (SImode);
+ emit_insn (gen_umulhisi3 (tmp1, lo1, lo2));
+ emit_insn (gen_umulhisi3_lh (tmp2, lo1, operands[2]));
+ emit_insn (gen_umulhisi3_hl (tmp3, operands[1], lo2));
+ emit_insn (gen_addsi3 (tmp2, tmp2, tmp3));
+ emit_insn (gen_ashlsi3 (tmp2, tmp2, GEN_INT (16)));
+ emit_insn (gen_addsi3 (operands[0], tmp1, tmp2));
+ DONE;
+ }
+})
+
+;; -------------------------------------------------------------------------
+;; Floating point multiplication
+;; -------------------------------------------------------------------------
+
+;; Single-precision multiply (MPYSP); "%" marks the operands as
+;; commutative for the register allocator.
+(define_insn "mulsf3"
+ [(set (match_operand:SF 0 "register_operand" "=a,b,a,b")
+ (mult:SF (match_operand:SF 1 "register_operand" "%a,b,?a,?b")
+ (match_operand:SF 2 "register_operand" "a,b,b,a")))]
+ "TARGET_FP"
+ "%|%.\\tmpysp\\t%$\\t%1, %2, %0"
+ [(set_attr "type" "mpy4")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; Double-precision multiply (MPYDP); no cross-path alternatives.
+(define_insn "muldf3"
+ [(set (match_operand:DF 0 "register_operand" "=a,b")
+ (mult:DF (match_operand:DF 1 "register_operand" "%a,b")
+ (match_operand:DF 2 "register_operand" "a,b")))]
+ "TARGET_FP"
+ "%|%.\\tmpydp\\t%$\\t%1, %2, %0"
+ [(set_attr "type" "mpydp")
+ (set_attr "units" "m")
+ (set_attr "cross" "n")])
+
+;; Note that mpyspdp and mpysp2dp are available on C67x, despite what the
+;; manual says.
+;; Mixed-precision multiply: SF operand extended to DF on the fly.
+(define_insn "*muldf_ext1"
+ [(set (match_operand:DF 0 "register_operand" "=a,b,a,b")
+ (mult:DF (float_extend:DF (match_operand:SF 1 "register_operand" "a,b,a,b"))
+ (match_operand:DF 2 "register_operand" "a,b,?b,?a")))]
+ "TARGET_FP_EXT"
+ "%|%.\\tmpyspdp\\t%$\\t%1, %2, %0"
+ [(set_attr "type" "mpyspdp")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; Both SF operands extended: SP x SP -> DP multiply.
+(define_insn "*muldf_ext2"
+ [(set (match_operand:DF 0 "register_operand" "=a,b,a,b")
+ (mult:DF (float_extend:DF (match_operand:SF 1 "register_operand" "%a,b,a,b"))
+ (float_extend:DF (match_operand:SF 2 "register_operand" "a,b,?b,?a"))))]
+ "TARGET_FP_EXT"
+ "%|%.\\tmpysp2dp\\t%$\\t%1, %2, %0"
+ [(set_attr "type" "mpysp2dp")
+ (set_attr "units" "m")
+ (set_attr "cross" "n,n,y,y")])
+
+;; -------------------------------------------------------------------------
+;; Floating point division
+;; -------------------------------------------------------------------------
+
+;; RCPSP/RCPDP: hardware reciprocal estimate of 1/op1, modelled as an
+;; unspec since the result is only an approximation.
+(define_insn "rcpsf2"
+ [(set (match_operand:SF 0 "register_operand" "=a,b,a,b")
+ (unspec:SF [(match_operand:SF 1 "register_operand" "a,b,?b,?a")]
+ UNSPEC_RCP))]
+ "TARGET_FP"
+ "%|%.\\trcpsp\\t%$\\t%1, %0"
+ [(set_attr "units" "s")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "rcpdf2"
+ [(set (match_operand:DF 0 "register_operand" "=a,b")
+ (unspec:DF [(match_operand:DF 1 "register_operand" "a,b")]
+ UNSPEC_RCP))]
+ "TARGET_FP"
+ "%|%.\\trcpdp\\t%$\\t%1, %0"
+ [(set_attr "type" "dp2")
+ (set_attr "units" "s")
+ (set_attr "cross" "n")])
+
+;; Division via reciprocal refinement: start from the RCPSP estimate of
+;; 1/op2 and apply Newton-Raphson steps x' = x * (2 - op2 * x) -- two
+;; steps for single precision -- then multiply by op1.  operand 3 is
+;; the constant 2.0; operands 4-6 are scratch pseudos.  Only enabled
+;; when -freciprocal-math licenses the reduced precision.
+(define_expand "divsf3"
+ [(set (match_dup 4)
+ (unspec:SF [(match_operand:SF 2 "register_operand" "")]
+ UNSPEC_RCP))
+ (set (match_dup 5) (mult:SF (match_dup 2) (match_dup 4)))
+ (set (match_dup 6) (minus:SF (match_dup 3) (match_dup 5)))
+ (set (match_dup 4) (mult:SF (match_dup 4) (match_dup 6)))
+ (set (match_dup 5) (mult:SF (match_dup 2) (match_dup 4)))
+ (set (match_dup 6) (minus:SF (match_dup 3) (match_dup 5)))
+ (set (match_dup 4) (mult:SF (match_dup 4) (match_dup 6)))
+ (set (match_operand:SF 0 "register_operand" "")
+ (mult:SF (match_operand:SF 1 "register_operand")
+ (match_dup 4)))]
+ "TARGET_FP && flag_reciprocal_math"
+{
+ operands[3] = force_reg (SFmode,
+ CONST_DOUBLE_FROM_REAL_VALUE (dconst2, SFmode));
+ operands[4] = gen_reg_rtx (SFmode);
+ operands[5] = gen_reg_rtx (SFmode);
+ operands[6] = gen_reg_rtx (SFmode);
+})
+
+;; Same scheme for double precision, with three Newton-Raphson steps.
+(define_expand "divdf3"
+ [(set (match_dup 4)
+ (unspec:DF [(match_operand:DF 2 "register_operand" "")]
+ UNSPEC_RCP))
+ (set (match_dup 5) (mult:DF (match_dup 2) (match_dup 4)))
+ (set (match_dup 6) (minus:DF (match_dup 3) (match_dup 5)))
+ (set (match_dup 4) (mult:DF (match_dup 4) (match_dup 6)))
+ (set (match_dup 5) (mult:DF (match_dup 2) (match_dup 4)))
+ (set (match_dup 6) (minus:DF (match_dup 3) (match_dup 5)))
+ (set (match_dup 4) (mult:DF (match_dup 4) (match_dup 6)))
+ (set (match_dup 5) (mult:DF (match_dup 2) (match_dup 4)))
+ (set (match_dup 6) (minus:DF (match_dup 3) (match_dup 5)))
+ (set (match_dup 4) (mult:DF (match_dup 4) (match_dup 6)))
+ (set (match_operand:DF 0 "register_operand" "")
+ (mult:DF (match_operand:DF 1 "register_operand")
+ (match_dup 4)))]
+ "TARGET_FP && flag_reciprocal_math"
+{
+ operands[3] = force_reg (DFmode,
+ CONST_DOUBLE_FROM_REAL_VALUE (dconst2, DFmode));
+ operands[4] = gen_reg_rtx (DFmode);
+ operands[5] = gen_reg_rtx (DFmode);
+ operands[6] = gen_reg_rtx (DFmode);
+})
+
+;; -------------------------------------------------------------------------
+;; Block moves
+;; -------------------------------------------------------------------------
+
+;; Block copy: operand 0 = destination, 1 = source, 2 = byte count,
+;; 3 = alignment; operands 4 and 5 are additional constant parameters
+;; passed straight through to the port's expander.  FAIL falls back to
+;; the generic memcpy expansion when c6x_expand_movmem declines.
+(define_expand "movmemsi"
+ [(use (match_operand:BLK 0 "memory_operand" ""))
+ (use (match_operand:BLK 1 "memory_operand" ""))
+ (use (match_operand:SI 2 "nonmemory_operand" ""))
+ (use (match_operand:SI 3 "const_int_operand" ""))
+ (use (match_operand:SI 4 "const_int_operand" ""))
+ (use (match_operand:SI 5 "const_int_operand" ""))]
+ ""
+{
+ if (c6x_expand_movmem (operands[0], operands[1], operands[2], operands[3],
+ operands[4], operands[5]))
+ DONE;
+ else
+ FAIL;
+})
+
+;; -------------------------------------------------------------------------
+;; Prologue and epilogue.
+;; -------------------------------------------------------------------------
+
+;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and
+;; all of memory. This blocks insns from being moved across this point.
+
+(define_insn "blockage"
+ [(unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE)]
+ ""
+ ""
+ [(set_attr "type" "blockage")])
+
+;; Compact prologue helper: call __c6xabi_push_rts, which stores B14,
+;; A15:A14, B13:B12, A13:A12, B11:B10, A11:A10 and B3:B2 below the
+;; stack pointer and drops SP by 56 bytes, as the parallel describes.
+;; A3 is clobbered (used for the helper's return address, per the
+;; ", a3" in the template).  C64x+ only.
+(define_insn "push_rts"
+ [(set (mem:SI (reg:SI REG_SP)) (reg:SI REG_B14))
+ (set (mem:DI (plus:SI (reg:SI REG_SP) (const_int -8))) (reg:DI REG_A14))
+ (set (mem:DI (plus:SI (reg:SI REG_SP) (const_int -16))) (reg:DI REG_B12))
+ (set (mem:DI (plus:SI (reg:SI REG_SP) (const_int -24))) (reg:DI REG_A12))
+ (set (mem:DI (plus:SI (reg:SI REG_SP) (const_int -32))) (reg:DI REG_B10))
+ (set (mem:DI (plus:SI (reg:SI REG_SP) (const_int -40))) (reg:DI REG_A10))
+ (set (mem:DI (plus:SI (reg:SI REG_SP) (const_int -48))) (reg:DI REG_B2))
+ (set (reg:SI REG_SP) (plus:SI (reg:SI REG_SP) (const_int -56)))
+ (unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE)
+ (clobber (reg:SI REG_A3))]
+ "TARGET_INSNS_64PLUS"
+ "%|%.\\tcallp\\t%$\\t__c6xabi_push_rts, a3"
+ [(set_attr "type" "callp")
+ (set_attr "dest_regfile" "a")
+ (set_attr "units" "s")
+ (set_attr "cross" "n")])
+
+;; Compact epilogue helper: __c6xabi_pop_rts restores the same
+;; registers, pops the 56-byte area and returns to the caller (note
+;; the (return) in the parallel).
+(define_insn "pop_rts"
+ [(set (reg:SI REG_B14) (mem:SI (plus:SI (reg:SI REG_SP) (const_int 56))))
+ (set (reg:DI REG_A14) (mem:DI (plus:SI (reg:SI REG_SP) (const_int 48))))
+ (set (reg:DI REG_B12) (mem:DI (plus:SI (reg:SI REG_SP) (const_int 40))))
+ (set (reg:DI REG_A12) (mem:DI (plus:SI (reg:SI REG_SP) (const_int 32))))
+ (set (reg:DI REG_B10) (mem:DI (plus:SI (reg:SI REG_SP) (const_int 24))))
+ (set (reg:DI REG_A10) (mem:DI (plus:SI (reg:SI REG_SP) (const_int 16))))
+ (set (reg:DI REG_B2) (mem:DI (plus:SI (reg:SI REG_SP) (const_int 8))))
+ (set (reg:SI REG_SP) (plus:SI (reg:SI REG_SP) (const_int 56)))
+ (clobber (reg:SI REG_A3))
+ (return)]
+ "TARGET_INSNS_64PLUS"
+ "%|%.\\tretp\\t%$\\t__c6xabi_pop_rts, a3"
+ [(set_attr "type" "callp")
+ (set_attr "dest_regfile" "a")
+ (set_attr "units" "s")
+ (set_attr "cross" "n")])
+
+;; Function entry/exit sequences are emitted entirely in C code.
+(define_expand "prologue"
+ [(const_int 1)]
+ ""
+ "c6x_expand_prologue (); DONE;")
+
+(define_expand "epilogue"
+ [(const_int 1)]
+ ""
+ "c6x_expand_epilogue (false); DONE;")
+
+(define_expand "sibcall_epilogue"
+ [(return)]
+ ""
+{
+ c6x_expand_epilogue (true);
+ DONE;
+})
+
+;; DSBT (data segment base table) setup for -mdsbt code: load this
+;; module's entry, indexed by the $DSBT_index of symbol operand 1,
+;; through the PIC register (operand 0, constraint "Z"), updating the
+;; same register in place.
+(define_insn "setup_dsbt"
+ [(set (match_operand:SI 0 "pic_register_operand" "+Z")
+ (unspec:SI [(match_dup 0)
+ (match_operand:SI 1 "symbolic_operand" "")]
+ UNSPEC_SETUP_DSBT))]
+ "TARGET_DSBT"
+ "%|%.\\tldw\\t%$\\t*+%0($DSBT_index%1), %0"
+ [(set_attr "type" "load")
+ (set_attr "units" "d_addr")
+ (set_attr "dest_regfile" "b")
+ (set_attr "addr_regfile" "b")])
+
+
+;; A dummy use/set to prevent the prologue and epilogue from overlapping.
+;; This can be caused by sched-ebb in the presence of multiple
+;; exit sequences, and causes the unwinding table generation to explode.
+;; Emits no code; exists purely as a scheduling barrier (type
+;; "blockage") tying operand 0 to operand 1.
+(define_insn "epilogue_barrier"
+ [(set (match_operand:SI 0 "register_operand" "")
+ (unspec:SI [(match_operand:SI 1 "register_operand" "")]
+ UNSPEC_EPILOGUE_BARRIER))]
+ ""
+ ""
+ [(set_attr "type" "blockage")])
+
+;; -------------------------------------------------------------------------
+;; Vector insns
+;; -------------------------------------------------------------------------
+
+;; Code iterators/attributes shared by the vector patterns below:
+;; <logical_insn> names the RTL pattern, <logical_opcode> the
+;; assembler mnemonic (note ior -> "or").
+(define_code_iterator logical [and ior xor])
+(define_code_attr logical_insn [(and "and") (ior "ior") (xor "xor")])
+(define_code_attr logical_opcode [(and "and") (ior "or") (xor "xor")])
+(define_code_iterator plusminus [plus minus])
+(define_code_attr plusminus_insn [(plus "add") (minus "sub")])
+(define_code_iterator ss_plusminus [ss_plus ss_minus])
+(define_code_attr ss_plusminus_insn [(ss_plus "add") (ss_minus "sub")])
+
+;; Vector logical insns
+
+;; Bitwise and/or/xor over the VEC4M modes; more units are available
+;; from C64x on (units64 "dls" vs. units62 "ls").
+(define_insn "<logical_insn><mode>3"
+ [(set (match_operand:VEC4M 0 "register_operand" "=a,b,a,b")
+ (logical:VEC4M (match_operand:VEC4M 1 "register_operand" "a,b,a,b")
+ (match_operand:VEC4M 2 "register_operand" "a,b,?b,?a")))]
+ ""
+ "%|%.\\t<logical_opcode>\\t%$\\t%1, %2, %0"
+ [(set_attr "units62" "ls")
+ (set_attr "units64" "dls")
+ (set_attr "cross" "n,n,y,y")])
+
+;; Vector add/subtract
+
+;; Paired-halfword add/sub (ADD2/SUB2).
+(define_insn "<plusminus_insn>v2hi3"
+ [(set (match_operand:V2HI 0 "register_operand" "=a,b,a,b")
+ (plusminus:V2HI (match_operand:V2HI 1 "register_operand" "a,b,a,b")
+ (match_operand:V2HI 2 "register_operand" "a,b,?b,?a")))]
+ ""
+ "%|%.\\t<plusminus_insn>2\\t%$\\t%1, %2, %0"
+ [(set_attr "units62" "l")
+ (set_attr "units64" "dls")
+ (set_attr "cross" "n,n,y,y")])
+
+;; Quad-byte add/sub (ADD4/SUB4), C64x and up.
+(define_insn "<plusminus_insn>v4qi3"
+ [(set (match_operand:V4QI 0 "register_operand" "=a,b,a,b")
+ (plusminus:V4QI (match_operand:V4QI 1 "register_operand" "a,b,a,b")
+ (match_operand:V4QI 2 "register_operand" "a,b,?b,?a")))]
+ "TARGET_INSNS_64"
+ "%|%.\\t<plusminus_insn>4\\t%$\\t%1, %2, %0"
+ [(set_attr "units" "l")
+ (set_attr "cross" "n,n,y,y")])
+
+;; Saturating paired-halfword add (SADD2) and subtract (SSUB2).
+(define_insn "ss_addv2hi3"
+ [(set (match_operand:V2HI 0 "register_operand" "=a,b,a,b")
+ (ss_plus:V2HI (match_operand:V2HI 1 "register_operand" "a,b,a,b")
+ (match_operand:V2HI 2 "register_operand" "a,b,?b,?a")))]
+ "TARGET_INSNS_64"
+ "%|%.\\tsadd2\\t%$\\t%1, %2, %0"
+ [(set_attr "units" "s")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "ss_subv2hi3"
+ [(set (match_operand:V2HI 0 "register_operand" "=a,b,a,b")
+ (ss_minus:V2HI (match_operand:V2HI 1 "register_operand" "a,b,a,b")
+ (match_operand:V2HI 2 "register_operand" "a,b,?b,?a")))]
+ "TARGET_INSNS_64"
+ "%|%.\\tssub2\\t%$\\t%1, %2, %0"
+ [(set_attr "units" "l")
+ (set_attr "cross" "n,n,y,y")])
+
+;; Saturating unsigned quad-byte add (SADDU4).
+(define_insn "us_addv4qi3"
+ [(set (match_operand:V4QI 0 "register_operand" "=a,b,a,b")
+ (ss_plus:V4QI (match_operand:V4QI 1 "register_operand" "a,b,a,b")
+ (match_operand:V4QI 2 "register_operand" "a,b,?b,?a")))]
+ "TARGET_INSNS_64"
+ "%|%.\\tsaddu4\\t%$\\t%1, %2, %0"
+ [(set_attr "units" "s")
+ (set_attr "cross" "n,n,y,y")])
+
+;; Vector/scalar min/max
+
+;; MAX2/MIN2 operate per halfword, so they also serve scalar HImode;
+;; MAXU4/MINU4 likewise cover QImode.
+(define_mode_iterator SMINMAX [HI V2HI])
+(define_mode_iterator UMINMAX [QI V4QI])
+
+(define_insn "smax<mode>3"
+ [(set (match_operand:SMINMAX 0 "register_operand" "=a,b,a,b")
+ (smax:SMINMAX (match_operand:SMINMAX 1 "register_operand" "a,b,a,b")
+ (match_operand:SMINMAX 2 "register_operand" "a,b,?b,?a")))]
+ "TARGET_INSNS_64"
+ "%|%.\\tmax2\\t%$\\t%1, %2, %0"
+ [(set_attr "units64" "l")
+ (set_attr "units64p" "ls")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "smin<mode>3"
+ [(set (match_operand:SMINMAX 0 "register_operand" "=a,b,a,b")
+ (smin:SMINMAX (match_operand:SMINMAX 1 "register_operand" "a,b,a,b")
+ (match_operand:SMINMAX 2 "register_operand" "a,b,?b,?a")))]
+ "TARGET_INSNS_64"
+ "%|%.\\tmin2\\t%$\\t%1, %2, %0"
+ [(set_attr "units64" "l")
+ (set_attr "units64p" "ls")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "umax<mode>3"
+ [(set (match_operand:UMINMAX 0 "register_operand" "=a,b,a,b")
+ (umax:UMINMAX (match_operand:UMINMAX 1 "register_operand" "a,b,a,b")
+ (match_operand:UMINMAX 2 "register_operand" "a,b,?b,?a")))]
+ "TARGET_INSNS_64"
+ "%|%.\\tmaxu4\\t%$\\t%1, %2, %0"
+ [(set_attr "units" "l")
+ (set_attr "cross" "n,n,y,y")])
+
+(define_insn "umin<mode>3"
+ [(set (match_operand:UMINMAX 0 "register_operand" "=a,b,a,b")
+ (umin:UMINMAX (match_operand:UMINMAX 1 "register_operand" "a,b,a,b")
+ (match_operand:UMINMAX 2 "register_operand" "a,b,?b,?a")))]
+ "TARGET_INSNS_64"
+ "%|%.\\tminu4\\t%$\\t%1, %2, %0"
+ [(set_attr "units" "l")
+ (set_attr "cross" "n,n,y,y")])
+
+;; Vector shifts
+
+;; Per-halfword right shifts (SHR2/SHRU2) with register or 5-bit
+;; unsigned constant count.
+(define_insn "<shift_code>v2hi3"
+ [(set (match_operand:V2HI 0 "register_operand" "=a,b,a,b")
+ (any_rshift:V2HI (match_operand:V2HI 1 "register_operand" "a,b,?b,?a")
+ (match_operand:SI 2 "reg_or_ucst5_operand" "aIu5,bIu5,aIu5,bIu5")))]
+ "TARGET_INSNS_64"
+ "%|%.\\t<shift_insn>2\\t%$\\t%1, %2, %0"
+ [(set_attr "units" "s")
+ (set_attr "cross" "n,n,y,y")])
+
+;; See c6x-mult.md.in for avg2/avgu4
+
+;; Widening vector multiply and dot product.
+;; See c6x-mult.md.in for the define_insn patterns
+
+;; Dot product with accumulation: DOTP2 (via dotv2hi) plus an add of
+;; the running sum in operand 3.
+(define_expand "sdot_prodv2hi"
+ [(match_operand:SI 0 "register_operand" "")
+ (match_operand:V2HI 1 "register_operand" "")
+ (match_operand:V2HI 2 "register_operand" "")
+ (match_operand:SI 3 "register_operand" "")]
+ "TARGET_INSNS_64"
+{
+ rtx t = gen_reg_rtx (SImode);
+ emit_insn (gen_dotv2hi (t, operands[1], operands[2]));
+ emit_insn (gen_addsi3 (operands[0], operands[3], t));
+ DONE;
+})
+
+;; Unary vector operations
+
+;; Saturating per-halfword absolute value (ABS2).
+(define_insn "ssabsv2hi2"
+ [(set (match_operand:V2HI 0 "register_operand" "=a, a, b, b")
+ (ss_abs:V2HI (match_operand:V2HI 1 "register_operand" "a,?b, b,?a")))]
+ "TARGET_INSNS_64"
+ "%|%.\\tabs2\\t%$\\t%1, %0"
+ [(set_attr "units" "l")
+ (set_attr "cross" "n,y,n,y")])
+
+;; Pack insns
+
+;; Insert into the high halfword of a register: PACK2 for register
+;; sources, MVKLH for a constant (final alternative, constraint "n").
+(define_insn "*packv2hi_insv"
+ [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+a,b,a,b,ab")
+ (const_int 16)
+ (const_int 16))
+ (match_operand:SI 1 "nonmemory_operand" "a,b,?b,?a,n"))]
+ "TARGET_INSNS_64"
+ "@
+ %|%.\\tpack2\\t%$\\t%1, %0, %0
+ %|%.\\tpack2\\t%$\\t%1, %0, %0
+ %|%.\\tpack2\\t%$\\t%1, %0, %0
+ %|%.\\tpack2\\t%$\\t%1, %0, %0
+ %|%.\\tmvklh\\t%$\\t%1, %0"
+ [(set_attr "units" "ls")
+ (set_attr "cross" "n,n,y,y,n")])
+
+;; Write only the low halfword of a register, preserving the high
+;; half, via PACKHL2.
+(define_insn "movstricthi"
+ [(set (strict_low_part (match_operand:HI 0 "register_operand" "+a,b,a,b"))
+ (match_operand:HI 1 "register_operand" "a,b,?b,?a"))]
+ "TARGET_INSNS_64"
+ "%|%.\\tpackhl2\\t%$\\t%0, %1, %0"
+ [(set_attr "units" "ls")
+ (set_attr "cross" "n,n,y,y")])
+
+(include "c6x-mult.md")
+(include "sync.md")
--- /dev/null
+; Option definitions for TI C6X.
+; Copyright (C) 2010, 2011 Free Software Foundation, Inc.
+; Contributed by Bernd Schmidt <bernds@codesourcery.com>
+; Contributed by CodeSourcery.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify
+; it under the terms of the GNU General Public License as published by
+; the Free Software Foundation; either version 3, or (at your option)
+; any later version.
+;
+; GCC is distributed in the hope that it will be useful,
+; but WITHOUT ANY WARRANTY; without even the implied warranty of
+; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+; GNU General Public License for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+HeaderInclude
+config/c6x/c6x-opts.h
+
+SourceInclude
+config/c6x/c6x-opts.h
+
+mbig-endian
+Target Report RejectNegative Mask(BIG_ENDIAN)
+Use big-endian byte order
+
+mlittle-endian
+Target Report RejectNegative InverseMask(BIG_ENDIAN, LITTLE_ENDIAN)
+Use little-endian byte order
+
+msim
+Target RejectNegative
+Use simulator runtime
+
+msdata=
+Target RejectNegative Enum(c6x_sdata) Joined Var(c6x_sdata_mode) Init(C6X_SDATA_DEFAULT)
+Select method for sdata handling
+
+Enum
+Name(c6x_sdata) Type(enum c6x_sdata)
+Valid arguments for the -msdata= option
+
+EnumValue
+Enum(c6x_sdata) String(none) Value(C6X_SDATA_NONE)
+
+EnumValue
+Enum(c6x_sdata) String(default) Value(C6X_SDATA_DEFAULT)
+
+EnumValue
+Enum(c6x_sdata) String(all) Value(C6X_SDATA_ALL)
+
+mdsbt
+Target Mask(DSBT)
+Compile for the DSBT shared library ABI
+
+mlong-calls
+Target Report Mask(LONG_CALLS)
+Avoid generating pc-relative calls; use indirection
+
+march=
+Target RejectNegative Joined Enum(c6x_isa) Var(c6x_arch_option)
+Specify the name of the target architecture
--- /dev/null
+/* Intrinsics for TI C6X.
+
+ Copyright (C) 2011 Free Software Foundation, Inc.
+ Contributed by CodeSourcery.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef _GCC_C6X_INTRINSICS_H
+#define _GCC_C6X_INTRINSICS_H
+
+#if !defined(__TMS320C6X__)
+# error "c6x_intrinsics.h is only supported for C6X targets"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdint.h>
+
+/* Define vector types. */
+typedef uint8_t __uv4qi __attribute__((vector_size (4)));
+typedef int16_t __v2hi __attribute__((vector_size (4)));
+typedef int32_t __v2si __attribute__((vector_size (8)));
+
+__extension__ static __inline int __attribute__ ((__always_inline__))
+_abs (int src)
+{
+ return __builtin_c6x_abs (src);
+}
+
+__extension__ static __inline int __attribute__ ((__always_inline__))
+_abs2 (int src)
+{
+ return (int)__builtin_c6x_abs2 ((__v2hi)src);
+}
+
+__extension__ static __inline int __attribute__ ((__always_inline__))
+_sadd (int src1, int src2)
+{
+ return __builtin_c6x_sadd (src1, src2);
+}
+
+__extension__ static __inline int __attribute__ ((__always_inline__))
+_ssub (int src1, int src2)
+{
+ return __builtin_c6x_ssub (src1, src2);
+}
+
+__extension__ static __inline int __attribute__ ((__always_inline__))
+_add2 (int src1, int src2)
+{
+ return (int)__builtin_c6x_add2 ((__v2hi)src1, (__v2hi)src2);
+}
+
+__extension__ static __inline int __attribute__ ((__always_inline__))
+_sub2 (int src1, int src2)
+{
+ return (int)__builtin_c6x_sub2 ((__v2hi)src1, (__v2hi)src2);
+}
+
+__extension__ static __inline int __attribute__ ((__always_inline__))
+_add4 (int src1, int src2)
+{
+ return (int)__builtin_c6x_add4 ((__uv4qi)src1, (__uv4qi)src2);
+}
+
+__extension__ static __inline int __attribute__ ((__always_inline__))
+_sub4 (int src1, int src2)
+{
+ return (int)__builtin_c6x_sub4 ((__uv4qi)src1, (__uv4qi)src2);
+}
+
+__extension__ static __inline int __attribute__ ((__always_inline__))
+_sadd2 (int src1, int src2)
+{
+ return (int)__builtin_c6x_sadd2 ((__v2hi)src1, (__v2hi)src2);
+}
+
+__extension__ static __inline int __attribute__ ((__always_inline__))
+_ssub2 (int src1, int src2)
+{
+ return (int)__builtin_c6x_ssub2 ((__v2hi)src1, (__v2hi)src2);
+}
+
+__extension__ static __inline int __attribute__ ((__always_inline__))
+_saddu4 (int src1, int src2)
+{
+ return (int)__builtin_c6x_saddu4 ((__uv4qi)src1, (__uv4qi)src2);
+}
+
+__extension__ static __inline int __attribute__ ((__always_inline__))
+_smpy (int src1, int src2)
+{
+ return __builtin_c6x_smpy (src1, src2);
+}
+
+__extension__ static __inline int __attribute__ ((__always_inline__))
+_smpylh (int src1, int src2)
+{
+ return __builtin_c6x_smpylh (src1, src2);
+}
+
+__extension__ static __inline int __attribute__ ((__always_inline__))
+_smpyhl (int src1, int src2)
+{
+ return __builtin_c6x_smpyhl (src1, src2);
+}
+
+__extension__ static __inline int __attribute__ ((__always_inline__))
+_smpyh (int src1, int src2)
+{
+ return __builtin_c6x_smpyh (src1, src2);
+}
+
+__extension__ static __inline long long __attribute__ ((__always_inline__))
+_smpy2ll (int src1, int src2)
+{
+ return (long long)__builtin_c6x_smpy2 ((__v2hi)src1, (__v2hi)src2);
+}
+
+__extension__ static __inline long long __attribute__ ((__always_inline__))
+_mpy2ll (int src1, int src2)
+{
+ return (long long)__builtin_c6x_mpy2 ((__v2hi)src1, (__v2hi)src2);
+}
+
+__extension__ static __inline int __attribute__ ((__always_inline__))
+_extr (int src1, int src2)
+{
+ return __builtin_c6x_extr (src1, src2);
+}
+
+__extension__ static __inline int __attribute__ ((__always_inline__))
+_extru (int src1, int src2)
+{
+ return __builtin_c6x_extru (src1, src2);
+}
+
+__extension__ static __inline int __attribute__ ((__always_inline__))
+_clrr (int src1, int src2)
+{
+ return __builtin_c6x_clrr (src1, src2);
+}
+
+__extension__ static __inline int __attribute__ ((__always_inline__))
+_avg2 (int src1, int src2)
+{
+ return (int)__builtin_c6x_avg2 ((__v2hi)src1, (__v2hi)src2);
+}
+
+__extension__ static __inline int __attribute__ ((__always_inline__))
+_avgu4 (int src1, int src2)
+{
+ return (int)__builtin_c6x_avgu4 ((__uv4qi)src1, (__uv4qi)src2);
+}
+
+__extension__ static __inline int __attribute__ ((__always_inline__))
+_sshl (int src1, int src2)
+{
+ return __builtin_c6x_sshl (src1, src2);
+}
+
+__extension__ static __inline int __attribute__ ((__always_inline__))
+_subc (int src1, int src2)
+{
+ return __builtin_c6x_subc (src1, src2);
+}
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
--- /dev/null
+;; Constraint definitions for TI C6X.
+;; Copyright (C) 2010, 2011 Free Software Foundation, Inc.
+;; Contributed by Andrew Jenner <andrew@codesourcery.com>
+;; Contributed by Bernd Schmidt <bernds@codesourcery.com>
+;; Contributed by CodeSourcery.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+(define_register_constraint "a" "A_REGS"
+ "Register file A (A0--A31).")
+
+(define_register_constraint "b" "B_REGS"
+ "Register file B (B0--B31).")
+
+(define_register_constraint "A" "PREDICATE_A_REGS"
+ "Predicate registers in register file A (A0--A2 on C64X and higher,
+ A1 and A2 otherwise).")
+
+(define_register_constraint "B" "PREDICATE_B_REGS"
+ "Predicate registers in register file B (B0--B2).")
+
+(define_register_constraint "C" "CALL_USED_B_REGS"
+ "A call-used register in register file B (B0--B9, B16--B31).")
+
+(define_register_constraint "Da" "NONPREDICATE_A_REGS"
+ "Register file A, excluding predicate registers (A3--A31, plus A0 if
+not C64X or higher).")
+
+(define_register_constraint "Db" "NONPREDICATE_B_REGS"
+ "Register file B, excluding predicate registers (B3--B31).")
+
+(define_register_constraint "Z" "PICREG"
+ "Register B14 (aka DP).")
+
+(define_register_constraint "z" "SPREG"
+ "Register B15 (aka SP).")
+
+(define_constraint "Iu4"
+ "Integer constant in the range 0 @dots{} 15, aka ucst4."
+ (and (match_code "const_int")
+ (match_test "ival >= 0 && ival <= 15")))
+
+(define_constraint "Iu5"
+ "Integer constant in the range 0 @dots{} 31, aka ucst5."
+ (and (match_code "const_int")
+ (match_test "ival >= 0 && ival <= 31")))
+
+(define_constraint "In5"
+ "Integer constant in the range @minus{}31 @dots{} 0, negation of ucst5."
+ (and (match_code "const_int")
+ (match_test "ival >= -31 && ival <= 0")))
+
+(define_constraint "Is5"
+ "Integer constant in the range @minus{}16 @dots{} 15, aka scst5."
+ (and (match_code "const_int")
+ (match_test "ival >= -16 && ival <= 15")))
+
+(define_constraint "I5x"
+ "Integer constant that can be the operand of an ADDA or a SUBA insn."
+ (and (match_code "const_int")
+ (match_test "(ival >= -31 && ival <= 31)
+ || ((ival & 1) == 0 && ival >= -62 && ival <= 62)
+ || ((ival & 3) == 0 && ival >= -124 && ival <= 124)
+ || ((TARGET_INSNS_64 || TARGET_INSNS_67)
+ && (ival & 7) == 0 && ival > 0 && ival <= 248)")))
+
+(define_constraint "Iux"
+ "Integer constant that can be the operand of a long ADDA or a SUBA insn,
+ i.e. one involving B14 or B15 as source operand."
+ (and (match_code "const_int")
+ (and (match_test "TARGET_INSNS_64PLUS")
+ (match_test "ival >= 0
+ && (ival < 32768
+ || ((ival & 1) == 0 && ival < 65536)
+ || ((ival & 3) == 0 && ival < 131072))"))))
+
+(define_constraint "IuB"
+ "Integer constant in the range 0 @dots{} 65535, aka ucst16."
+ (and (match_code "const_int")
+ (match_test "ival >= 0 && ival <= 65535")))
+
+(define_constraint "IsB"
+ "Integer constant in the range @minus{}32768 @dots{} 32767."
+ (and (match_code "const_int")
+ (match_test "ival >= -32768 && ival <= 32767")))
+
+(define_constraint "IsC"
+ "Integer constant in the range @math{-2^{20}} @dots{} @math{2^{20} - 1}."
+ (and (match_code "const_int")
+ (match_test "ival >= -0x100000 && ival <= 0xfffff")))
+
+(define_constraint "JA"
+ "@internal
+ Integer constant in the range 0 @dots{} 31, corresponding to an A register
+ number."
+ (and (match_code "const_int")
+ (match_test "ival >= 0 && ival < 32")))
+
+(define_constraint "JB"
+ "@internal
+ Integer constant in the range 32 @dots{} 63, corresponding to a B register
+ number."
+ (and (match_code "const_int")
+ (match_test "ival >= 32 && ival < 64")))
+
+(define_constraint "Jc"
+ "Integer constant that is a valid mask for the clr instruction"
+ (and (match_code "const_int")
+ (match_test "c6x_valid_mask_p (ival)")))
+
+(define_constraint "Js"
+ "Integer constant that is a valid mask for the set instruction"
+ (and (match_code "const_int")
+ (match_test "c6x_valid_mask_p (~ival)")))
+
+(define_memory_constraint "Q"
+ "Memory location with A base register."
+ (and (match_code "mem")
+ (match_test "c6x_mem_operand (op, A_REGS, false)")))
+
+(define_memory_constraint "R"
+ "Memory location with B base register."
+ (and (match_code "mem")
+ (match_test "c6x_mem_operand (op, B_REGS, false)")))
+
+(define_memory_constraint "T"
+ "@internal
+ Memory location with B base register, but not using a long offset."
+ (and (match_code "mem")
+ (match_test "c6x_mem_operand (op, B_REGS, true)")))
+
+(define_constraint "S0"
+ "@internal
+ On C64x+ targets, a GP-relative small data reference"
+ (and (match_test "TARGET_INSNS_64PLUS")
+ (match_operand 0 "sdata_symbolic_operand")))
+
+(define_constraint "S1"
+ "@internal
+ Any kind of @code{SYMBOL_REF}, for use in a call address."
+ (and (match_code "symbol_ref")
+ (match_operand 0 "c6x_call_operand")))
+
+(define_constraint "S2"
+ "@internal
+ Any SYMBOL_REF or LABEL_REF."
+ (ior (match_code "symbol_ref") (match_code "label_ref")))
+
+(define_constraint "Si"
+ "@internal
+ Any immediate value, unless it matches the S0 constraint."
+ (and (match_operand 0 "immediate_operand")
+ (match_test "!satisfies_constraint_S0 (op)")))
+
+(define_memory_constraint "W"
+ "@internal
+ A memory operand with an address that can't be used in an unaligned access."
+ (and (match_code "mem")
+ (match_test "!c6x_legitimate_address_p_1 (GET_MODE (op), XEXP (op, 0),
+ reload_completed, true)")))
--- /dev/null
+/* Copyright 2010, 2011 Free Software Foundation, Inc.
+ Contributed by Bernd Schmidt <bernds@codesourcery.com>.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ * This file just supplies function prologues for the .init and .fini
+ * sections. It is linked in before crtbegin.o.
+ */
+
+ .section .init
+ .globl _init
+ .type _init,@function
+_init:
+ add .l2 -8, B15, B15
+ stw .d2t2 B3,*+B15(4)
+ .section .fini
+ .globl _fini
+ .type _fini,@function
+_fini:
+ add .l2 -8, B15, B15
+ stw .d2t2 B3,*+B15(4)
--- /dev/null
+/* Copyright 2010, 2011 Free Software Foundation, Inc.
+ Contributed by Bernd Schmidt <bernds@codesourcery.com>.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+/*
+ * This file supplies function epilogues for the .init and .fini sections.
+ * It is linked in after all other files.
+ */
+
+ .section .init
+ ldw .d2t2 *+B15(4), B3
+ add .d2 B15, 8, B15
+ nop 3
+ ret .s2 B3
+ nop 5
+
+ .section .fini
+ ldw .d2t2 *+B15(4), B3
+ add .d2 B15, 8, B15
+ nop 3
+ ret .s2 B3
+ nop 5
+
--- /dev/null
+/* ELF definitions for TI C6X
+ Copyright (C) 2010 Free Software Foundation, Inc.
+ Contributed by Andrew Jenner <andrew@codesourcery.com>
+ Contributed by Bernd Schmidt <bernds@codesourcery.com>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* Controlling the Compilation Driver. */
+#define ENDIAN_LINK_SPEC "%{mbig-endian:-EB} %{mlittle-endian:-EL} "
+
+#undef ASM_SPEC
+#define ASM_SPEC "%{march=*:-march=%*} %{mbig-endian:-mbig-endian} \
+ %{mdsbt:-mdsbt %{!fPIC:-mpid=near} %{fPIC:-mpid=far -mpic} %{fpic:-mpic}} \
+ %{!mdsbt:%{fpic:-mpic -mpid=near} %{fPIC:-mpic -mpid=far}}"
+
+#undef DATA_SECTION_ASM_OP
+#define DATA_SECTION_ASM_OP "\t.section\t\".fardata\",\"aw\""
+#undef READONLY_DATA_SECTION_ASM_OP
+#define READONLY_DATA_SECTION_ASM_OP "\t.section\t\".const\",\"a\",@progbits"
+#define BSS_SECTION_ASM_OP "\t.section\t\".far\",\"aw\",@nobits"
+#define SDATA_SECTION_ASM_OP "\t.section\t\".neardata\",\"aw\""
+#define SBSS_SECTION_ASM_OP "\t.section\t\".bss\",\"aw\",@nobits"
+#define TARGET_LIBGCC_SDATA_SECTION ".neardata"
--- /dev/null
+/* ELF definitions for TI C6X
+ Copyright (C) 2010 Free Software Foundation, Inc.
+ Contributed by Andrew Jenner <andrew@codesourcery.com>
+ Contributed by Bernd Schmidt <bernds@codesourcery.com>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* crt0.o should come from the linker script, but for compatibility,
+ we mention it here for -msim. */
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "%{msim:crt0%O%s} crti%O%s crtbegin%O%s"
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend%O%s crtn%O%s"
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{msim:--start-group -lc -lsim --end-group;" \
+ ":-lc}"
+
+#undef LINK_SPEC
+#define LINK_SPEC ENDIAN_LINK_SPEC
--- /dev/null
+/* Software floating-point emulation.
+ Return 1 iff a == b, 0 otherwise.
+ Copyright (C) 1997,1999,2006,2007,2011 Free Software Foundation, Inc.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ This file is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ This file is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with GCC; see the file COPYING.LIB. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config/soft-fp/soft-fp.h"
+#include "config/soft-fp/double.h"
+
+CMPtype __c6xabi_eqd(DFtype a, DFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_D(A); FP_DECL_D(B);
+ CMPtype r;
+
+ FP_UNPACK_RAW_D(A, a);
+ FP_UNPACK_RAW_D(B, b);
+ FP_CMP_EQ_D(r, A, B);
+ if (r && (FP_ISSIGNAN_D(A) || FP_ISSIGNAN_D(B)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ FP_HANDLE_EXCEPTIONS;
+
+ return !r;
+}
--- /dev/null
+/* Software floating-point emulation.
+ Return 1 iff a == b, 0 otherwise.
+ Copyright (C) 1997,1999,2006,2007,2011 Free Software Foundation, Inc.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ This file is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ This file is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with GCC; see the file COPYING.LIB. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config/soft-fp/soft-fp.h"
+#include "config/soft-fp/single.h"
+
+CMPtype __c6xabi_eqf(SFtype a, SFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_S(A); FP_DECL_S(B);
+ CMPtype r;
+
+ FP_UNPACK_RAW_S(A, a);
+ FP_UNPACK_RAW_S(B, b);
+ FP_CMP_EQ_S(r, A, B);
+ if (r && (FP_ISSIGNAN_S(A) || FP_ISSIGNAN_S(B)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ FP_HANDLE_EXCEPTIONS;
+
+ return !r;
+}
--- /dev/null
+/* Software floating-point emulation.
+ Return 1 iff a >= b, 0 otherwise.
+ Copyright (C) 1997,1999,2006,2007,2011 Free Software Foundation, Inc.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ This file is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ This file is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with GCC; see the file COPYING.LIB. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config/soft-fp/soft-fp.h"
+#include "config/soft-fp/double.h"
+
+CMPtype __c6xabi_ged(DFtype a, DFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_D(A); FP_DECL_D(B);
+ CMPtype r;
+
+ FP_UNPACK_RAW_D(A, a);
+ FP_UNPACK_RAW_D(B, b);
+ FP_CMP_D(r, A, B, -2);
+ if (r == -2 && (FP_ISSIGNAN_D(A) || FP_ISSIGNAN_D(B)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r >= 0;
+}
--- /dev/null
+/* Software floating-point emulation.
+ Return 1 iff a >= b, 0 otherwise.
+ Copyright (C) 1997,1999,2006,2007 Free Software Foundation, Inc.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ This file is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ This file is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with GCC; see the file COPYING.LIB. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config/soft-fp/soft-fp.h"
+#include "config/soft-fp/single.h"
+
+CMPtype __c6xabi_gef(SFtype a, SFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_S(A); FP_DECL_S(B);
+ CMPtype r;
+
+ FP_UNPACK_RAW_S(A, a);
+ FP_UNPACK_RAW_S(B, b);
+ FP_CMP_S(r, A, B, -2);
+ if (r == -2 && (FP_ISSIGNAN_S(A) || FP_ISSIGNAN_S(B)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r >= 0;
+}
--- /dev/null
+#! /bin/sh
+# Generate c6x-mult.md from c6x-mult.md.in
+# The input file is passed as an argument.
+
+# Copyright 2011 Free Software Foundation, Inc.
+
+#This file is part of GCC.
+
+#GCC is free software; you can redistribute it and/or modify
+#it under the terms of the GNU General Public License as published by
+#the Free Software Foundation; either version 3, or (at your option)
+#any later version.
+
+#GCC is distributed in the hope that it will be useful,
+#but WITHOUT ANY WARRANTY; without even the implied warranty of
+#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#GNU General Public License for more details.
+
+#You should have received a copy of the GNU General Public License
+#along with GCC; see the file COPYING3. If not see
+#<http://www.gnu.org/licenses/>.
+
+echo ";; -*- buffer-read-only: t -*-"
+echo ";; Generated automatically from c6x-mult.md.in by genmult.sh"
+
+sed -e "s,_VARIANT_,,g" -e "s,_SET_,set,g" -e "s,_.BRK_,,g" \
+ -e "s,_A_,a,g" -e "s,_B_,b,g" -e "s,_DESTOPERAND_,register_operand,g" \
+ -e "s,_MOD._,,g" -e "s,:_M,:,g" < $1
+
+sed -e "s,_VARIANT_,_real,g" -e "s,_SET_,unspec,g" -e "s,_OBRK_,[,g" \
+ -e "s,_CBRK_,] UNSPEC_REAL_MULT,g" -e "s,_A_,JA,g" -e "s,_B_,JB,g" \
+ -e "s,_DESTOPERAND_,const_int_operand,g" -e "s,_MODk_,k,g" \
+ -e "s,_MODK_,K,g" -e 's,:_MV..[IQ],:SI,g' -e "s,:_MSQ,:SI,g" < $1
--- /dev/null
+#!/bin/sh
+# Generate c6x-tables.opt from the lists in *.def.
+# Copyright (C) 2011 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+cat <<EOF
+; -*- buffer-read-only: t -*-
+; Generated automatically by genopt.sh from c6x-isas.def.
+;
+; Copyright (C) 2011 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+Enum
+Name(c6x_isa) Type(int)
+Known C6X ISAs (for use with the -march= option):
+
+EOF
+
+awk -F'[(, ]+' 'BEGIN {
+ value = 0
+}
+/^C6X_ISA/ {
+ name = $2
+ gsub("\"", "", name)
+ print "EnumValue"
+ print "Enum(c6x_isa) String(" name ") Value(" value ")"
+ print ""
+ value++
+}' $1/c6x-isas.def
--- /dev/null
+#! /bin/sh
+# Generate c6x-sched.md from c6x-sched.md.in
+# The input file is passed as an argument.
+
+# Copyright 2010, 2011 Free Software Foundation, Inc.
+
+#This file is part of GCC.
+
+#GCC is free software; you can redistribute it and/or modify
+#it under the terms of the GNU General Public License as published by
+#the Free Software Foundation; either version 3, or (at your option)
+#any later version.
+
+#GCC is distributed in the hope that it will be useful,
+#but WITHOUT ANY WARRANTY; without even the implied warranty of
+#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+#GNU General Public License for more details.
+
+#You should have received a copy of the GNU General Public License
+#along with GCC; see the file COPYING3. If not see
+#<http://www.gnu.org/licenses/>.
+
+echo ";; -*- buffer-read-only: t -*-"
+echo ";; Generated automatically from c6x-sched.md.in by gensched.sh"
+
+for cross in n y; do
+ for side in 1 2; do
+ tside=$side
+ case $side in
+ 1) rf="a"; otherside=2 ;;
+ 2) rf="b"; otherside=1 ;;
+ esac
+ case $cross in
+ y) cunit="+x$side"; tside=$otherside;;
+ n) cunit="";;
+ esac
+ echo
+ echo ";; Definitions for side $side, cross $cross"
+ echo
+ sed -e "s,_CROSS_,$cross,g" -e "s,_CUNIT_,$cunit,g" \
+ -e "s,_N_,$side,g" -e "s,_RF_,$rf,g" -e "s,_NX_,$tside,g" \
+ < $1
+ done
+done
--- /dev/null
+/* Software floating-point emulation.
+ Return 1 iff a > b, 0 otherwise.
+ Copyright (C) 1997,1999,2006,2007,2011 Free Software Foundation, Inc.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ This file is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ This file is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with GCC; see the file COPYING.LIB. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config/soft-fp/soft-fp.h"
+#include "config/soft-fp/double.h"
+
+CMPtype __c6xabi_gtd(DFtype a, DFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_D(A); FP_DECL_D(B);
+ CMPtype r;
+
+ FP_UNPACK_RAW_D(A, a);
+ FP_UNPACK_RAW_D(B, b);
+ FP_CMP_D(r, A, B, -2);
+ if (r == -2 && (FP_ISSIGNAN_D(A) || FP_ISSIGNAN_D(B)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r > 0;
+}
--- /dev/null
+/* Software floating-point emulation.
+ Return 1 iff a > b, 0 otherwise.
+ Copyright (C) 1997,1999,2006,2007,2011 Free Software Foundation, Inc.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ This file is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ This file is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with GCC; see the file COPYING.LIB. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config/soft-fp/soft-fp.h"
+#include "config/soft-fp/single.h"
+
+CMPtype __c6xabi_gtf(SFtype a, SFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_S(A); FP_DECL_S(B);
+ CMPtype r;
+
+ FP_UNPACK_RAW_S(A, a);
+ FP_UNPACK_RAW_S(B, b);
+ FP_CMP_S(r, A, B, -2);
+ if (r == -2 && (FP_ISSIGNAN_S(A) || FP_ISSIGNAN_S(B)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r > 0;
+}
--- /dev/null
+/* Software floating-point emulation.
+ Return 1 iff a <= b, 0 otherwise.
+ Copyright (C) 1997,1999,2006,2007,2011 Free Software Foundation, Inc.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ This file is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ This file is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with GCC; see the file COPYING.LIB. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config/soft-fp/soft-fp.h"
+#include "config/soft-fp/double.h"
+
+CMPtype __c6xabi_led(DFtype a, DFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_D(A); FP_DECL_D(B);
+ CMPtype r;
+
+ FP_UNPACK_RAW_D(A, a);
+ FP_UNPACK_RAW_D(B, b);
+ FP_CMP_D(r, A, B, 2);
+ if (r == 2 && (FP_ISSIGNAN_D(A) || FP_ISSIGNAN_D(B)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r <= 0;
+}
--- /dev/null
+/* Software floating-point emulation.
+ Return 1 iff a <= b, 0 otherwise.
+ Copyright (C) 1997,1999,2006,2007,2011 Free Software Foundation, Inc.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ This file is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ This file is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with GCC; see the file COPYING.LIB. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config/soft-fp/soft-fp.h"
+#include "config/soft-fp/single.h"
+
+CMPtype __c6xabi_lef(SFtype a, SFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_S(A); FP_DECL_S(B);
+ CMPtype r;
+
+ FP_UNPACK_RAW_S(A, a);
+ FP_UNPACK_RAW_S(B, b);
+ FP_CMP_S(r, A, B, 2);
+ if (r == 2 && (FP_ISSIGNAN_S(A) || FP_ISSIGNAN_S(B)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r <= 0;
+}
--- /dev/null
+/* Copyright 2010, 2011 Free Software Foundation, Inc.
+ Contributed by Bernd Schmidt <bernds@codesourcery.com>.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+ ;; ABI considerations for the divide functions
+ ;; The following registers are call-used:
+ ;; __c6xabi_divi A0,A1,A2,A4,A6,B0,B1,B2,B4,B5
+ ;; __c6xabi_divu A0,A1,A2,A4,A6,B0,B1,B2,B4
+ ;; __c6xabi_remi A1,A2,A4,A5,A6,B0,B1,B2,B4
+ ;; __c6xabi_remu A1,A4,A5,A7,B0,B1,B2,B4
+ ;;
+ ;; In our implementation, divu and remu are leaf functions,
+ ;; while both divi and remi call into divu.
+ ;; A0 is not clobbered by any of the functions.
+ ;; divu does not clobber B2 either, which is taken advantage of
+ ;; in remi.
+ ;; divi uses B5 to hold the original return address during
+ ;; the call to divu.
+ ;; remi uses B2 and A5 to hold the input values during the
+ ;; call to divu. It stores B3 on the stack.
+
+#ifdef L_divsi3
+.text
+.align 2
+.global __c6xabi_divi
+.hidden __c6xabi_divi
+.type __c6xabi_divi, STT_FUNC
+
+__c6xabi_divi:
+ call .s2 __c6xabi_divu
+|| mv .d2 B3, B5
+|| cmpgt .l1 0, A4, A1
+|| cmpgt .l2 0, B4, B1
+
+ [A1] neg .l1 A4, A4
+|| [B1] neg .l2 B4, B4
+|| xor .s1x A1, B1, A1
+
+#ifdef _TMS320C6400
+ [A1] addkpc .s2 1f, B3, 4
+#else
+ [A1] mvkl .s2 1f, B3
+ [A1] mvkh .s2 1f, B3
+ nop 2
+#endif
+1:
+ neg .l1 A4, A4
+|| mv .l2 B3,B5
+|| ret .s2 B5
+ nop 5
+#endif
+
+#if defined L_modsi3 || defined L_divmodsi4
+.align 2
+#ifdef L_modsi3
+#define MOD_OUTPUT_REG A4
+.global __c6xabi_remi
+.hidden __c6xabi_remi
+.type __c6xabi_remi, STT_FUNC
+#else
+#define MOD_OUTPUT_REG A5
+.global __c6xabi_divremi
+.hidden __c6xabi_divremi
+.type __c6xabi_divremi, STT_FUNC
+__c6xabi_divremi:
+#endif
+
+__c6xabi_remi:
+ stw .d2t2 B3, *B15--[2]
+|| cmpgt .l1 0, A4, A1
+|| cmpgt .l2 0, B4, B2
+|| mv .s1 A4, A5
+|| call .s2 __c6xabi_divu
+
+ [A1] neg .l1 A4, A4
+|| [B2] neg .l2 B4, B4
+|| xor .s2x B2, A1, B0
+|| mv .d2 B4, B2
+
+#ifdef _TMS320C6400
+ [B0] addkpc .s2 1f, B3, 1
+ [!B0] addkpc .s2 2f, B3, 1
+ nop 2
+#else
+ [B0] mvkl .s2 1f,B3
+ [!B0] mvkl .s2 2f,B3
+
+ [B0] mvkh .s2 1f,B3
+ [!B0] mvkh .s2 2f,B3
+#endif
+1:
+ neg .l1 A4, A4
+2:
+ ldw .d2t2 *++B15[2], B3
+
+#ifdef _TMS320C6400_PLUS
+ mpy32 .m1x A4, B2, A6
+ nop 3
+ ret .s2 B3
+ sub .l1 A5, A6, MOD_OUTPUT_REG
+ nop 4
+#else
+ mpyu .m1x A4, B2, A1
+ nop 1
+ mpylhu .m1x A4, B2, A6
+|| mpylhu .m2x B2, A4, B2
+ nop 1
+ add .l1x A6, B2, A6
+|| ret .s2 B3
+ shl .s1 A6, 16, A6
+ add .d1 A6, A1, A6
+ sub .l1 A5, A6, MOD_OUTPUT_REG
+ nop 2
+#endif
+
+#endif
+
+#if defined L_udivsi3 || defined L_udivmodsi4
+.align 2
+#ifdef L_udivsi3
+.global __c6xabi_divu
+.hidden __c6xabi_divu
+.type __c6xabi_divu, STT_FUNC
+__c6xabi_divu:
+#else
+.global __c6xabi_divremu
+.hidden __c6xabi_divremu
+.type __c6xabi_divremu, STT_FUNC
+__c6xabi_divremu:
+#endif
+ ;; We use a series of up to 31 subc instructions. First, we find
+ ;; out how many leading zero bits there are in the divisor. This
+ ;; gives us both a shift count for aligning (shifting) the divisor
+ ;; to the dividend, and the number of times we have to execute subc.
+
+ ;; At the end, we have both the remainder and most of the quotient
+ ;; in A4. The top bit of the quotient is computed first and is
+ ;; placed in A2.
+
+ ;; Return immediately if the dividend is zero. Setting B4 to 1
+ ;; is a trick to allow us to leave the following insns in the jump
+ ;; delay slot without affecting the result.
+ mv .s2x A4, B1
+
+#ifndef _TMS320C6400
+[!b1] mvk .s2 1, B4
+#endif
+[b1] lmbd .l2 1, B4, B1
+||[!b1] b .s2 B3 ; RETURN A
+#ifdef _TMS320C6400
+||[!b1] mvk .d2 1, B4
+#endif
+#ifdef L_udivmodsi4
+||[!b1] zero .s1 A5
+#endif
+ mv .l1x B1, A6
+|| shl .s2 B4, B1, B4
+
+ ;; The loop performs a maximum of 28 steps, so we do the
+ ;; first 3 here.
+ cmpltu .l1x A4, B4, A2
+[!A2] sub .l1x A4, B4, A4
+|| shru .s2 B4, 1, B4
+|| xor .s1 1, A2, A2
+
+ shl .s1 A2, 31, A2
+|| [b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+[b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+
+ ;; RETURN A may happen here (note: must happen before the next branch)
+0:
+ cmpgt .l2 B1, 7, B0
+|| [b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+[b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+|| [b0] b .s1 0b
+[b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+[b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+[b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+[b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+[b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+ ;; loop backwards branch happens here
+
+ ret .s2 B3
+|| mvk .s1 32, A1
+ sub .l1 A1, A6, A6
+#ifdef L_udivmodsi4
+|| extu .s1 A4, A6, A5
+#endif
+ shl .s1 A4, A6, A4
+ shru .s1 A4, 1, A4
+|| sub .l1 A6, 1, A6
+ or .l1 A2, A4, A4
+ shru .s1 A4, A6, A4
+ nop
+
+#endif
+
+#ifdef L_umodsi3
+.align 2
+.global __c6xabi_remu
+.hidden __c6xabi_remu
+.type __c6xabi_remu, STT_FUNC
+__c6xabi_remu:
+ ;; The ABI seems designed to prevent these functions calling each other,
+ ;; so we duplicate most of the divsi3 code here.
+ mv .s2x A4, B1
+#ifndef _TMS320C6400
+[!b1] mvk .s2 1, B4
+#endif
+ lmbd .l2 1, B4, B1
+||[!b1] b .s2 B3 ; RETURN A
+#ifdef _TMS320C6400
+||[!b1] mvk .d2 1, B4
+#endif
+
+ mv .l1x B1, A7
+|| shl .s2 B4, B1, B4
+
+ cmpltu .l1x A4, B4, A1
+[!a1] sub .l1x A4, B4, A4
+ shru .s2 B4, 1, B4
+
+0:
+ cmpgt .l2 B1, 7, B0
+|| [b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+ ;; RETURN A may happen here (note: must happen before the next branch)
+[b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+|| [b0] b .s1 0b
+[b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+[b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+[b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+[b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+[b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+ ;; loop backwards branch happens here
+
+ ret .s2 B3
+[b1] subc .l1x A4,B4,A4
+|| [b1] add .s2 -1, B1, B1
+[b1] subc .l1x A4,B4,A4
+
+ extu .s1 A4, A7, A4
+ nop 2
+#endif
+
+#if defined L_strasgi_64plus && defined _TMS320C6400_PLUS
+
+.align 2
+.global __c6xabi_strasgi_64plus
+.hidden __c6xabi_strasgi_64plus
+.type __c6xabi_strasgi_64plus, STT_FUNC
+__c6xabi_strasgi_64plus:
+ shru .s2x a6, 2, b31
+|| mv .s1 a4, a30
+|| mv .d2 b4, b30
+
+ add .s2 -4, b31, b31
+
+ sploopd 1
+|| mvc .s2 b31, ilc
+ ldw .d2t2 *b30++, b31
+ nop 4
+ mv .s1x b31,a31
+ spkernel 6, 0
+|| stw .d1t1 a31, *a30++
+
+ ret .s2 b3
+ nop 5
+#endif
+
+#ifdef L_strasgi
+.global __c6xabi_strasgi
+.type __c6xabi_strasgi, STT_FUNC
+__c6xabi_strasgi:
+ ;; This is essentially memcpy, with alignment known to be at least
+ ;; 4, and the size a multiple of 4 greater than or equal to 28.
+ ldw .d2t1 *B4++, A0
+|| mvk .s2 16, B1
+ ldw .d2t1 *B4++, A1
+|| mvk .s2 20, B2
+|| sub .d1 A6, 24, A6
+ ldw .d2t1 *B4++, A5
+ ldw .d2t1 *B4++, A7
+|| mv .l2x A6, B7
+ ldw .d2t1 *B4++, A8
+ ldw .d2t1 *B4++, A9
+|| mv .s2x A0, B5
+|| cmpltu .l2 B2, B7, B0
+
+0:
+ stw .d1t2 B5, *A4++
+||[b0] ldw .d2t1 *B4++, A0
+|| mv .s2x A1, B5
+|| mv .l2 B7, B6
+
+[b0] sub .d2 B6, 24, B7
+||[b0] b .s2 0b
+|| cmpltu .l2 B1, B6, B0
+
+[b0] ldw .d2t1 *B4++, A1
+|| stw .d1t2 B5, *A4++
+|| mv .s2x A5, B5
+|| cmpltu .l2 12, B6, B0
+
+[b0] ldw .d2t1 *B4++, A5
+|| stw .d1t2 B5, *A4++
+|| mv .s2x A7, B5
+|| cmpltu .l2 8, B6, B0
+
+[b0] ldw .d2t1 *B4++, A7
+|| stw .d1t2 B5, *A4++
+|| mv .s2x A8, B5
+|| cmpltu .l2 4, B6, B0
+
+[b0] ldw .d2t1 *B4++, A8
+|| stw .d1t2 B5, *A4++
+|| mv .s2x A9, B5
+|| cmpltu .l2 0, B6, B0
+
+[b0] ldw .d2t1 *B4++, A9
+|| stw .d1t2 B5, *A4++
+|| mv .s2x A0, B5
+|| cmpltu .l2 B2, B7, B0
+
+ ;; loop back branch happens here
+
+ cmpltu .l2 B1, B6, B0
+|| ret .s2 b3
+
+[b0] stw .d1t1 A1, *A4++
+|| cmpltu .l2 12, B6, B0
+[b0] stw .d1t1 A5, *A4++
+|| cmpltu .l2 8, B6, B0
+[b0] stw .d1t1 A7, *A4++
+|| cmpltu .l2 4, B6, B0
+[b0] stw .d1t1 A8, *A4++
+|| cmpltu .l2 0, B6, B0
+[b0] stw .d1t1 A9, *A4++
+
+ ;; return happens here
+
+#endif
+
+#ifdef _TMS320C6400_PLUS
+#ifdef L_push_rts
+.align 2
+.global __c6xabi_push_rts
+.hidden __c6xabi_push_rts
+.type __c6xabi_push_rts, STT_FUNC
+__c6xabi_push_rts:
+ stw .d2t2 B14, *B15--[2]
+ stdw .d2t1 A15:A14, *B15--
+|| b .s2x A3
+ stdw .d2t2 B13:B12, *B15--
+ stdw .d2t1 A13:A12, *B15--
+ stdw .d2t2 B11:B10, *B15--
+ stdw .d2t1 A11:A10, *B15--
+ stdw .d2t2 B3:B2, *B15--
+#endif
+
+#ifdef L_pop_rts
+.align 2
+.global __c6xabi_pop_rts
+.hidden __c6xabi_pop_rts
+.type __c6xabi_pop_rts, STT_FUNC
+__c6xabi_pop_rts:
+ lddw .d2t2 *++B15, B3:B2
+ lddw .d2t1 *++B15, A11:A10
+ lddw .d2t2 *++B15, B11:B10
+ lddw .d2t1 *++B15, A13:A12
+ lddw .d2t2 *++B15, B13:B12
+ lddw .d2t1 *++B15, A15:A14
+|| b .s2 B3
+ ldw .d2t2 *++B15[2], B14
+ nop 4
+#endif
+
+#ifdef L_call_stub
+.align 2
+.global __c6xabi_call_stub
+.type __c6xabi_call_stub, STT_FUNC
+__c6xabi_call_stub:
+ stw .d2t1 A2, *B15--[2]
+ stdw .d2t1 A7:A6, *B15--
+|| call .s2 B31
+ stdw .d2t1 A1:A0, *B15--
+ stdw .d2t2 B7:B6, *B15--
+ stdw .d2t2 B5:B4, *B15--
+ stdw .d2t2 B1:B0, *B15--
+ stdw .d2t2 B3:B2, *B15--
+|| addkpc .s2 1f, B3, 0
+1:
+ lddw .d2t2 *++B15, B3:B2
+ lddw .d2t2 *++B15, B1:B0
+ lddw .d2t2 *++B15, B5:B4
+ lddw .d2t2 *++B15, B7:B6
+ lddw .d2t1 *++B15, A1:A0
+ lddw .d2t1 *++B15, A7:A6
+|| b .s2 B3
+ ldw .d2t1 *++B15[2], A2
+ nop 4
+#endif
+
+#endif
+
--- /dev/null
+# Copyright (C) 2011 Free Software Foundation, Inc.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+GCC_4.7.0 {
+ __c6xabi_strasgi
+ __c6xabi_call_stub
+ __c6xabi_mpyll
+ __c6xabi_negll
+ __c6xabi_llshru
+ __c6xabi_llshl
+ __c6xabi_llshr
+ __c6xabi_fixfu
+ __c6xabi_fixdu
+ __c6xabi_fixflli
+ __c6xabi_fixdlli
+ __c6xabi_fixfull
+ __c6xabi_fixdull
+ __c6xabi_fltllif
+ __c6xabi_fltllid
+ __c6xabi_fltullf
+ __c6xabi_fltulld
+ __c6xabi_divlli
+ __c6xabi_remlli
+ __c6xabi_divull
+ __c6xabi_remull
+ __c6xabi_divremull
+ __c6xabi_gef
+ __c6xabi_gtf
+ __c6xabi_lef
+ __c6xabi_ltf
+ __c6xabi_eqf
+ __c6xabi_ged
+ __c6xabi_gtd
+ __c6xabi_led
+ __c6xabi_ltd
+ __c6xabi_eqd
+ __c6xabi_addf
+ __c6xabi_divf
+ __c6xabi_neqf
+ __c6xabi_cmpf
+ __c6xabi_mpyf
+ __c6xabi_negf
+ __c6xabi_subf
+ __c6xabi_unordf
+ __c6xabi_fixfi
+ __c6xabi_fltif
+ __c6xabi_fltuf
+ __c6xabi_addd
+ __c6xabi_divd
+ __c6xabi_neqd
+ __c6xabi_cmpd
+ __c6xabi_mpyd
+ __c6xabi_negd
+ __c6xabi_subd
+ __c6xabi_unordd
+ __c6xabi_fixdi
+ __c6xabi_fltid
+ __c6xabi_fltud
+ __c6xabi_cvtfd
+ __c6xabi_cvtdf
+ __c6xabi_mulcf
+ __c6xabi_mulcd
+ __c6xabi_divcf
+ __c6xabi_divcd
+
+ __gnu_ltsf2
+ __gnu_ltdf2
+ __gnu_gesf2
+ __gnu_gedf2
+ __gnu_gtsf2
+ __gnu_gtdf2
+ __gnu_eqsf2
+ __gnu_eqdf2
+
+ # Exception-Handling
+ _Unwind_Complete
+ _Unwind_VRS_Get
+ _Unwind_VRS_Set
+ _Unwind_VRS_Pop
+ __c6xabi_unwind_cpp_pr0
+ __c6xabi_unwind_cpp_pr1
+ __c6xabi_unwind_cpp_pr2
+ __c6xabi_unwind_cpp_pr3
+ __c6xabi_unwind_cpp_pr4
+ # The libstdc++ exception-handling personality routine uses this
+ # GNU-specific entry point.
+ __gnu_unwind_frame
+}
--- /dev/null
+/* Software floating-point emulation.
+ Return 1 iff a < b, 0 otherwise.
+ Copyright (C) 1997,1999,2006,2007,2011 Free Software Foundation, Inc.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ This file is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ This file is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with GCC; see the file COPYING.LIB. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config/soft-fp/soft-fp.h"
+#include "config/soft-fp/double.h"
+
+CMPtype __c6xabi_ltd(DFtype a, DFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_D(A); FP_DECL_D(B);
+ CMPtype r;
+
+ FP_UNPACK_RAW_D(A, a);
+ FP_UNPACK_RAW_D(B, b);
+ FP_CMP_D(r, A, B, 2);
+ if (r == 2 && (FP_ISSIGNAN_D(A) || FP_ISSIGNAN_D(B)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r < 0;
+}
--- /dev/null
+/* Software floating-point emulation.
+ Return 1 iff a < b, 0 otherwise.
+ Copyright (C) 1997,1999,2006,2007,2011 Free Software Foundation, Inc.
+ Contributed by Richard Henderson (rth@cygnus.com) and
+ Jakub Jelinek (jj@ultra.linux.cz).
+
+ This file is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ This file is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with GCC; see the file COPYING.LIB. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config/soft-fp/soft-fp.h"
+#include "config/soft-fp/single.h"
+
+CMPtype __c6xabi_ltf(SFtype a, SFtype b)
+{
+ FP_DECL_EX;
+ FP_DECL_S(A); FP_DECL_S(B);
+ CMPtype r;
+
+ FP_UNPACK_RAW_S(A, a);
+ FP_UNPACK_RAW_S(B, b);
+ FP_CMP_S(r, A, B, 2);
+ if (r == 2 && (FP_ISSIGNAN_S(A) || FP_ISSIGNAN_S(B)))
+ FP_SET_EXCEPTION(FP_EX_INVALID);
+ FP_HANDLE_EXCEPTIONS;
+
+ return r < 0;
+}
--- /dev/null
+/* Predicates for TI C6X
+ Copyright (C) 2010, 2011 Free Software Foundation, Inc.
+ Contributed by Andrew Jenner <andrew@codesourcery.com>
+ Contributed by Bernd Schmidt <bernds@codesourcery.com>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+(define_predicate "reg_or_const_int_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "const_int_operand")))
+
+(define_predicate "const_vector_operand"
+ (match_code "const_vector"))
+
+(define_predicate "scst5_operand"
+ (and (match_operand 0 "const_int_operand")
+ (match_test "satisfies_constraint_Is5 (op)")))
+
+(define_predicate "reg_or_ucst4_operand"
+ (ior (match_operand 0 "register_operand")
+ (and (match_operand 0 "const_int_operand")
+ (match_test "satisfies_constraint_Iu4 (op)"))))
+
+(define_predicate "reg_or_scst5_operand"
+ (ior (match_operand 0 "register_operand")
+ (match_operand 0 "scst5_operand")))
+
+(define_predicate "reg_or_ucst5_operand"
+ (ior (match_operand 0 "register_operand")
+ (and (match_operand 0 "const_int_operand")
+ (match_test "satisfies_constraint_Iu5 (op)"))))
+
+(define_predicate "addsi_operand"
+ (ior (match_operand 0 "register_operand")
+ (and (match_operand 0 "const_int_operand")
+ (match_test "satisfies_constraint_IsB (op)"))))
+
+(define_predicate "andsi_operand"
+ (ior (match_operand 0 "reg_or_scst5_operand")
+ (and (match_operand 0 "const_int_operand")
+ (match_test "satisfies_constraint_Jc (op)"))))
+
+(define_predicate "iorsi_operand"
+ (ior (match_operand 0 "reg_or_scst5_operand")
+ (and (match_operand 0 "const_int_operand")
+ (match_test "satisfies_constraint_Js (op)"))))
+
+(define_predicate "insv_operand"
+ (and (match_operand 0 "const_int_operand")
+ (match_test "INTVAL (op) == 0 || INTVAL (op) == -1")))
+
+(define_predicate "c6x_jump_operand"
+ (match_code "label_ref,symbol_ref,reg"))
+
+(define_predicate "c6x_call_operand"
+ (ior (match_code "symbol_ref,reg")
+ (and (match_code "subreg")
+ (match_test "GET_CODE (XEXP (op, 0)) == REG")))
+{
+ /* The linker transforms jumps to undefined weak symbols in a way that
+ is incompatible with our code generation. */
+ return (GET_CODE (op) != SYMBOL_REF
+ || (!SYMBOL_REF_WEAK (op)
+ && !c6x_long_call_p (op)));
+})
+
+;; Returns 1 if OP is a symbolic operand, i.e. a symbol_ref or a label_ref,
+;; possibly with an offset.
+(define_predicate "symbolic_operand"
+ (ior (match_code "symbol_ref,label_ref")
+ (and (match_code "const")
+ (match_test "GET_CODE (XEXP (op,0)) == PLUS
+ && (GET_CODE (XEXP (XEXP (op, 0), 0)) == SYMBOL_REF
+ || GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF)
+ && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT"))))
+
+(define_predicate "const_int_or_symbolic_operand"
+ (ior (match_operand 0 "symbolic_operand")
+ (match_operand 0 "const_int_operand")))
+
+;; Return nonzero iff OP is one of the integer constants 2, 4 or 8.
+(define_predicate "adda_scale_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) == 2 || INTVAL (op) == 4
+ || ((TARGET_INSNS_64 || TARGET_INSNS_67)
+ && INTVAL (op) == 8)")))
+
+;; Return nonzero iff OP is one of the integer constants 2 or 4.
+(define_predicate "suba_scale_operand"
+ (and (match_code "const_int")
+ (match_test "INTVAL (op) == 2 || INTVAL (op) == 4")))
+
+;; True if this operator is valid for predication.
+(define_predicate "predicate_operator"
+ (match_code "eq,ne"))
+
+(define_predicate "c6x_comparison_operator"
+ (match_code "eq,ltu,gtu,lt,gt"))
+
+(define_predicate "non_c6x_comparison_operator"
+ (match_code "ne,leu,geu,le,ge"))
+
+;; FP Comparisons handled by c6x_expand_compare.
+(define_predicate "c6x_fp_comparison_operator"
+ (ior (match_code "eq,lt,gt,le,ge")
+ (and (match_test "TARGET_FP")
+ (match_code "ltgt,uneq,unlt,ungt,unle,unge,ordered,unordered"))))
+
+(define_predicate "c6x_any_comparison_operand"
+ (match_code "eq,lt,gt,le,ge,ltu,gtu")
+{
+ rtx op0 = XEXP (op, 0);
+ rtx op1 = XEXP (op, 1);
+ if (ltugtu_operator (op, SImode)
+ && register_operand (op0, SImode)
+ && ((TARGET_INSNS_64 && reg_or_ucst5_operand (op1, SImode))
+ || (!TARGET_INSNS_64 && reg_or_ucst4_operand (op1, SImode))))
+ return true;
+ if (eqltgt_operator (op, SImode)
+ && register_operand (op0, SImode)
+ && reg_or_scst5_operand (op1, SImode))
+ return true;
+ if (!TARGET_FP)
+ return false;
+ if (!eqltgt_operator (op, SFmode) && !eqltgt_operator (op, DFmode))
+ return false;
+ if (register_operand (op0, GET_MODE (op))
+ && register_operand (op1, GET_MODE (op)))
+ return true;
+ return false;
+})
+
+(define_predicate "ltugtu_operator"
+ (match_code "ltu,gtu"))
+
+(define_predicate "eqltgt_operator"
+ (match_code "eq,lt,gt"))
+
+(define_predicate "eqne_operator"
+ (match_code "eq,ne"))
+
+(define_predicate "predicate_register"
+ (and (match_code "reg")
+ (ior (match_test "REGNO_REG_CLASS (REGNO (op)) == PREDICATE_A_REGS")
+ (match_test "REGNO_REG_CLASS (REGNO (op)) == PREDICATE_B_REGS"))))
+
+;; Allow const_ints for things like the real_mult patterns.
+(define_predicate "a_register"
+ (ior (and (match_code "reg")
+ (match_test "A_REGNO_P (REGNO (op))"))
+ (and (match_code "const_int")
+ (match_test "A_REGNO_P (INTVAL (op))"))))
+
+(define_predicate "b_register"
+ (ior (and (match_code "reg")
+ (match_test "B_REGNO_P (REGNO (op))"))
+ (and (match_code "const_int")
+ (match_test "B_REGNO_P (INTVAL (op))"))))
+
+(define_predicate "pic_register_operand"
+ (and (match_code "reg")
+ (match_test "op == pic_offset_table_rtx")))
+
+;; True if OP refers to a symbol in the sdata section.
+(define_predicate "sdata_symbolic_operand"
+ (match_code "symbol_ref,const")
+{
+ HOST_WIDE_INT offset = 0, size = 0;
+ tree t;
+
+ switch (GET_CODE (op))
+ {
+ case CONST:
+ op = XEXP (op, 0);
+ if (GET_CODE (op) != PLUS
+ || GET_CODE (XEXP (op, 0)) != SYMBOL_REF
+ || GET_CODE (XEXP (op, 1)) != CONST_INT)
+ return false;
+ offset = INTVAL (XEXP (op, 1));
+ op = XEXP (op, 0);
+ /* FALLTHRU */
+
+ case SYMBOL_REF:
+ /* For shared libraries, only allow symbols we know are local.
+ For executables, the linker knows to create copy relocs if
+ necessary so we can use DP-relative addressing for all small
+ objects. */
+ if ((c6x_initial_flag_pic && !SYMBOL_REF_LOCAL_P (op))
+ || !SYMBOL_REF_SMALL_P (op))
+ return false;
+
+ /* Note that in addition to DECLs, we can get various forms
+ of constants here. */
+ t = SYMBOL_REF_DECL (op);
+ if (DECL_P (t))
+ t = DECL_SIZE_UNIT (t);
+ else
+ t = TYPE_SIZE_UNIT (TREE_TYPE (t));
+ if (t && host_integerp (t, 0))
+ {
+ size = tree_low_cst (t, 0);
+ if (size < 0)
+ size = 0;
+ }
+
+ /* Don't allow addressing outside the object. */
+ return (offset >= 0 && offset <= size);
+
+ default:
+ gcc_unreachable ();
+ }
+})
--- /dev/null
+/* Soft-FP definitions for TI C6X.
+ Copyright (C) 2010, 2011 Free Software Foundation, Inc.
+
+ This file is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ In addition to the permissions in the GNU Lesser General Public
+ License, the Free Software Foundation gives you unlimited
+ permission to link the compiled version of this file into
+ combinations with other programs, and to distribute those
+ combinations without any restriction coming from the use of this
+ file. (The Lesser General Public License restrictions do apply in
+ other respects; for example, they cover modification of the file,
+ and distribution when not linked into a combine executable.)
+
+ This file is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with GCC; see the file COPYING.LIB. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#define _FP_W_TYPE_SIZE 32
+#define _FP_W_TYPE unsigned long
+#define _FP_WS_TYPE signed long
+#define _FP_I_TYPE long
+
+#define _FP_MUL_MEAT_S(R,X,Y) \
+ _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
+#define _FP_MUL_MEAT_D(R,X,Y) \
+ _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
+#define _FP_MUL_MEAT_Q(R,X,Y) \
+ _FP_MUL_MEAT_4_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
+
+#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_loop(S,R,X,Y)
+#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y)
+#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_4_udiv(Q,R,X,Y)
+
+#define _FP_NANFRAC_H ((_FP_QNANBIT_H << 1) - 1)
+#define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1)
+#define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1), -1
+#define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1, -1, -1
+#define _FP_NANSIGN_H 0
+#define _FP_NANSIGN_S 0
+#define _FP_NANSIGN_D 0
+#define _FP_NANSIGN_Q 0
+
+#define _FP_KEEPNANFRACP 1
+
+/* Someone please check this. */
+#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
+ do { \
+ if ((_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs) \
+ && !(_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs)) \
+ { \
+ R##_s = Y##_s; \
+ _FP_FRAC_COPY_##wc(R,Y); \
+ } \
+ else \
+ { \
+ R##_s = X##_s; \
+ _FP_FRAC_COPY_##wc(R,X); \
+ } \
+ R##_c = FP_CLS_NAN; \
+ } while (0)
+
+#define __LITTLE_ENDIAN 1234
+#define __BIG_ENDIAN 4321
+
+#if defined _BIG_ENDIAN
+# define __BYTE_ORDER __BIG_ENDIAN
+#else
+# define __BYTE_ORDER __LITTLE_ENDIAN
+#endif
+
+
+/* Define ALIASNAME as a strong alias for NAME. */
+# define strong_alias(name, aliasname) _strong_alias(name, aliasname)
+# define _strong_alias(name, aliasname) \
+ extern __typeof (name) aliasname __attribute__ ((alias (#name)));
+
+/* Rename helper functions to the names specified in the C6000 ELF ABI. */
+#define __fixdfsi __c6xabi_fixdi
+#define __fixsfsi __c6xabi_fixfi
+#define __floatsidf __c6xabi_fltid
+#define __floatunsidf __c6xabi_fltud
+#define __floatsisf __c6xabi_fltif
+#define __floatunsisf __c6xabi_fltuf
+#define __truncdfsf2 __c6xabi_cvtdf
+#define __extendsfdf2 __c6xabi_cvtfd
+#define __adddf3 __c6xabi_addd
+#define __subdf3 __c6xabi_subd
+#define __muldf3 __c6xabi_mpyd
+#define __divdf3 __c6xabi_divd
+#define __negdf2 __c6xabi_negd
+#define __absdf2 __c6xabi_absd
+#define __addsf3 __c6xabi_addf
+#define __subsf3 __c6xabi_subf
+#define __mulsf3 __c6xabi_mpyf
+#define __divsf3 __c6xabi_divf
+#define __negsf2 __c6xabi_negf
+#define __abssf2 __c6xabi_absf
+#define __lesf2 __c6xabi_cmpf
+#define __ledf2 __c6xabi_cmpd
+#define __ltsf2 __gnu_ltsf2
+#define __ltdf2 __gnu_ltdf2
+#define __gesf2 __gnu_gesf2
+#define __gedf2 __gnu_gedf2
+#define __gtsf2 __gnu_gtsf2
+#define __gtdf2 __gnu_gtdf2
+#define __eqsf2 __gnu_eqsf2
+#define __eqdf2 __gnu_eqdf2
+#define __nesf2 __c6xabi_neqf
+#define __nedf2 __c6xabi_neqd
+#define __unordsf2 __c6xabi_unordf
+#define __unorddf2 __c6xabi_unordd
--- /dev/null
+;; GCC machine description for C6X synchronization instructions.
+;; Copyright (C) 2011 Free Software Foundation, Inc.
+;;
+;; This file is part of GCC.
+;;
+;; GCC is free software; you can redistribute it and/or modify
+;; it under the terms of the GNU General Public License as published by
+;; the Free Software Foundation; either version 3, or (at your option)
+;; any later version.
+;;
+;; GCC is distributed in the hope that it will be useful,
+;; but WITHOUT ANY WARRANTY; without even the implied warranty of
+;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+;; GNU General Public License for more details.
+;;
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; C64X+ has atomic instructions, but they are not atomic on all
+;; devices and have other problems. We use normal loads and stores,
+;; and place them in overlapping branch shadows to ensure interrupts
+;; are disabled during the sequence, which guarantees atomicity on all
+;; single-core systems.
+
+(define_code_iterator FETCHOP [plus minus ior xor and])
+(define_code_attr fetchop_name
+ [(plus "add") (minus "sub") (ior "ior") (xor "xor") (and "and")])
+(define_code_attr fetchop_pred
+ [(plus "reg_or_scst5_operand") (minus "register_operand")
+ (ior "reg_or_scst5_operand") (xor "reg_or_scst5_operand")
+ (and "reg_or_scst5_operand")])
+(define_code_attr fetchop_constr
+ [(plus "bIs5") (minus "b") (ior "bIs5") (xor "bIs5") (and "bIs5")])
+(define_code_attr fetchop_opcode
+ [(plus "add") (minus "sub") (ior "or") (xor "xor") (and "and")])
+(define_code_attr fetchop_inops02
+ [(plus "%2, %0") (minus "%0, %2") (ior "%2, %0") (xor "%2, %0")
+ (and "%2, %0")])
+(define_code_attr fetchop_inops21
+ [(plus "%1, %2") (minus "%2, %1") (ior "%1, %2") (xor "%1, %2")
+ (and "%1, %2")])
+
+(define_expand "sync_compare_and_swapsi"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "memory_operand" ""))
+ (set (match_dup 1)
+ (unspec_volatile:SI
+ [(match_operand:SI 2 "register_operand" "")
+ (match_operand:SI 3 "register_operand" "")]
+ UNSPECV_CAS))
+ (clobber (match_scratch:SI 4 ""))])]
+ ""
+{
+})
+
+(define_expand "sync_<fetchop_name>si"
+ [(parallel
+ [(set (match_operand:SI 0 "memory_operand" "")
+ (unspec:SI
+ [(FETCHOP:SI (match_dup 0)
+ (match_operand:SI 1 "<fetchop_pred>" ""))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:SI 2 ""))])]
+ ""
+{
+})
+
+(define_expand "sync_old_<fetchop_name>si"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "memory_operand" ""))
+ (set (match_dup 1)
+ (unspec:SI
+ [(FETCHOP:SI (match_dup 1)
+ (match_operand:SI 2 "<fetchop_pred>" ""))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:SI 3 ""))])]
+ ""
+{
+})
+
+(define_expand "sync_new_<fetchop_name>si"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand" "")
+ (FETCHOP:SI (match_operand:SI 1 "memory_operand" "")
+ (match_operand:SI 2 "<fetchop_pred>" "")))
+ (set (match_dup 1)
+ (unspec:SI [(FETCHOP:SI (match_dup 1) (match_dup 2))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:SI 3 ""))])]
+ ""
+{
+})
+
+(define_expand "sync_nandsi"
+ [(parallel
+ [(set (match_operand:SI 0 "memory_operand" "")
+ (unspec:SI
+ [(not:SI (and:SI (match_dup 0)
+ (match_operand:SI 1 "reg_or_scst5_operand" "")))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:SI 2 ""))])]
+ ""
+{
+})
+
+(define_expand "sync_old_nandsi"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand" "")
+ (match_operand:SI 1 "memory_operand" ""))
+ (set (match_dup 1)
+ (unspec:SI
+ [(not:SI (and:SI (match_dup 1)
+ (match_operand:SI 2 "reg_or_scst5_operand" "")))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:SI 3 ""))])]
+ ""
+{
+})
+
+(define_expand "sync_new_nandsi"
+ [(parallel
+ [(set (match_operand:SI 0 "register_operand" "")
+ (not:SI (and:SI (match_operand:SI 1 "memory_operand" "")
+ (match_operand:SI 2 "reg_or_scst5_operand" ""))))
+ (set (match_dup 1)
+ (unspec:SI [(not:SI (and:SI (match_dup 1) (match_dup 2)))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:SI 3 ""))])]
+ ""
+{
+})
+
+(define_insn "*sync_compare_and_swapsi"
+ [(set (match_operand:SI 0 "register_operand" "=&b")
+ (match_operand:SI 1 "memory_operand" "+m"))
+ (set (match_dup 1)
+ (unspec_volatile:SI
+ [(match_operand:SI 2 "register_operand" "B")
+ (match_operand:SI 3 "register_operand" "b")]
+ UNSPECV_CAS))
+ (clobber (match_scratch:SI 4 "=&B"))]
+ ""
+ "0: b .s2 1f ; 0\n\\
+ || ldw .d%U1t%U0 %1, %0\n\\
+ nop 4\n\\
+|| b .s2 2f ; 1\n\\
+ cmpeq .l2 %0, %2, %2 ; 5\n\\
+1: [%2] stw .d%U1t%U3 %3, %1 ; 6\n\\
+2:"
+ [(set_attr "type" "atomic")])
+
+(define_insn "sync_<fetchop_name>si_insn"
+ [(set (match_operand:SI 0 "memory_operand" "+m")
+ (unspec:SI
+ [(FETCHOP:SI (match_dup 0)
+ (match_operand:SI 1 "<fetchop_pred>" "<fetchop_constr>"))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:SI 2 "=&B"))]
+ ""
+ "0: b .s2 1f ; 0\n\\
+|| ldw .d%U0t%U2 %0, %2\n\\
+ nop 4\n\\
+|| b .s2 2f ; 1\n\\
+ <fetchop_opcode> .l2 <fetchop_inops21>, %2 ; 5\n\\
+1: stw .d%U0t%U2 %2, %0 ; 6\n\\
+2:"
+ [(set_attr "type" "atomic")])
+
+(define_insn "sync_old_<fetchop_name>si_insn"
+ [(set (match_operand:SI 0 "register_operand" "=&b")
+ (match_operand:SI 1 "memory_operand" "+m"))
+ (set (match_dup 1)
+ (unspec:SI
+ [(FETCHOP:SI (match_dup 1)
+ (match_operand:SI 2 "<fetchop_pred>" "<fetchop_constr>"))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:SI 3 "=&B"))]
+ ""
+ "0: b .s2 1f ; 0\n\\
+|| ldw .d%U1t%U0 %1, %0\n\\
+ nop 4\n\\
+|| b .s2 2f ; 1\n\\
+ <fetchop_opcode> .l2 <fetchop_inops02>, %3 ; 5\n\\
+1: stw .d%U1t%U3 %3, %1 ; 6\n\\
+2:"
+ [(set_attr "type" "atomic")])
+
+(define_insn "sync_new_<fetchop_name>si_insn"
+ [(set (match_operand:SI 0 "register_operand" "=&b")
+ (FETCHOP:SI (match_operand:SI 1 "memory_operand" "+m")
+ (match_operand:SI 2 "<fetchop_pred>" "<fetchop_constr>")))
+ (set (match_dup 1)
+ (unspec:SI
+ [(FETCHOP:SI (match_dup 1)
+ (match_dup 2))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:SI 3 "=&B"))]
+ ""
+ "0: b .s2 1f ; 0\n\\
+|| ldw .d%U1t%U0 %1, %0\n\\
+ nop 4\n\\
+|| b .s2 2f ; 1\n\\
+ <fetchop_opcode> .l2 <fetchop_inops02>, %0 ; 5\n\\
+1: stw .d%U1t%U0 %0, %1 ; 6\n\\
+2:"
+ [(set_attr "type" "atomic")])
+
+(define_insn "sync_nandsi_insn"
+ [(set (match_operand:SI 0 "memory_operand" "+m")
+ (unspec:SI
+ [(not:SI (and:SI (match_dup 0)
+ (match_operand:SI 1 "reg_or_scst5_operand" "bIs5")))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:SI 2 "=&B"))]
+ ""
+ "0: b .s2 1f ; 0\n\\
+|| ldw .d%U0t%U2 %0, %2\n\\
+ nop 1\n\\
+ nop 3\n\\
+|| b .s2 2f ; 2\n\\
+ and .l2 %1, %2, %2 ; 5\n\\
+1: not .l2 %2, %2 ; 6\n\\
+ stw .d%U0t%U2 %2, %0 ; 7\n\\
+2:"
+ [(set_attr "type" "atomic")])
+
+(define_insn "sync_old_nandsi_insn"
+ [(set (match_operand:SI 0 "register_operand" "=&b")
+ (match_operand:SI 1 "memory_operand" "+m"))
+ (set (match_dup 1)
+ (unspec:SI
+ [(not:SI (and:SI (match_dup 1)
+ (match_operand:SI 2 "reg_or_scst5_operand" "bIs5")))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:SI 3 "=&B"))]
+ ""
+ "0: b .s2 1f ; 0\n\\
+|| ldw .d%U1t%U0 %1, %0\n\\
+ nop 1\n\\
+ nop 3\n\\
+|| b .s2 2f ; 2\n\\
+ and .l2 %2, %0, %3 ; 5\n\\
+1: not .l2 %3, %3 ; 6\n\\
+ stw .d%U1t%U3 %3, %1 ; 7\n\\
+2:"
+ [(set_attr "type" "atomic")])
+
+(define_insn "sync_new_nandsi_insn"
+ [(set (match_operand:SI 0 "register_operand" "=&b")
+ (not:SI (and:SI (match_operand:SI 1 "memory_operand" "+m")
+ (match_operand:SI 2 "reg_or_scst5_operand" "bIs5"))))
+ (set (match_dup 1)
+ (unspec:SI
+ [(not:SI (and:SI (match_dup 1) (match_dup 2)))]
+ UNSPEC_ATOMIC))
+ (clobber (match_scratch:SI 3 "=&B"))]
+ ""
+ "0: b .s2 1f ; 0\n\\
+|| ldw .d%U1t%U0 %1, %0\n\\
+ nop 1\n\\
+ nop 3\n\\
+|| b .s2 2f ; 2\n\\
+ and .l2 %2, %0, %0 ; 5\n\\
+1: not .l2 %0, %0 ; 6\n\\
+ stw .d%U1t%U0 %0, %1 ; 7\n\\
+2:"
+ [(set_attr "type" "atomic")])
--- /dev/null
+# Target Makefile Fragment for TI C6X.
+# Copyright (C) 2010, 2011
+# Free Software Foundation, Inc.
+# Contributed by CodeSourcery.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published
+# by the Free Software Foundation; either version 3, or (at your
+# option) any later version.
+#
+# GCC is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+# License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+MD_INCLUDES= $(srcdir)/config/c6x/constraints.md \
+ $(srcdir)/config/c6x/predicates.md \
+ $(srcdir)/config/c6x/c6x-mult.md \
+ $(srcdir)/config/c6x/c6x-sched.md
+
+s-config s-conditions s-flags s-codes s-constants s-emit s-recog s-preds \
+ s-opinit s-extract s-peep s-attr s-attrtab s-output: $(MD_INCLUDES)
+
+$(srcdir)/config/c6x/c6x-sched.md: $(srcdir)/config/c6x/gensched.sh \
+ $(srcdir)/config/c6x/c6x-sched.md.in
+ $(SHELL) $(srcdir)/config/c6x/gensched.sh \
+ $(srcdir)/config/c6x/c6x-sched.md.in > $@
+
+$(srcdir)/config/c6x/c6x-mult.md: $(srcdir)/config/c6x/genmult.sh \
+ $(srcdir)/config/c6x/c6x-mult.md.in
+ $(SHELL) $(srcdir)/config/c6x/genmult.sh \
+ $(srcdir)/config/c6x/c6x-mult.md.in > $@
+
+$(srcdir)/config/c6x/c6x-tables.opt: $(srcdir)/config/c6x/genopt.sh \
+ $(srcdir)/config/c6x/c6x-isas.def
+ $(SHELL) $(srcdir)/config/c6x/genopt.sh $(srcdir)/config/c6x > \
+ $(srcdir)/config/c6x/c6x-tables.opt
--- /dev/null
+# Target Makefile Fragment for TI C6X using ELF.
+# Copyright (C) 2010, 2011 Free Software Foundation, Inc.
+# Contributed by CodeSourcery.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published
+# by the Free Software Foundation; either version 3, or (at your
+# option) any later version.
+#
+# GCC is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+# License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+LIB1ASMSRC = c6x/lib1funcs.asm
+LIB1ASMFUNCS = _divsi3 _udivsi3 _umodsi3 _modsi3 _udivmodsi4 _divmodsi4
+LIB1ASMFUNCS += _strasgi _strasgi_64plus _clzsi2 _clzdi2 _clz
+LIB1ASMFUNCS += _push_rts _pop_rts _call_stub
+
+LIB2FUNCS_EXCLUDE = _cmpdi2 _ucmpdi2 _gcc_bcmp _eprintf _clzsi _clzdi
+
+LIB2FUNCS_EXTRA = $(srcdir)/config/c6x/gef.c \
+ $(srcdir)/config/c6x/gtf.c \
+ $(srcdir)/config/c6x/lef.c \
+ $(srcdir)/config/c6x/ltf.c \
+ $(srcdir)/config/c6x/eqf.c \
+ $(srcdir)/config/c6x/ged.c \
+ $(srcdir)/config/c6x/gtd.c \
+ $(srcdir)/config/c6x/led.c \
+ $(srcdir)/config/c6x/ltd.c \
+ $(srcdir)/config/c6x/eqd.c
+
+# Use this variant for fully testing all CPU types
+#MULTILIB_OPTIONS = mbig-endian march=c674x/march=c64x/march=c67x/march=c67x+/march=c62x
+#MULTILIB_DIRNAMES = be c674x c64x c67x c67x+ c62x
+
+MULTILIB_OPTIONS = mbig-endian march=c674x
+MULTILIB_DIRNAMES = be c674x
+MULTILIB_EXCEPTIONS =
+MULTILIB_MATCHES =
+
+# Assemble startup files.
+$(T)crti.o: $(srcdir)/config/c6x/crti.s $(GCC_PASSES)
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \
+ -c -o $(T)crti.o $(CRTSTUFF_T_CFLAGS) -x assembler-with-cpp \
+ $(srcdir)/config/c6x/crti.s
+
+$(T)crtn.o: $(srcdir)/config/c6x/crtn.s $(GCC_PASSES)
+ $(GCC_FOR_TARGET) $(GCC_CFLAGS) $(MULTILIB_CFLAGS) $(INCLUDES) \
+ -c -o $(T)crtn.o $(CRTSTUFF_T_CFLAGS) -x assembler-with-cpp \
+ $(srcdir)/config/c6x/crtn.s
+
+EXTRA_MULTILIB_PARTS = crtbegin.o crtend.o crtbeginS.o crtendS.o crti.o crtn.o
+
+# Avoid failures when the user's GOT becomes too large.
+CRTSTUFF_T_CFLAGS = -msdata=none
+CRTSTUFF_T_CFLAGS_S = -msdata=none
+TARGET_LIBGCC2_CFLAGS = -msdata=none
+
+SHLIB_MAPFILES += $(srcdir)/config/c6x/libgcc-c6xeabi.ver
--- /dev/null
+softfp_float_modes := sf df
+softfp_int_modes := si di
+softfp_extensions := sfdf
+softfp_truncations := dfsf
+softfp_machine_header := c6x/sfp-machine.h
+softfp_exclude_libgcc2 := y
+
+# softfp seems to be missing a whole bunch of prototypes.
+TARGET_LIBGCC2_CFLAGS += -Wno-missing-prototypes
--- /dev/null
+MULTILIB_OSDIRNAMES = march.c674x=!c674x
+MULTILIB_OSDIRNAMES += mbig-endian=!be
+MULTILIB_OSDIRNAMES += mbig-endian/march.c674x=!be/c674x
+
+CRTSTUFF_T_CFLAGS = -fPIC -msdata=none
+CRTSTUFF_T_CFLAGS_S = -fPIC -msdata=none
+TARGET_LIBGCC2_CFLAGS = -fPIC -msdata=none
--- /dev/null
+/* Definitions for TI C6X running uClinux using ELF
+ Copyright (C) 2010, 2011 Free Software Foundation, Inc.
+ Contributed by Andrew Jenner <andrew@codesourcery.com>
+ Contributed by Bernd Schmidt <bernds@codesourcery.com>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#undef TARGET_OS_CPP_BUILTINS
+#define TARGET_OS_CPP_BUILTINS() \
+ do \
+ { \
+ builtin_define ("__uClinux__"); \
+ builtin_define_std ("linux"); \
+ builtin_define_std ("unix"); \
+ builtin_assert ("system=linux"); \
+ builtin_assert ("system=unix"); \
+ builtin_assert ("system=posix"); \
+ } \
+ while (false)
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC \
+ "%{!shared:crt1%O%s} crti%O%s %{shared|pie:crtbeginS.o%s;:crtbegin.o%s}"
+
+#define UCLIBC_DYNAMIC_LINKER "/lib/ld-uClibc.so.0"
+
+#undef LINK_SPEC
+#define LINK_SPEC ENDIAN_LINK_SPEC \
+ "%{shared} %{fpie|fPIE:-pie} \
+ %{!shared: %{!static: \
+ %{rdynamic:-export-dynamic} \
+ %{!dynamic-linker:-dynamic-linker " UCLIBC_DYNAMIC_LINKER "}} \
+ %{static}}"
+
+#undef DRIVER_SELF_SPECS
+#define DRIVER_SELF_SPECS "%{!mno-dsbt:-mdsbt}"
+
+/* Clear the instruction cache from `beg' to `end'. This makes an
+ inline system call to SYS_cacheflush. */
+#undef CLEAR_INSN_CACHE
+#define CLEAR_INSN_CACHE(BEG, END) \
+{ \
+ register unsigned long _beg __asm ("A4") = (unsigned long) (BEG); \
+ register unsigned long _end __asm ("B4") = (unsigned long) (END); \
+ register unsigned long _scno __asm ("B0") = 244; \
+ __asm __volatile ("swe ; sys_cache_sync" \
+ : "=a" (_beg) \
+ : "0" (_beg), "b" (_end), "b" (_scno)); \
+}
+
@item
Bernd Schmidt for various code generation improvements and major
-work in the reload pass as well a serving as release manager for
-GCC 2.95.3.
+work in the reload pass, serving as release manager for
+GCC 2.95.3, and work on the Blackfin and C6X ports.
@item
Peter Schmid for constant testing of libstdc++---especially application
* RX Built-in Functions::
* SPARC VIS Built-in Functions::
* SPU Built-in Functions::
+* TI C6X Built-in Functions::
@end menu
@node Alpha Built-in Functions
implement the required functionality, but these are not supported and
are subject to change without notice.
+@node TI C6X Built-in Functions
+@subsection TI C6X Built-in Functions
+
+GCC provides intrinsics to access certain instructions of the TI C6X
+processors. These intrinsics, listed below, are available after
+inclusion of the @code{c6x_intrinsics.h} header file. They map directly
+to C6X instructions.
+
+@smallexample
+
+int _sadd (int, int)
+int _ssub (int, int)
+int _sadd2 (int, int)
+int _ssub2 (int, int)
+long long _mpy2 (int, int)
+long long _smpy2 (int, int)
+int _add4 (int, int)
+int _sub4 (int, int)
+int _saddu4 (int, int)
+
+int _smpy (int, int)
+int _smpyh (int, int)
+int _smpyhl (int, int)
+int _smpylh (int, int)
+
+int _sshl (int, int)
+int _subc (int, int)
+
+int _avg2 (int, int)
+int _avgu4 (int, int)
+
+int _clrr (int, int)
+int _extr (int, int)
+int _extru (int, int)
+int _abs (int)
+int _abs2 (int)
+
+@end smallexample
+
@node Target Format Checks
@section Format Checks Specific to Particular Target Machines
@item
@uref{#sparcv9-x-solaris2,,sparcv9-*-solaris2*}
@item
+@uref{#c6x-x-x,,c6x-*-*}
+@item
@uref{#x-x-vxworks,,*-*-vxworks*}
@item
@uref{#x86-64-x-x,,x86_64-*-*, amd64-*-*}
This is a synonym for @samp{sparc64-*-solaris2*}.
+@html
+<hr />
+@end html
+@heading @anchor{c6x-x-x}c6x-*-*
+
+The C6X family of processors. This port requires binutils-2.22 or newer.
+
@html
<hr />
@end html
-mfast-fp -minline-plt -mmulticore -mcorea -mcoreb -msdram @gol
-micplb}
+@emph{C6X Options}
+@gccoptlist{-mbig-endian -mlittle-endian -march=@var{cpu} @gol
+-msim -msdata=@var{sdata-type}}
+
@emph{CRIS Options}
@gccoptlist{-mcpu=@var{cpu} -march=@var{cpu} -mtune=@var{cpu} @gol
-mmax-stack-frame=@var{n} -melinux-stacksize=@var{n} @gol
* ARM Options::
* AVR Options::
* Blackfin Options::
+* C6X Options::
* CRIS Options::
* Darwin Options::
* DEC Alpha Options::
are enabled; for standalone applications the default is off.
@end table
+@node C6X Options
+@subsection C6X Options
+@cindex C6X Options
+
+@table @gcctabopt
+@item -march=@var{name}
+@opindex march
+This specifies the name of the target architecture. GCC uses this
+name to determine what kind of instructions it can emit when generating
+assembly code. Permissible names are: @samp{c62x},
+@samp{c64x}, @samp{c64x+}, @samp{c67x}, @samp{c67x+}, @samp{c674x}.
+
+@item -mbig-endian
+@opindex mbig-endian
+Generate code for a big endian target.
+
+@item -mlittle-endian
+@opindex mlittle-endian
+Generate code for a little endian target. This is the default.
+
+@item -msim
+@opindex msim
+Choose startup files and linker script suitable for the simulator.
+
+@item -msdata=default
+@opindex msdata=default
+Put small global and static data in the @samp{.neardata} section,
+which is pointed to by register @code{B14}. Put small uninitialized
+global and static data in the @samp{.bss} section, which is adjacent
+to the @samp{.neardata} section. Put small read-only data into the
+@samp{.rodata} section. The corresponding sections used for large
+pieces of data are @samp{.fardata}, @samp{.far} and @samp{.const}.
+
+@item -msdata=all
+@opindex msdata=all
+Put all data, not just small objects, into the sections reserved for
+small data, and use addressing relative to the @code{B14} register to
+access them.
+
+@item -msdata=none
+@opindex msdata=none
+Make no use of the sections reserved for small data, and use absolute
+addresses to access all data. Put all initialized global and static
+data in the @samp{.fardata} section, and all uninitialized data in the
+@samp{.far} section. Put all constant data into the @samp{.const}
+section.
+@end table
+
@node CRIS Options
@subsection CRIS Options
@cindex CRIS Options
@end table
+@item TI C6X family---@file{config/c6x/constraints.md}
+@table @code
+@item a
+Register file A (A0--A31).
+
+@item b
+Register file B (B0--B31).
+
+@item A
+Predicate registers in register file A (A0--A2 on C64X and
+higher, A1 and A2 otherwise).
+
+@item B
+Predicate registers in register file B (B0--B2).
+
+@item C
+A call-used register in register file B (B0--B9, B16--B31).
+
+@item Da
+Register file A, excluding predicate registers (A3--A31,
+plus A0 if not C64X or higher).
+
+@item Db
+Register file B, excluding predicate registers (B3--B31).
+
+@item Iu4
+Integer constant in the range 0 @dots{} 15.
+
+@item Iu5
+Integer constant in the range 0 @dots{} 31.
+
+@item In5
+Integer constant in the range @minus{}31 @dots{} 0.
+
+@item Is5
+Integer constant in the range @minus{}16 @dots{} 15.
+
+@item I5x
+Integer constant that can be the operand of an ADDA or a SUBA insn.
+
+@item IuB
+Integer constant in the range 0 @dots{} 65535.
+
+@item IsB
+Integer constant in the range @minus{}32768 @dots{} 32767.
+
+@item IsC
+Integer constant in the range @math{-2^{20}} @dots{} @math{2^{20} - 1}.
+
+@item Jc
+Integer constant that is a valid mask for the clr instruction.
+
+@item Js
+Integer constant that is a valid mask for the set instruction.
+
+@item Q
+Memory location with A base register.
+
+@item R
+Memory location with B base register.
+
+@ifset INTERNALS
+@item S0
+On C64x+ targets, a GP-relative small data reference.
+
+@item S1
+Any kind of @code{SYMBOL_REF}, for use in a call address.
+
+@item Si
+Any kind of immediate operand, unless it matches the S0 constraint.
+
+@item T
+Memory location with B base register, but not using a long offset.
+
+@item W
+A memory operand with an address that can't be used in an unaligned access.
+
+@end ifset
+@item Z
+Register B14 (aka DP).
+
+@end table
+
@item Xtensa---@file{config/xtensa/constraints.md}
@table @code
@item a
} while (0)
#endif /* __vax__ */
+#ifdef _TMS320C6X
+#define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+ do \
+ { \
+ UDItype __ll; \
+ __asm__ ("addu .l1 %1, %2, %0" \
+ : "=a" (__ll) : "a" (al), "a" (bl)); \
+ (sl) = (USItype)__ll; \
+ (sh) = ((USItype)(__ll >> 32)) + (ah) + (bh); \
+ } \
+ while (0)
+
+#ifdef _TMS320C6400_PLUS
+#define __umulsidi3(u,v) ((UDItype)(USItype)u*(USItype)v)
+#define umul_ppmm(w1, w0, u, v) \
+ do { \
+ UDItype __x = (UDItype) (USItype) (u) * (USItype) (v); \
+ (w1) = (USItype) (__x >> 32); \
+ (w0) = (USItype) (__x); \
+ } while (0)
+#endif /* _TMS320C6400_PLUS */
+
+#define count_leading_zeros(count, x) ((count) = __builtin_clz (x))
+#ifdef _TMS320C6400
+#define count_trailing_zeros(count, x) ((count) = __builtin_ctz (x))
+#endif
+#define UMUL_TIME 4
+#define UDIV_TIME 40
+#endif /* _TMS320C6X */
+
#if defined (__xtensa__) && W_TYPE_SIZE == 32
/* This code is not Xtensa-configuration-specific, so rely on the compiler
to expand builtin functions depending on what configuration features
+2011-07-15 Bernd Schmidt <bernds@codesourcery.com>
+
+ * config.host: Handle tic6x-*-*.
+ * config/c6x/c6x-abi.h: New file.
+
2011-07-13 Rainer Orth <ro@CeBiTec.Uni-Bielefeld.DE>
* config/i386/crtprec.c: New file.
v850*-*-*)
cpu_type=v850
;;
+tic6x-*-*)
+ cpu_type=c6x
+ ;;
esac
# Common parts for widely ported systems.
;;
spu-*-elf*)
;;
+tic6x-*-*)
+ tmake_file="${tmake_file} t-gnu-prefix"
+ ;;
v850*-*-*)
;;
vax-*-linux*)
--- /dev/null
+/* Header file for C6X ABI versions of libgcc functions.
+ Copyright (C) 2011
+ Free Software Foundation, Inc.
+ Contributed by Bernd Schmidt <bernds@codesourcery.com>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+/* Make __c6xabi_AEABI_NAME an alias for __GCC_NAME. */
+#define RENAME_LIBRARY(GCC_NAME, AEABI_NAME) \
+ __asm__ (".globl\t__c6xabi_" #AEABI_NAME "\n" \
+ ".set\t__c6xabi_" #AEABI_NAME \
+ ", __gnu_" #GCC_NAME "\n");
+
+/* Rename helper functions to the names specified in the C6000 ELF ABI. */
+#ifdef L_divsi3
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (divsi3, divi)
+#endif
+#ifdef L_divdi3
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (divdi3, divlli)
+#endif
+#ifdef L_udivsi3
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (udivsi3, divu)
+#endif
+#ifdef L_udivdi3
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (udivdi3, divull)
+#endif
+#ifdef L_udivmoddi4
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (udivmoddi4, divremull)
+#endif
+#ifdef L_modsi3
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (modsi3, remi)
+#endif
+#ifdef L_moddi3
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (moddi3, remlli)
+#endif
+#ifdef L_umodsi3
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (umodsi3, remu)
+#endif
+#ifdef L_umoddi3
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (umoddi3, remull)
+#endif
+#ifdef L_negdi2
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (negdi2, negll)
+#endif
+#ifdef L_muldi3
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (muldi3, mpyll)
+#endif
+#ifdef L_ashrdi3
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (ashrdi3, llshr)
+#endif
+#ifdef L_lshrdi3
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (lshrdi3, llshru)
+#endif
+#ifdef L_ashldi3
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (ashldi3, llshl)
+#endif
+
+/* The following are excluded from softfp due to softfp_exclude_libgcc2,
+ so we rename them here rather than in sfp-machine.h. */
+#ifdef L_fixdfdi
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixdfdi, fixdlli)
+#endif
+#ifdef L_fixunsdfsi
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixunsdfsi, fixdu)
+#endif
+#ifdef L_fixunsdfdi
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixunsdfdi, fixdull)
+#endif
+#ifdef L_fixsfdi
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixsfdi, fixflli)
+#endif
+#ifdef L_fixunssfsi
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixunssfsi, fixfu)
+#endif
+#ifdef L_fixunssfdi
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (fixunssfdi, fixfull)
+#endif
+#ifdef L_floatdidf
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (floatdidf, fltllid)
+#endif
+#ifdef L_floatundidf
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (floatundidf, fltulld)
+#endif
+#ifdef L_floatdisf
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (floatdisf, fltllif)
+#endif
+#ifdef L_floatundisf
+#define DECLARE_LIBRARY_RENAMES RENAME_LIBRARY (floatundisf, fltullf)
+#endif
+
+#define LIBGCC2_GNU_PREFIX