+2015-05-16 James Bowman <james.bowman@ftdichip.com>
+
+ * configure.ac: FT32 target added.
+ * libgcc/config.host: FT32 target added.
+ * gcc/config/ft32/: FT32 target added.
+ * libgcc/config/ft32/: FT32 target added.
+ * gcc/doc/install.texi, invoke.texi, md.texi: FT32 details added.
+	* gcc/doc/contrib.texi: Added self.
+ * contrib/config-list.mk: FT32 target added.
+ * configure: Regenerated.
+
2015-05-16 Iain Buclaw <ibuclaw@gdcproject.org>
* MAINTAINERS (Write After Approval): Add myself.
avr-*-*)
noconfigdirs="$noconfigdirs target-libstdc++-v3"
;;
+ ft32-*-*)
+ noconfigdirs="$noconfigdirs target-libstdc++-v3"
+ ;;
esac
fi
avr-*-*)
noconfigdirs="$noconfigdirs target-libstdc++-v3"
;;
+ ft32-*-*)
+ noconfigdirs="$noconfigdirs target-libstdc++-v3"
+ ;;
esac
fi
bfin-elf bfin-uclinux bfin-linux-uclibc bfin-rtems bfin-openbsd \
c6x-elf c6x-uclinux cr16-elf cris-elf cris-linux crisv32-elf crisv32-linux \
epiphany-elf epiphany-elfOPT-with-stack-offset=16 fido-elf \
- fr30-elf frv-elf frv-linux h8300-elf h8300-rtems hppa-linux-gnu \
+ fr30-elf frv-elf frv-linux ft32-elf h8300-elf h8300-rtems hppa-linux-gnu \
hppa-linux-gnuOPT-enable-sjlj-exceptions=yes hppa64-linux-gnu \
hppa2.0-hpux10.1 hppa64-hpux11.3 \
hppa64-hpux11.0OPT-enable-sjlj-exceptions=yes hppa2.0-hpux11.9 \
frv*) cpu_type=frv
extra_options="${extra_options} g.opt"
;;
+ft32*) cpu_type=ft32
+ target_has_targetm_common=no
+ ;;
moxie*) cpu_type=moxie
target_has_targetm_common=no
;;
gnu-user.h linux.h glibc-stdint.h frv/linux.h"
tmake_file="${tmake_file} frv/t-frv frv/t-linux"
;;
+ft32-*-elf)
+ gas=yes
+ gnu_ld=yes
+ tm_file="dbxelf.h elfos.h newlib-stdint.h ${tm_file}"
+ tmake_file="${tmake_file} ft32/t-ft32"
+ ;;
moxie-*-elf)
gas=yes
gnu_ld=yes
--- /dev/null
+;; Constraint definitions for FT32
+;; Copyright (C) 2015 Free Software Foundation, Inc.
+;; Contributed by FTDI <support@ftdi.com>
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; -------------------------------------------------------------------------
+;; Constraints
+;; -------------------------------------------------------------------------
+
+(define_memory_constraint "A"
+ "An absolute address."
+ (and (match_code "mem")
+ (match_test "(!ft32_is_mem_pm(op))")
+ (ior (match_test "GET_CODE (XEXP (op, 0)) == SYMBOL_REF")
+ (match_test "GET_CODE (XEXP (op, 0)) == LABEL_REF")
+ (match_test "GET_CODE (XEXP (op, 0)) == CONST_INT")
+ (and (match_test "(GET_CODE (XEXP (op, 0)) == PLUS)")
+ (ior (match_test "GET_CODE (XEXP (XEXP (op, 0), 0)) == SYMBOL_REF")
+ (match_test "GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF")
+ (match_test "GET_CODE (XEXP (XEXP (op, 0), 0)) == CONST_INT"))
+ (ior (match_test "GET_CODE (XEXP (XEXP (op, 0), 1)) == SYMBOL_REF")
+ (match_test "GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF")
+ (match_test "GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST_INT"))))))
+
+(define_memory_constraint "B"
+ "An offset address."
+ (and (match_code "mem")
+ (match_test "(!ft32_is_mem_pm(op))")
+ (match_test "(GET_CODE (XEXP (op, 0)) == PLUS)")))
+
+(define_memory_constraint "W"
+ "A register indirect memory operand."
+ (and (match_code "mem")
+ (match_test "!ft32_is_mem_pm(op)
+ && REG_P (XEXP (op, 0))
+ && REGNO_OK_FOR_BASE_P (REGNO (XEXP (op, 0)))")))
+
+(define_memory_constraint "e"
+ "An offset address."
+ (and (match_code "mem")
+ (match_test "ft32_is_mem_pm(op) && (
+ (GET_CODE (XEXP (op, 0)) == SYMBOL_REF) ||
+ (GET_CODE (XEXP (op, 0)) == LABEL_REF) ||
+ (GET_CODE (XEXP (op, 0)) == CONST_INT) ||
+ (GET_CODE (XEXP (op, 0)) == CONST))"
+ )))
+
+(define_memory_constraint "f"
+ "An offset address."
+ (and (match_code "mem")
+ (match_test "ft32_is_mem_pm(op) && (
+ ((GET_CODE (XEXP (op, 0)) == PLUS)) ||
+ (GET_CODE (XEXP (op, 0)) == REG))"
+ )))
+
+(define_constraint "O"
+ "The constant zero or one"
+ (and (match_code "const_int")
+ (match_test "((ival == 0) || (ival == 1))")))
+
+(define_constraint "I"
+ "A 16-bit signed constant (-32768..32767)"
+ (and (match_code "const_int")
+ (match_test "ival >= -32768 && ival <= 32767")))
+
+(define_constraint "w"
+ "A bitfield mask suitable for bext or bins"
+ (and (match_code "const_int")
+ (match_test "ft32_as_bitfield(ival) != -1")))
+
+(define_constraint "x"
+ "An inverted bitfield mask suitable for bext or bins"
+ (and (match_code "const_int")
+ (match_test "ft32_as_bitfield(0xffffffff ^ ival) != -1")))
+
+(define_constraint "L"
+  "A 16-bit non-positive constant, multiple of 4 (-65532..0)"
+  (and (match_code "const_int")
+       (match_test "-65532 <= ival && ival <= 0 && (ival & 3) == 0")))
+
+(define_constraint "S"
+ "A 20-bit signed constant (-524288..524287)"
+ (ior
+ (and (match_code "const_int")
+ (match_test "ival >= -524288 && ival <= 524287"))
+ (match_test "GET_CODE (op) == LABEL_REF")
+ (match_test "GET_CODE (op) == SYMBOL_REF")
+ (match_test "GET_CODE (op) == CONST")))
+
+(define_constraint "b"
+ "A constant for a bitfield width (1..16)"
+ (and (match_code "const_int")
+ (match_test "1 <= ival && ival <= 16")))
+
+(define_constraint "KA"
+ "A 10-bit signed constant (-512..511)"
+ (and (match_code "const_int")
+ (match_test "-512 <= ival && ival <= 511")))
--- /dev/null
+/* Prototypes for ft32.c functions used in the md file & elsewhere.
+ Copyright (C) 2015 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation; either version 3, or (at your option)
+any later version.
+
+GCC is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with GCC; see the file COPYING3. If not see
+<http://www.gnu.org/licenses/>. */
+
+extern void ft32_expand_prologue (void);
+extern void ft32_expand_epilogue (void);
+extern int ft32_initial_elimination_offset (int, int);
+extern void ft32_print_operand (FILE *, rtx, int);
+extern void ft32_print_operand_address (FILE *, rtx);
+extern const char* ft32_load_immediate(rtx, int32_t i);
+extern int ft32_as_bitfield(unsigned int x);
--- /dev/null
+/* Target Code for ft32
+ Copyright (C) 2015 Free Software Foundation
+ Contributed by FTDI <support@ftdi.com>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#include "config.h"
+#include "system.h"
+#include "coretypes.h"
+#include "tm.h"
+#include "rtl.h"
+#include "regs.h"
+#include "hard-reg-set.h"
+#include "insn-config.h"
+#include "conditions.h"
+#include "insn-flags.h"
+#include "output.h"
+#include "insn-attr.h"
+#include "flags.h"
+#include "recog.h"
+#include "reload.h"
+#include "diagnostic-core.h"
+#include "obstack.h"
+#include "hash-set.h"
+#include "machmode.h"
+#include "vec.h"
+#include "double-int.h"
+#include "input.h"
+#include "alias.h"
+#include "symtab.h"
+#include "wide-int.h"
+#include "inchash.h"
+#include "tree.h"
+#include "stor-layout.h"
+#include "calls.h"
+#include "expr.h"
+#include "optabs.h"
+#include "except.h"
+#include "function.h"
+#include "ggc.h"
+#include "target.h"
+#include "target-def.h"
+#include "tm_p.h"
+#include "langhooks.h"
+#include "dominance.h"
+#include "cfg.h"
+#include "cfgrtl.h"
+#include "cfganal.h"
+#include "lcm.h"
+#include "cfgbuild.h"
+#include "cfgcleanup.h"
+#include "predict.h"
+#include "basic-block.h"
+#include "df.h"
+#include "builtins.h"
+
+
+#include <stdint.h>
+
+#define LOSE_AND_RETURN(msgid, x) \
+ do \
+ { \
+ ft32_operand_lossage (msgid, x); \
+ return; \
+ } while (0)
+
+/* Worker function for TARGET_RETURN_IN_MEMORY. */
+
+static bool
+ft32_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
+{
+ const HOST_WIDE_INT size = int_size_in_bytes (type);
+ return (size == -1 || size > 2 * UNITS_PER_WORD);
+}
+
+/* Define how to find the value returned by a function.
+ VALTYPE is the data type of the value (as a tree).
+ If the precise function being called is known, FUNC is its
+ FUNCTION_DECL; otherwise, FUNC is 0.
+
+ We always return values in register $r0 for ft32. */
+
+static rtx
+ft32_function_value (const_tree valtype,
+ const_tree fntype_or_decl ATTRIBUTE_UNUSED,
+ bool outgoing ATTRIBUTE_UNUSED)
+{
+ return gen_rtx_REG (TYPE_MODE (valtype), FT32_R0);
+}
+
+/* Define how to find the value returned by a library function.
+
+ We always return values in register $r0 for ft32. */
+
+static rtx
+ft32_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
+{
+ return gen_rtx_REG (mode, FT32_R0);
+}
+
+/* Handle TARGET_FUNCTION_VALUE_REGNO_P.
+
+ We always return values in register $r0 for ft32. */
+
+static bool
+ft32_function_value_regno_p (const unsigned int regno)
+{
+ return (regno == FT32_R0);
+}
+
+/* Emit an error message when we're in an asm, and a fatal error for
+ "normal" insns. Formatted output isn't easily implemented, since we
+ use output_operand_lossage to output the actual message and handle the
+ categorization of the error. */
+
+static void
+ft32_operand_lossage (const char *msgid, rtx op)
+{
+ debug_rtx (op);
+ output_operand_lossage ("%s", msgid);
+}
+
+/* The PRINT_OPERAND_ADDRESS worker.  Write to FILE the assembler
+   syntax for the address X.  Handled forms:
+     (reg)                 -> "reg,0"
+     (plus reg const_int)  -> "reg,N"
+     (plus reg symbol_ref) -> "sym(reg)"
+     (plus reg (const (plus symbol_ref const_int))) -> "sym+N(reg)"
+   Anything else falls through to output_addr_const.  */
+
+void
+ft32_print_operand_address (FILE * file, rtx x)
+{
+  switch (GET_CODE (x))
+    {
+    case REG:
+      /* Plain register indirect: offset of zero.  */
+      fprintf (file, "%s,0", reg_names[REGNO (x)]);
+      break;
+
+    case PLUS:
+      /* Base register plus constant, symbol, or symbol+offset.  */
+      switch (GET_CODE (XEXP (x, 1)))
+	{
+	case CONST_INT:
+	  fprintf (file, "%s,%ld",
+		   reg_names[REGNO (XEXP (x, 0))], INTVAL (XEXP (x, 1)));
+	  break;
+	case SYMBOL_REF:
+	  output_addr_const (file, XEXP (x, 1));
+	  fprintf (file, "(%s)", reg_names[REGNO (XEXP (x, 0))]);
+	  break;
+	case CONST:
+	  {
+	    /* Only (const (plus symbol_ref const_int)) is supported.  */
+	    rtx plus = XEXP (XEXP (x, 1), 0);
+	    if (GET_CODE (XEXP (plus, 0)) == SYMBOL_REF
+		&& CONST_INT_P (XEXP (plus, 1)))
+	      {
+		output_addr_const (file, XEXP (plus, 0));
+		fprintf (file, "+%ld(%s)", INTVAL (XEXP (plus, 1)),
+			 reg_names[REGNO (XEXP (x, 0))]);
+	      }
+	    else
+	      abort ();
+	  }
+	  break;
+	default:
+	  abort ();
+	}
+      break;
+
+    default:
+      /* Bare constant or symbolic address.  */
+      output_addr_const (file, x);
+      break;
+    }
+}
+
+/* The PRINT_OPERAND worker.  Print operand X to FILE under modifier
+   letter CODE (0 = no modifier):
+     'h' -- the next-higher register of a pair (regno + 1)
+     'm' -- the negated value of a CONST_INT
+     'd' -- a width letter (b/s/l) derived from an alignment constant
+     'f' -- a bext/bins bitfield spec computed from a mask constant
+     'g' -- as 'f' but for the inverted mask
+     'b' -- the first operand of X, printed with no modifier  */
+
+void
+ft32_print_operand (FILE * file, rtx x, int code)
+{
+  rtx operand = x;
+
+  /* New code entries should just be added to the switch below.  If
+     handling is finished, just return.  If handling was just a
+     modification of the operand, the modified operand should be put in
+     "operand", and then do a break to let default handling
+     (zero-modifier) output the operand.  */
+
+  switch (code)
+    {
+    case 0:
+      /* No code, print as usual.  */
+      break;
+
+    case 'h':
+      if (GET_CODE (operand) != REG)
+        internal_error ("'h' applied to non-register operand");
+      fprintf (file, "%s", reg_names[REGNO (operand) + 1]);
+      return;
+
+    case 'm':
+      /* INTVAL yields a HOST_WIDE_INT; the original passed it to a
+         plain %d conversion, a format mismatch on LP64 hosts.  Use
+         %ld, as the other INTVAL prints in this file do.  */
+      fprintf (file, "%ld", -INTVAL (x));
+      return;
+
+    case 'd':  // a DW spec, from an integer alignment (for BLKmode insns)
+      {
+        int i = INTVAL (x);
+        char dwspec;
+        switch (i)
+          {
+          case 1:
+            dwspec = 'b';
+            break;
+          case 2:
+            dwspec = 's';
+            break;
+          case 4:
+            dwspec = 'l';
+            break;
+          default:
+            if ((i % 4) != 0)
+              internal_error ("bad alignment: %d", i);
+            else
+              dwspec = 'l';
+            break;
+          }
+        fprintf (file, "%c", dwspec);
+        return;
+      }
+
+    case 'f':
+      {
+        /* The 'w' constraint guarantees ft32_as_bitfield () != -1
+           here.  */
+        int bf = ft32_as_bitfield (INTVAL (x));
+        fprintf (file, "512|(%d<<5)|%d", bf >> 5, bf & 31);
+        return;
+      }
+
+    case 'g':
+      {
+        /* The 'x' constraint guarantees a valid inverted bitfield.  */
+        int bf = ft32_as_bitfield (0xffffffff ^ INTVAL (x));
+        fprintf (file, "(%d<<5)|%d", bf >> 5, bf & 31);
+        return;
+      }
+
+    case 'b':
+      {
+        ft32_print_operand (file, XEXP (x, 0), 0);
+        return;
+      }
+
+    default:
+      LOSE_AND_RETURN ("invalid operand modifier letter", x);
+    }
+
+  /* Print an operand as without a modifier letter.  */
+  switch (GET_CODE (operand))
+    {
+    case REG:
+      fprintf (file, "%s", reg_names[REGNO (operand)]);
+      return;
+
+    case MEM:
+      output_address (XEXP (operand, 0));
+      return;
+
+    default:
+      /* No need to handle all strange variants, let output_addr_const
+         do it for us.  */
+      if (CONSTANT_P (operand))
+        {
+          output_addr_const (file, operand);
+          return;
+        }
+
+      LOSE_AND_RETURN ("unexpected operand", x);
+    }
+}
+
+/* Emit instructions loading the 32-bit constant I into register
+   operand DST; insns are emitted directly via output_asm_insn and ""
+   is returned as the pattern text.  Strategies, in order:
+     1. 20-bit signed range: a single "ldk.l".
+     2. 30-bit signed range: recurse for the top bits, then "ldl.l"
+        shifts in the low 10 bits.
+     3. Otherwise, search for a left-rotation of I that fits the
+        20-bit range, load that and rotate it back with "ror.l".
+     4. Fall back to the recursive ldk/ldl pair.  */
+
+const char *
+ft32_load_immediate (rtx dst, int32_t i)
+{
+  char pattern[100];
+
+  if ((-524288 <= i) && (i <= 524287))
+    {
+      sprintf (pattern, "ldk.l %%0,%d", i);
+      output_asm_insn (pattern, &dst);
+    }
+  else if ((-536870912 <= i) && (i <= 536870911))
+    {
+      ft32_load_immediate (dst, i >> 10);
+      sprintf (pattern, "ldl.l %%0,%%0,%d", i & 1023);
+      output_asm_insn (pattern, &dst);
+    }
+  else
+    {
+      int rd; // rotate distance
+      uint32_t u = i;
+      for (rd = 1; rd < 32; rd++)
+        {
+          /* Rotate left by one bit and test whether the result now
+             fits the 20-bit immediate range.  */
+          u = ((u >> 31) & 1) | (u << 1);
+          if ((-524288 <= (int32_t) u) && ((int32_t) u <= 524287))
+            {
+              ft32_load_immediate (dst, (int32_t) u);
+              sprintf (pattern, "ror.l %%0,%%0,%d", rd);
+              output_asm_insn (pattern, &dst);
+              return "";
+            }
+        }
+      /* No rotation worked; build the value in two steps.  */
+      ft32_load_immediate (dst, i >> 10);
+      sprintf (pattern, "ldl.l %%0,%%0,%d", i & 1023);
+      output_asm_insn (pattern, &dst);
+    }
+
+  return "";
+}
+
+// x is a bit mask, for example:
+// 00000000000000000000001111111110
+// If x is a single contiguous run of 1..16 set bits, return the
+// bitfield spec ((width & 15) << 5) | lobit; in the above case
+// that is ((9 << 5) | 1).  A width of 16 is encoded as 0 (16 & 15).
+// Otherwise return -1.
+//
+
+#define NBITS(n) ((1U << (n)) - 1U)
+
+int
+ft32_as_bitfield (unsigned int x)
+{
+  int lobit, hibit;
+
+  /* All-zero mask: not a bitfield.  */
+  if (x == 0)
+    return -1;
+
+  /* Locate the lowest and highest set bits.  */
+  for (lobit = 0; lobit < 32; lobit++)
+    if (x & (1 << lobit))
+      break;
+  for (hibit = 31; hibit >= 0; hibit--)
+    if (x & (1 << hibit))
+      break;
+
+  int width = 1 + hibit - lobit;
+  if (width > 16)
+    return -1;
+
+  /* Reject masks with holes between the extremes.  */
+  if (x != (NBITS (width) << lobit))
+    return -1; // not a clean bitfield
+
+  return ((width & 15) << 5) | lobit;
+}
+
+/* Per-function machine data. */
+struct GTY (()) machine_function
+{
+ /* Number of bytes saved on the stack for callee saved registers. */
+ int callee_saved_reg_size;
+
+ /* Number of bytes saved on the stack for local variables. */
+ int local_vars_size;
+
+ /* The sum of 2 sizes: locals vars and padding byte for saving the
+ * registers. Used in expand_prologue () and expand_epilogue (). */
+ int size_for_adjusting_sp;
+};
+
+/* Zero initialization is OK for all current fields. */
+
+static struct machine_function *
+ft32_init_machine_status (void)
+{
+ return ggc_cleared_alloc < machine_function > ();
+}
+
+
+/* The TARGET_OPTION_OVERRIDE worker.
+   All this currently does is set init_machine_status.  */
+static void
+ft32_option_override (void)
+{
+  /* Set the per-function-data initializer.  */
+  init_machine_status = ft32_init_machine_status;
+}
+
+/* Implement targetm.select_section.  */
+static section *
+ft32_select_section (tree decl, int reloc, unsigned HOST_WIDE_INT align)
+{
+  /* Variables and constants in the PM (program memory) address
+     space go into a special section named "._pm".  (The original
+     comment mentioned "__ea"/"._ea", which does not match this
+     code.)  */
+  if (TREE_TYPE (decl) != error_mark_node
+      && TYPE_ADDR_SPACE (TREE_TYPE (decl)) == ADDR_SPACE_PM)
+    {
+      /* We might get called with string constants, but get_named_section
+         doesn't like them as they are not DECLs.  Also, we need to set
+         flags in that case.  */
+      if (!DECL_P (decl))
+        return get_section ("._pm", SECTION_WRITE | SECTION_DEBUG, NULL);
+
+      return get_named_section (decl, "._pm", reloc);
+    }
+
+  return default_elf_select_section (decl, reloc, align);
+}
+
+/* Compute the size of the local area and the size to be adjusted by the
+ * prologue and epilogue.  Fills in cfun->machine: local_vars_size
+ * (frame size rounded up to the stack alignment), callee_saved_reg_size
+ * (4 bytes per live call-saved register) and size_for_adjusting_sp.  */
+
+static void
+ft32_compute_frame (void)
+{
+  /* For aligning the local variables.  */
+  int stack_alignment = STACK_BOUNDARY / BITS_PER_UNIT;
+  int padding_locals;
+  int regno;
+
+  /* Padding needed for each element of the frame.  */
+  cfun->machine->local_vars_size = get_frame_size ();
+
+  /* Align to the stack alignment.  */
+  padding_locals = cfun->machine->local_vars_size % stack_alignment;
+  if (padding_locals)
+    padding_locals = stack_alignment - padding_locals;
+
+  cfun->machine->local_vars_size += padding_locals;
+
+  cfun->machine->callee_saved_reg_size = 0;
+
+  /* Save callee-saved registers.  */
+  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+    if (df_regs_ever_live_p (regno) && (!call_used_regs[regno]))
+      cfun->machine->callee_saved_reg_size += 4;
+
+  cfun->machine->size_for_adjusting_sp =
+    crtl->args.pretend_args_size
+    + cfun->machine->local_vars_size
+    + (ACCUMULATE_OUTGOING_ARGS ? crtl->outgoing_args_size : 0);
+}
+
+// Must use LINK/UNLINK when...
+// the frame is bigger than 512 bytes so cannot just "SUB" from SP
+// the function actually uses $fp
+
+static int
+must_link (void)
+{
+  int bigframe = (cfun->machine->size_for_adjusting_sp >= 512);
+  /* The original expression tested df_regs_ever_live_p (FT32_FP)
+     twice; the redundant duplicate is removed (behavior unchanged).  */
+  return bigframe || frame_pointer_needed || df_regs_ever_live_p (FT32_FP);
+}
+
+/* Expand the function prologue: save live call-saved registers and
+   allocate the frame, either with a LINK insn (when must_link ())
+   or by adjusting $sp directly.  */
+
+void
+ft32_expand_prologue (void)
+{
+  int regno;
+  rtx insn;
+
+  ft32_compute_frame ();
+
+  /* Fast path: no LINK needed and exactly one call-saved register
+     (4 bytes) to preserve.  NOTE(review): this assumes that single
+     register is $r13, the first call-saved register -- confirm.  */
+  if (!must_link () && (cfun->machine->callee_saved_reg_size == 4))
+    {
+      insn =
+        emit_insn (gen_link
+                   (gen_rtx_REG (Pmode, FT32_R13),
+                    GEN_INT (-cfun->machine->size_for_adjusting_sp)));
+      RTX_FRAME_RELATED_P (insn) = 1;
+      return;
+    }
+  /* Save callee-saved registers.  */
+  if (optimize_size)
+    {
+      /* When optimizing for size, emit a call to a shared prologue
+         stub, parameterized by the highest-numbered saved register.  */
+      for (regno = FIRST_PSEUDO_REGISTER; regno-- > 0;)
+        {
+          if (!fixed_regs[regno] && !call_used_regs[regno]
+              && df_regs_ever_live_p (regno))
+            {
+              rtx preg = gen_rtx_REG (Pmode, regno);
+              emit_insn (gen_call_prolog (preg));
+              break;
+            }
+        }
+    }
+  else
+    {
+      /* Otherwise push each live call-saved register individually.  */
+      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+        {
+          if (!fixed_regs[regno] && df_regs_ever_live_p (regno)
+              && !call_used_regs[regno])
+            {
+              insn = emit_insn (gen_movsi_push (gen_rtx_REG (Pmode, regno)));
+              RTX_FRAME_RELATED_P (insn) = 1;
+            }
+        }
+    }
+
+  /* The stack adjustment must fit in 16 bits.  */
+  if (65536 <= cfun->machine->size_for_adjusting_sp)
+    {
+      error ("stack frame must be smaller than 64K");
+      return;
+    }
+  if (must_link ())
+    {
+      insn =
+        emit_insn (gen_link
+                   (gen_rtx_REG (Pmode, FT32_FP),
+                    GEN_INT (-cfun->machine->size_for_adjusting_sp)));
+      RTX_FRAME_RELATED_P (insn) = 1;
+    }
+  else if (cfun->machine->size_for_adjusting_sp > 0)
+    {
+      insn = emit_insn (gen_addsi3 (gen_rtx_REG (SImode, FT32_SP),
+                                    gen_rtx_REG (SImode, FT32_SP),
+                                    GEN_INT (-(cfun->machine->
+                                               size_for_adjusting_sp))));
+      RTX_FRAME_RELATED_P (insn) = 1;
+    }
+}
+
+/* Expand the function epilogue: undo the frame allocation (UNLINK or
+   $sp adjustment), restore call-saved registers and return.  */
+
+void
+ft32_expand_epilogue (void)
+{
+  int regno;
+
+  /* Fast path: a 24-byte frame with no saved registers can use the
+     dedicated "returner24" pattern.  */
+  if (!must_link ()
+      && (cfun->machine->size_for_adjusting_sp == 24)
+      && (cfun->machine->callee_saved_reg_size == 0))
+    {
+      emit_jump_insn (gen_returner24 ());
+      return;
+    }
+
+  // Set when the epilog code will also add 24 to $sp
+  int epilog24 = (!must_link ()
+                  && (cfun->machine->size_for_adjusting_sp == 24)
+                  && optimize_size);
+
+  if (must_link ())
+    {
+      emit_insn (gen_unlink ());
+    }
+  else if (!epilog24 && (cfun->machine->size_for_adjusting_sp > 0))
+    {
+      emit_insn (gen_addsi3 (gen_rtx_REG (SImode, FT32_SP),
+                             gen_rtx_REG (SImode, FT32_SP),
+                             GEN_INT (cfun->machine->size_for_adjusting_sp)));
+    }
+
+  if (cfun->machine->callee_saved_reg_size != 0)
+    {
+      /* Restore in the reverse of the push order.  When optimizing
+         for size, jump to a shared epilogue stub (which also handles
+         the return) at the first -- highest-numbered -- saved
+         register.  */
+      for (regno = FIRST_PSEUDO_REGISTER; regno-- > 0;)
+        {
+          if (!fixed_regs[regno] && !call_used_regs[regno]
+              && df_regs_ever_live_p (regno))
+            {
+              rtx preg = gen_rtx_REG (Pmode, regno);
+              if (optimize_size)
+                {
+                  if (epilog24)
+                    emit_insn (gen_jump_epilog24 (preg));
+                  else
+                    emit_insn (gen_jump_epilog (preg));
+                  return;
+                }
+              emit_insn (gen_movsi_pop (preg));
+            }
+        }
+    }
+
+  emit_jump_insn (gen_returner ());
+}
+
+#undef TARGET_FRAME_POINTER_REQUIRED
+#define TARGET_FRAME_POINTER_REQUIRED ft32_frame_pointer_required
+static bool
+ft32_frame_pointer_required (void)
+{
+ return cfun->calls_alloca;
+}
+
+#undef TARGET_CAN_ELIMINATE
+#define TARGET_CAN_ELIMINATE ft32_can_eliminate
+
+/* Return true if register FROM can be eliminated via register TO.
+   All eliminations are currently allowed.  The original body had an
+   unreachable second return after "return 1;"; that dead statement
+   is removed here.  NOTE(review): if the stricter test
+   (to == FRAME_POINTER_REGNUM || !ft32_frame_pointer_required ())
+   was the intended behavior, restore it in place of "return true;".  */
+
+static bool
+ft32_can_eliminate (const int from ATTRIBUTE_UNUSED,
+                    const int to ATTRIBUTE_UNUSED)
+{
+  return true;
+}
+
+/* Implements the macro INITIAL_ELIMINATION_OFFSET, return the OFFSET
+   (in bytes) between register FROM and register TO once the frame
+   layout has been computed.  */
+
+int
+ft32_initial_elimination_offset (int from, int to)
+{
+  ft32_compute_frame ();
+
+  if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
+    {
+      /* Saved registers plus two extra words.  NOTE(review): confirm
+         which saved words (return address / $fp) the 2 covers.  */
+      return cfun->machine->callee_saved_reg_size + 2 * UNITS_PER_WORD;
+    }
+
+  if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
+    {
+      /* Two extra words when LINK is used, otherwise one.  */
+      int arg_offset;
+      arg_offset = must_link ()? 2 : 1;
+      return ((cfun->machine->callee_saved_reg_size
+               + arg_offset * UNITS_PER_WORD)
+              + cfun->machine->size_for_adjusting_sp);
+    }
+
+  if ((from == FRAME_POINTER_REGNUM) && (to == STACK_POINTER_REGNUM))
+    {
+      return cfun->machine->size_for_adjusting_sp;
+    }
+
+  /* No other eliminations are defined for this target.  */
+  gcc_unreachable ();
+}
+
+/* Worker function for TARGET_SETUP_INCOMING_VARARGS.  Spill the
+   as-yet-unclaimed argument registers (regnos *CUM..7, i.e. up to
+   $r5 -- FT32_R0 is regno 2) to their slots relative to the argument
+   pointer so va_arg can walk them as memory.  *PRETEND_SIZE is set
+   to the number of bytes spilled.  */
+
+static void
+ft32_setup_incoming_varargs (cumulative_args_t cum_v,
+                             enum machine_mode mode ATTRIBUTE_UNUSED,
+                             tree type ATTRIBUTE_UNUSED,
+                             int *pretend_size, int no_rtl)
+{
+  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
+  int regno;
+  int regs = 8 - *cum;      /* Argument registers still unclaimed.  */
+
+  *pretend_size = regs < 0 ? 0 : GET_MODE_SIZE (SImode) * regs;
+
+  if (no_rtl)
+    return;
+
+  for (regno = *cum; regno < 8; regno++)
+    {
+      rtx reg = gen_rtx_REG (SImode, regno);
+      /* Slot offset is measured from the first argument register,
+         FT32_R0.  */
+      rtx slot = gen_rtx_PLUS (Pmode,
+                               gen_rtx_REG (SImode, ARG_POINTER_REGNUM),
+                               GEN_INT (UNITS_PER_WORD * (regno - FT32_R0)));
+
+      emit_move_insn (gen_rtx_MEM (SImode, slot), reg);
+    }
+}
+
+
+/* Return the fixed registers used for condition codes. */
+
+static bool
+ft32_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
+{
+ *p1 = CC_REG;
+ *p2 = INVALID_REGNUM;
+ return true;
+}
+
+/* Return the next register to be used to hold a function argument or
+ NULL_RTX if there's no more space. */
+
+static rtx
+ft32_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
+ const_tree type ATTRIBUTE_UNUSED,
+ bool named ATTRIBUTE_UNUSED)
+{
+ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
+
+ if (*cum < 8)
+ return gen_rtx_REG (mode, *cum);
+ else
+ return NULL_RTX;
+}
+
+#define FT32_FUNCTION_ARG_SIZE(MODE, TYPE) \
+ ((MODE) != BLKmode ? GET_MODE_SIZE (MODE) \
+ : (unsigned) int_size_in_bytes (TYPE))
+
+static void
+ft32_function_arg_advance (cumulative_args_t cum_v, enum machine_mode mode,
+ const_tree type, bool named ATTRIBUTE_UNUSED)
+{
+ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
+
+ *cum = (*cum < FT32_R6
+ ? *cum + ((3 + FT32_FUNCTION_ARG_SIZE (mode, type)) / 4) : *cum);
+}
+
+/* Return non-zero if the function argument described by TYPE is to be
+ passed by reference. */
+
+static bool
+ft32_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
+ enum machine_mode mode, const_tree type,
+ bool named ATTRIBUTE_UNUSED)
+{
+ unsigned HOST_WIDE_INT size;
+
+ if (type)
+ {
+ if (AGGREGATE_TYPE_P (type))
+ return true;
+ size = int_size_in_bytes (type);
+ }
+ else
+ size = GET_MODE_SIZE (mode);
+
+ return size > 4 * 6;
+}
+
+/* Some function arguments will only partially fit in the registers
+   that hold arguments.  Given a new arg, return the number of bytes
+   that fit in argument passing registers.  The six argument
+   registers hold 4 * 6 = 24 bytes; *CUM is a raw regno, biased by
+   FT32_R0 (== 2).  */
+
+static int
+ft32_arg_partial_bytes (cumulative_args_t cum_v,
+                        enum machine_mode mode, tree type, bool named)
+{
+  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
+  int bytes_left, size;
+
+  /* All argument registers are already consumed.  */
+  if (*cum >= 8)
+    return 0;
+
+  /* A by-reference argument occupies one pointer word.  */
+  if (ft32_pass_by_reference (cum_v, mode, type, named))
+    size = 4;
+  else if (type)
+    {
+      if (AGGREGATE_TYPE_P (type))
+        return 0;
+      size = int_size_in_bytes (type);
+    }
+  else
+    size = GET_MODE_SIZE (mode);
+
+  /* Bytes of argument registers still free; *cum - 2 converts the
+     regno back to a zero-based argument-register index.  */
+  bytes_left = (4 * 6) - ((*cum - 2) * 4);
+
+  if (size > bytes_left)
+    return bytes_left;
+  else
+    return 0;
+}
+
+/* Used by constraints.md to distinguish between GENERIC and PM
+   memory addresses: nonzero when O is a MEM whose address space is
+   ADDR_SPACE_PM, judged from its MEM_EXPR type when one is attached,
+   otherwise from the MEM's own address-space field.  */
+
+int
+ft32_is_mem_pm (rtx o)
+{
+  if (!MEM_P (o))
+    return false;
+  return (MEM_EXPR (o)
+          ? TYPE_ADDR_SPACE (TREE_TYPE (MEM_EXPR (o))) == ADDR_SPACE_PM
+          : MEM_ADDR_SPACE (o) == ADDR_SPACE_PM);
+}
+
+/* The Global `targetm' Variable. */
+
+/* Initialize the GCC target structure. */
+
+#undef TARGET_PROMOTE_PROTOTYPES
+#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
+
+#undef TARGET_RETURN_IN_MEMORY
+#define TARGET_RETURN_IN_MEMORY ft32_return_in_memory
+#undef TARGET_MUST_PASS_IN_STACK
+#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
+#undef TARGET_PASS_BY_REFERENCE
+#define TARGET_PASS_BY_REFERENCE ft32_pass_by_reference
+#undef TARGET_ARG_PARTIAL_BYTES
+#define TARGET_ARG_PARTIAL_BYTES ft32_arg_partial_bytes
+#undef TARGET_FUNCTION_ARG
+#define TARGET_FUNCTION_ARG ft32_function_arg
+#undef TARGET_FUNCTION_ARG_ADVANCE
+#define TARGET_FUNCTION_ARG_ADVANCE ft32_function_arg_advance
+
+
+#undef TARGET_SETUP_INCOMING_VARARGS
+#define TARGET_SETUP_INCOMING_VARARGS ft32_setup_incoming_varargs
+
+#undef TARGET_FIXED_CONDITION_CODE_REGS
+#define TARGET_FIXED_CONDITION_CODE_REGS ft32_fixed_condition_code_regs
+
+/* Define this to return an RTX representing the place where a
+ function returns or receives a value of data type RET_TYPE, a tree
+ node node representing a data type. */
+#undef TARGET_FUNCTION_VALUE
+#define TARGET_FUNCTION_VALUE ft32_function_value
+#undef TARGET_LIBCALL_VALUE
+#define TARGET_LIBCALL_VALUE ft32_libcall_value
+#undef TARGET_FUNCTION_VALUE_REGNO_P
+#define TARGET_FUNCTION_VALUE_REGNO_P ft32_function_value_regno_p
+
+#undef TARGET_OPTION_OVERRIDE
+#define TARGET_OPTION_OVERRIDE ft32_option_override
+
+#undef TARGET_ASM_SELECT_SECTION
+#define TARGET_ASM_SELECT_SECTION ft32_select_section
+
+#undef TARGET_VALID_POINTER_MODE
+#define TARGET_VALID_POINTER_MODE ft32_valid_pointer_mode
+static bool
+ft32_valid_pointer_mode (enum machine_mode mode)
+{
+ if (mode == SImode)
+ return 1;
+ return 0;
+}
+
+#undef TARGET_ADDR_SPACE_POINTER_MODE
+#define TARGET_ADDR_SPACE_POINTER_MODE ft32_addr_space_pointer_mode
+static enum machine_mode
+ft32_addr_space_pointer_mode (addr_space_t addrspace ATTRIBUTE_UNUSED)
+{
+ return Pmode;
+}
+
+#undef TARGET_ADDR_SPACE_ADDRESS_MODE
+#define TARGET_ADDR_SPACE_ADDRESS_MODE ft32_addr_space_address_mode
+static enum machine_mode
+ft32_addr_space_address_mode (addr_space_t addrspace ATTRIBUTE_UNUSED)
+{
+ return Pmode;
+}
+
+#undef TARGET_ADDR_SPACE_SUBSET_P
+#define TARGET_ADDR_SPACE_SUBSET_P ft32_addr_space_subset_p
+static bool
+ft32_addr_space_subset_p (addr_space_t subset ATTRIBUTE_UNUSED,
+ addr_space_t superset ATTRIBUTE_UNUSED)
+{
+ return false;
+}
+
+#undef TARGET_CASE_VALUES_THRESHOLD
+#define TARGET_CASE_VALUES_THRESHOLD ft32_target_case_values_threshold
+
+static unsigned int
+ft32_target_case_values_threshold (void)
+{
+ return 4;
+}
+
+#undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
+#define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
+ ft32_addr_space_legitimate_address_p
+
+
+// Enabling LRA gives the infamous
+// internal compiler error: Max. number of generated reload insns per insn is achieved (90)
+// errors e.g. when compiling sieve.c
+
+static bool
+ft32_lra_p (void)
+{
+ return ft32_lra_flag;
+}
+
+#undef TARGET_LRA_P
+#define TARGET_LRA_P ft32_lra_p
+
+/* Return true when register rtx R may serve as a base register.
+   In strict mode only registers passing HARD_REGNO_OK_FOR_BASE_P
+   qualify -- directly for hard registers, or via the assigned hard
+   register for allocated pseudos.  In non-strict mode any pseudo
+   also qualifies.  */
+
+static bool
+reg_ok_for_base_p (rtx r, bool strict)
+{
+  int regno = REGNO (r);
+
+  if (strict)
+    {
+      if (HARD_REGNO_OK_FOR_BASE_P (regno))
+        return true;
+      /* The original indexed HARD_REGNO_OK_FOR_BASE_P with
+         reg_renumber[regno] even for hard registers and for pseudos
+         not yet assigned a hard register (reg_renumber == -1);
+         guard both cases here.  */
+      return (regno >= FIRST_PSEUDO_REGISTER
+              && reg_renumber[regno] >= 0
+              && HARD_REGNO_OK_FOR_BASE_P (reg_renumber[regno]));
+    }
+  else
+    return (regno >= FIRST_PSEUDO_REGISTER
+            || HARD_REGNO_OK_FOR_BASE_P (regno));
+}
+
+static bool
+ft32_addr_space_legitimate_address_p (enum machine_mode mode, rtx x,
+ bool strict,
+ addr_space_t as ATTRIBUTE_UNUSED)
+{
+ if (mode != BLKmode)
+ {
+ if (GET_CODE (x) == PLUS)
+ {
+ rtx op1, op2;
+ op1 = XEXP (x, 0);
+ op2 = XEXP (x, 1);
+ if (GET_CODE (op1) == REG
+ && CONST_INT_P (op2)
+ && INTVAL (op2) >= -128
+ && INTVAL (op2) < 128 && reg_ok_for_base_p (op1, strict))
+ goto yes;
+ if (GET_CODE (op1) == SYMBOL_REF && CONST_INT_P (op2))
+ goto yes;
+ }
+ if (REG_P (x) && reg_ok_for_base_p (x, strict))
+ goto yes;
+ if (GET_CODE (x) == SYMBOL_REF
+ || GET_CODE (x) == LABEL_REF || CONST_INT_P (x))
+ goto yes;
+ }
+ else
+ {
+ if (REG_P (x) && reg_ok_for_base_p (x, strict))
+ goto yes;
+ }
+
+ return 0;
+yes:
+ return 1;
+}
+
+struct gcc_target targetm = TARGET_INITIALIZER;
+
+#include "gt-ft32.h"
--- /dev/null
+/* Target Definitions for ft32.
+ Copyright (C) 2015 Free Software Foundation, Inc.
+ Contributed by FTDI <support@ftdi.com>
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published
+ by the Free Software Foundation; either version 3, or (at your
+ option) any later version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+ or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+ License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3. If not see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef GCC_FT32_H
+#define GCC_FT32_H
+
+#undef STARTFILE_SPEC
+#define STARTFILE_SPEC "crt0%O%s %{msim:crti.o%s} %{!msim:crti-hw.o%s} crtbegin.o%s"
+
+/* Provide an ENDFILE_SPEC appropriate for svr4. Here we tack on our own
+ magical crtend.o file (see crtstuff.c) which provides part of the
+ support for getting C++ file-scope static object constructed before
+ entering `main', followed by the normal svr3/svr4 "finalizer" file,
+ which is either `gcrtn.o' or `crtn.o'. */
+
+#undef ENDFILE_SPEC
+#define ENDFILE_SPEC "crtend.o%s crtn.o%s"
+
+/* Provide a LIB_SPEC appropriate for svr4. Here we tack on the default
+ standard C library (unless we are building a shared library) and
+ the simulator BSP code. */
+
+#undef LIB_SPEC
+#define LIB_SPEC "%{!shared:%{!symbolic:-lc}} \
+ %{msim:-Tsim.ld}"
+
+#undef LINK_SPEC
+#define LINK_SPEC "%{h*} %{v:-V} \
+ %{static:-Bstatic} %{shared:-shared} %{symbolic:-Bsymbolic}"
+
+/* Layout of Source Language Data Types */
+
+#define INT_TYPE_SIZE 32
+#define SHORT_TYPE_SIZE 16
+#define LONG_TYPE_SIZE 32
+#define LONG_LONG_TYPE_SIZE 64
+
+#define FLOAT_TYPE_SIZE 32
+#define DOUBLE_TYPE_SIZE 64
+#define LONG_DOUBLE_TYPE_SIZE 64
+
+#define DEFAULT_SIGNED_CHAR 1
+
+#undef SIZE_TYPE
+#define SIZE_TYPE "unsigned int"
+
+#undef PTRDIFF_TYPE
+#define PTRDIFF_TYPE "int"
+
+#undef WCHAR_TYPE
+#define WCHAR_TYPE "long int"
+
+#undef WCHAR_TYPE_SIZE
+#define WCHAR_TYPE_SIZE BITS_PER_WORD
+
+/* 36 entries: 32 hard registers followed by the four internal
+   registers ?fp, ?ap, $pc and ?cc used for elimination and CC.
+   NOTE(review): hard register 31 is named "$cc" here, yet the
+   FT32_R29 macro below assigns it number 31 -- confirm whether the
+   name table or the macro is the intended one.  */
+#define REGISTER_NAMES { \
+ "$fp", "$sp", "$r0", "$r1", \
+ "$r2", "$r3", "$r4", "$r5", \
+ "$r6", "$r7", "$r8", "$r9", \
+ "$r10", "$r11", "$r12", "$r13", \
+ "$r14", "$r15", "$r16", "$r17", "$r18", "$r19", "$r20", "$r21", "$r22", "$r23", "$r24", "$r25", "$r26", "$r27", "$r28", "$cc", \
+ "?fp", "?ap", "$pc", "?cc" }
+
+/* Symbolic hard-register numbers matching REGISTER_NAMES above.  */
+#define FT32_FP 0
+#define FT32_SP 1
+#define FT32_R0 2
+#define FT32_R1 3
+#define FT32_R2 4
+#define FT32_R3 5
+#define FT32_R4 6
+#define FT32_R5 7
+#define FT32_R6 8
+#define FT32_R7 9
+#define FT32_R8 10
+#define FT32_R9 11
+#define FT32_R10 12
+#define FT32_R11 13
+#define FT32_R12 14
+#define FT32_R13 15
+#define FT32_R14 16
+#define FT32_R15 17
+#define FT32_R16 18
+#define FT32_R17 19
+#define FT32_R18 20
+#define FT32_R19 21
+#define FT32_R20 22
+#define FT32_R21 23
+#define FT32_R22 24
+#define FT32_R23 25
+#define FT32_R24 26
+#define FT32_R25 27
+#define FT32_R26 28
+#define FT32_R27 29
+#define FT32_R28 30
+#define FT32_R29 31
+#define FT32_QAP (32 + 1)
+#define FT32_PC (32 + 2)
+#define FT32_CC (32 + 3)
+#define FIRST_PSEUDO_REGISTER (32 + 4)
+
+enum reg_class
+{
+ NO_REGS,
+ GENERAL_REGS,
+ SPECIAL_REGS,
+ CC_REGS,
+ ALL_REGS,
+ LIM_REG_CLASSES
+};
+
+/* Two 32-bit mask words per class, covering the 36 registers,
+   low word (registers 0-31) first.  */
+#define REG_CLASS_CONTENTS \
+{ { 0x00000000, 0x00000000 }, /* Empty */ \
+ { 0xFFFFFFFF, 0x00000003 }, /* All 32 hard regs, plus ?fp and ?ap */ \
+ { 0x00000000, 0x00000004 }, /* $pc */ \
+ { 0x00000000, 0x00000008 }, /* ?cc */ \
+ { 0xFFFFFFFF, 0x0000000F } /* All registers */ \
+}
+
+#define N_REG_CLASSES LIM_REG_CLASSES
+
+#define REG_CLASS_NAMES {\
+ "NO_REGS", \
+ "GENERAL_REGS", \
+ "SPECIAL_REGS", \
+ "CC_REGS", \
+ "ALL_REGS" }
+
+/* Nonzero entries mark registers not available for allocation.
+   The last four entries are the internal ?fp, ?ap, $pc, ?cc regs
+   (their row was mislabelled "r30 r31").  */
+#define FIXED_REGISTERS /* fp sp r0 r1 */ { 1, 1, 0, 0, \
+ /* r2 r3 r4 r5 */ 0, 0, 0, 0, \
+ /* r6 r7 r8 r9 */ 0, 0, 0, 0, \
+ /* r10 r11 r12 r13 */ 0, 0, 0, 0, \
+ /* r14 r15 r16 r17 */ 0, 0, 0, 0, \
+ /* r18 r19 r20 r21 */ 0, 0, 0, 0, \
+ /* r22 r23 r24 r25 */ 0, 0, 0, 0, \
+ /* r26 r27 r28 r29 */ 0, 0, 1, 1, \
+ /* ?fp ?ap pc ?cc */ 1, 1, 1, 1 }
+
+/* Registers clobbered by calls: r0-r12 are caller-saved,
+   r13 onwards are callee-saved.  */
+#define CALL_USED_REGISTERS \
+ /* fp sp r0 r1 */ { 1, 1, 1, 1, \
+ /* r2 r3 r4 r5 */ 1, 1, 1, 1, \
+ /* r6 r7 r8 r9 */ 1, 1, 1, 1, \
+ /* r10 r11 r12 r13 */ 1, 1, 1, 0, \
+ /* r14 r15 r16 r17 */ 0, 0, 0, 0, \
+ /* r18 r19 r20 r21 */ 0, 0, 0, 0, \
+ /* r22 r23 r24 r25 */ 0, 0, 0, 0, \
+ /* r26 r27 r28 r29 */ 0, 0, 1, 1, \
+ /* ?fp ?ap pc ?cc */ 1, 1, 1, 1 }
+
+/* We can't copy to or from our CC register. */
+#define AVOID_CCMODE_COPIES 1
+
+/* A C expression that is nonzero if it is permissible to store a
+ value of mode MODE in hard register number REGNO (or in several
+ registers starting with that one). All FT32 registers are
+ treated as equivalent here, so this is always 1. */
+#define HARD_REGNO_MODE_OK(R,M) 1
+
+/* A C expression whose value is a register class containing hard
+ register REGNO. */
+#define REGNO_REG_CLASS(R) ((R < FT32_PC) ? GENERAL_REGS : \
+ (R == FT32_CC ? CC_REGS : SPECIAL_REGS))
+
+/* A C expression for the number of consecutive hard registers,
+ starting at register number REGNO, required to hold a value of mode
+ MODE. */
+#define HARD_REGNO_NREGS(REGNO, MODE) \
+ ((GET_MODE_SIZE (MODE) + UNITS_PER_WORD - 1) \
+ / UNITS_PER_WORD)
+
+/* A C expression that is nonzero if a value of mode MODE1 is
+ accessible in mode MODE2 without copying. */
+#define MODES_TIEABLE_P(MODE1, MODE2) 1
+
+/* The Overall Framework of an Assembler File */
+
+/* Clear ASM_SPEC: no extra options are passed to the assembler.  */
+#undef ASM_SPEC
+#define ASM_COMMENT_START "#"
+#define ASM_APP_ON ""
+#define ASM_APP_OFF ""
+
+#define FILE_ASM_OP "\t.file\n"
+
+/* Switch to the text or data segment. */
+#define TEXT_SECTION_ASM_OP "\t.text"
+#define DATA_SECTION_ASM_OP "\t.data"
+
+/* Assembler Commands for Alignment */
+
+#define ASM_OUTPUT_ALIGN(STREAM,POWER) \
+ fprintf (STREAM, "\t.p2align\t%d\n", POWER);
+
+/* A C compound statement to output to stdio stream STREAM the
+ assembler syntax for an instruction operand X. */
+#define PRINT_OPERAND(STREAM, X, CODE) ft32_print_operand (STREAM, X, CODE)
+
+#define PRINT_OPERAND_ADDRESS(STREAM ,X) ft32_print_operand_address (STREAM, X)
+
+/* Output and Generation of Labels */
+
+#define GLOBAL_ASM_OP "\t.global\t"
+
+/* Jump tables are emitted as "jmp" instructions (see
+   ASM_OUTPUT_ADDR_VEC_ELT), so they must live in .text.  */
+#define JUMP_TABLES_IN_TEXT_SECTION 1
+
+/* This is how to output an element of a case-vector that is absolute.
+   Each entry is a real "jmp" instruction, which is why jump tables
+   are placed in the text section (JUMP_TABLES_IN_TEXT_SECTION).
+   The stray trailing backslash has been removed: it continued the
+   macro onto the following source line, silently absorbing it into
+   the macro body.  */
+#define ASM_OUTPUT_ADDR_VEC_ELT(FILE, VALUE) \
+ fprintf (FILE, "\tjmp\t.L%d\n", VALUE);
+
+/* Passing Arguments in Registers */
+
+/* A C type for declaring a variable that is used as the first
+ argument of `FUNCTION_ARG' and other related values.  For FT32 it
+ is simply the next argument register number. */
+#define CUMULATIVE_ARGS unsigned int
+
+/* If defined, the maximum amount of space required for outgoing arguments
+ will be computed and placed into the variable
+ `current_function_outgoing_args_size'. No space will be pushed
+ onto the stack for each call; instead, the function prologue should
+ increase the stack frame size by this amount. */
+#define ACCUMULATE_OUTGOING_ARGS 1
+
+/* A C statement (sans semicolon) for initializing the variable CUM
+ for the state at the beginning of the argument list.
+ For ft32, the first arg is passed in register 2 (aka $r0). */
+#define INIT_CUMULATIVE_ARGS(CUM,FNTYPE,LIBNAME,FNDECL,N_NAMED_ARGS) \
+ (CUM = FT32_R0)
+
+/* How Scalar Function Values Are Returned */
+
+/* STACK AND CALLING */
+
+/* Define this macro if pushing a word onto the stack moves the stack
+ pointer to a smaller address. */
+#define STACK_GROWS_DOWNWARD
+
+#define INITIAL_FRAME_POINTER_OFFSET(DEPTH) (DEPTH) = 0
+
+/* Offset from the frame pointer to the first local variable slot to
+ be allocated. */
+#define STARTING_FRAME_OFFSET 0
+
+/* Define this if the above stack space is to be considered part of the
+ space allocated by the caller. */
+#define OUTGOING_REG_PARM_STACK_SPACE(FNTYPE) 1
+/* #define STACK_PARMS_IN_REG_PARM_AREA */
+
+/* Define this if it is the responsibility of the caller to allocate
+ the area reserved for arguments passed in registers.  Six words,
+ matching the six argument registers $r0-$r5 (FUNCTION_ARG_REGNO_P). */
+#define REG_PARM_STACK_SPACE(FNDECL) (6 * UNITS_PER_WORD)
+
+/* Offset from the argument pointer register to the first argument's
+ address. On some machines it may depend on the data type of the
+ function. */
+#define FIRST_PARM_OFFSET(F) 0
+
+/* Define this macro to nonzero value if the addresses of local variable slots
+ are at negative offsets from the frame pointer. */
+#define FRAME_GROWS_DOWNWARD 1
+
+/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
+ the stack pointer does not matter. The value is tested only in
+ functions that have frame pointers.
+ No definition is equivalent to always zero. */
+
+#define EXIT_IGNORE_STACK 0
+
+/* Define this macro as a C expression that is nonzero for registers that are
+ used by the epilogue or the return pattern. The stack and frame
+ pointer registers are already assumed to be used as needed.
+ NOTE(review): $r5 is kept live at the epilogue -- presumably for the
+ shared __epilog_* helper routines; confirm against libgcc. */
+#define EPILOGUE_USES(R) (R == FT32_R5)
+
+/* A C expression whose value is RTL representing the location of the
+ incoming return address at the beginning of any function, before
+ the prologue.
+ NOTE(review): the offset 333 * UNITS_PER_WORD looks like a magic
+ placeholder value -- confirm the real offset of the saved return
+ address relative to $sp. */
+#define INCOMING_RETURN_ADDR_RTX \
+ gen_frame_mem (Pmode, \
+ plus_constant (Pmode, stack_pointer_rtx, 333 * UNITS_PER_WORD))
+
+/* __builtin_return_address (0): return address is at (?ap - 4).  */
+#define RETURN_ADDR_RTX(COUNT, FRAMEADDR) \
+ ((COUNT) == 0 \
+ ? gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, arg_pointer_rtx, GEN_INT (-4))) \
+ : NULL_RTX)
+
+/* Describe how we implement __builtin_eh_return: four data registers,
+ starting at hard register 2 ($r0). */
+#define EH_RETURN_DATA_REGNO(N) ((N) < 4 ? (N+2) : INVALID_REGNUM)
+
+/* Store the return handler into the call frame. */
+#define EH_RETURN_HANDLER_RTX \
+ gen_frame_mem (Pmode, \
+ plus_constant (Pmode, frame_pointer_rtx, UNITS_PER_WORD))
+
+/* Storage Layout */
+
+/* FT32 is little-endian throughout.  */
+#define BITS_BIG_ENDIAN 0
+#define BYTES_BIG_ENDIAN 0
+#define WORDS_BIG_ENDIAN 0
+
+/* Alignment required for a function entry point, in bits. */
+#define FUNCTION_BOUNDARY 32
+
+#define BRANCH_COST(speed_p, predictable_p) 2
+
+/* Define this macro as a C expression which is nonzero if accessing
+ less than a word of memory (i.e. a `char' or a `short') is no
+ faster than accessing a word of memory. */
+#define SLOW_BYTE_ACCESS 1
+
+#define STORE_FLAG_VALUE 1
+
+#define MOVE_RATIO(speed) ((speed) ? 6 : 2)
+
+/* Number of storage units in a word; normally the size of a
+ general-purpose register, a power of two from 1 or 8. */
+#define UNITS_PER_WORD 4
+
+/* Define this macro to the minimum alignment enforced by hardware
+ for the stack pointer on this machine. The definition is a C
+ expression for the desired alignment (measured in bits). */
+#define STACK_BOUNDARY 32
+
+/* Normal alignment required for function parameters on the stack, in
+ bits. All stack parameters receive at least this much alignment
+ regardless of data type. */
+#define PARM_BOUNDARY 32
+
+/* Alignment of field after `int : 0' in a structure. */
+#define EMPTY_FIELD_BOUNDARY 32
+
+/* No data type wants to be aligned rounder than this. */
+#define BIGGEST_ALIGNMENT 32
+
+/* The best alignment to use in cases where we have a choice. */
+#define FASTEST_ALIGNMENT 32
+
+/* Align definitions of arrays, unions and structures so that
+ initializations and copies can be made more efficient. This is not
+ ABI-changing, so it only affects places where we can see the
+ definition. Increasing the alignment tends to introduce padding,
+ so don't do this when optimizing for size/conserving stack space. */
+#define FT32_EXPAND_ALIGNMENT(COND, EXP, ALIGN) \
+ (((COND) && ((ALIGN) < BITS_PER_WORD) \
+ && (TREE_CODE (EXP) == ARRAY_TYPE \
+ || TREE_CODE (EXP) == UNION_TYPE \
+ || TREE_CODE (EXP) == RECORD_TYPE)) ? BITS_PER_WORD : (ALIGN))
+
+/* Make arrays of chars word-aligned for the same reasons. */
+#define DATA_ALIGNMENT(TYPE, ALIGN) \
+ (TREE_CODE (TYPE) == ARRAY_TYPE \
+ && TYPE_MODE (TREE_TYPE (TYPE)) == QImode \
+ && (ALIGN) < FASTEST_ALIGNMENT ? FASTEST_ALIGNMENT : (ALIGN))
+
+/* Similarly, make sure that objects on the stack are sensibly aligned.
+ NOTE(review): the stack-conservation condition is hard-wired to 1
+ (the flag_conserve_stack test is commented out) -- confirm intent. */
+#define LOCAL_ALIGNMENT(EXP, ALIGN) \
+ FT32_EXPAND_ALIGNMENT(/*!flag_conserve_stack*/ 1, EXP, ALIGN)
+
+/* Every structures size must be a multiple of 8 bits. */
+#define STRUCTURE_SIZE_BOUNDARY 8
+
+/* Look at the fundamental type that is used for a bit-field and use
+ that to impose alignment on the enclosing structure.
+ struct s {int a:8}; should have same alignment as "int", not "char". */
+#define PCC_BITFIELD_TYPE_MATTERS 1
+
+/* Largest integer machine mode for structures. If undefined, the default
+ is GET_MODE_SIZE(DImode). */
+#define MAX_FIXED_MODE_SIZE 32
+
+/* Make strings word-aligned so strcpy from constants will be faster. */
+#define CONSTANT_ALIGNMENT(EXP, ALIGN) \
+ ((TREE_CODE (EXP) == STRING_CST \
+ && (ALIGN) < FASTEST_ALIGNMENT) \
+ ? FASTEST_ALIGNMENT : (ALIGN))
+
+/* Set this nonzero if move instructions will actually fail to work
+ when given unaligned data. */
+#define STRICT_ALIGNMENT 1
+
+/* Generating Code for Profiling -- not supported on this port;
+   any attempt to emit profiling code aborts the compiler. */
+#define FUNCTION_PROFILER(FILE,LABELNO) (abort (), 0)
+
+/* Trampolines for Nested Functions. */
+#define TRAMPOLINE_SIZE (2 + 6 + 6 + 2 + 2 + 6)
+
+/* Alignment required for trampolines, in bits. */
+#define TRAMPOLINE_ALIGNMENT 32
+
+/* An alias for the machine mode for pointers. */
+#define Pmode SImode
+
+/* Promote sub-word integer operations to full 32-bit registers. */
+#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
+do { \
+ if (((MODE) == HImode) \
+ || ((MODE) == QImode)) \
+ (MODE) = SImode; \
+} while (0)
+
+/* An alias for the machine mode used for memory references to
+ functions being called, in `call' RTL expressions. */
+#define FUNCTION_MODE QImode
+
+#define STATIC_CHAIN_REGNUM FT32_R28
+
+/* The register number of the stack pointer register, which must also
+ be a fixed register according to `FIXED_REGISTERS'. */
+#define STACK_POINTER_REGNUM FT32_SP
+
+/* The register number of the frame pointer register, which is used to
+ access automatic variables in the stack frame. */
+#define FRAME_POINTER_REGNUM FT32_FP
+
+/* The register number of the arg pointer register, which is used to
+ access the function's argument list. */
+#define ARG_POINTER_REGNUM FT32_QAP
+
+#define ELIMINABLE_REGS \
+{{ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \
+ {ARG_POINTER_REGNUM, FRAME_POINTER_REGNUM}, \
+ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}}
+
+
+/* This macro is similar to `INITIAL_FRAME_POINTER_OFFSET'. It
+ specifies the initial difference between the specified pair of
+ registers. This macro must be defined if `ELIMINABLE_REGS' is
+ defined. */
+#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
+ do { \
+ (OFFSET) = ft32_initial_elimination_offset ((FROM), (TO)); \
+ } while (0)
+
+/* A C expression that is nonzero if REGNO is the number of a hard
+ register in which function arguments are sometimes passed:
+ the six registers $r0 through $r5. */
+#define FUNCTION_ARG_REGNO_P(r) (r >= FT32_R0 && r <= FT32_R5)
+
+/* A macro whose definition is the name of the class to which a valid
+ base register must belong. A base register is one used in an
+ address which is the register value plus a displacement. */
+#define BASE_REG_CLASS GENERAL_REGS
+
+#define INDEX_REG_CLASS NO_REGS
+
+#define HARD_REGNO_OK_FOR_BASE_P(NUM) \
+ ((unsigned) (NUM) < FIRST_PSEUDO_REGISTER \
+ && (REGNO_REG_CLASS(NUM) == GENERAL_REGS \
+ || (NUM) == HARD_FRAME_POINTER_REGNUM))
+
+/* A C expression which is nonzero if register number NUM is suitable
+ for use as a base register in operand addresses. */
+#ifdef REG_OK_STRICT
+#define REGNO_OK_FOR_BASE_P(NUM) \
+ (HARD_REGNO_OK_FOR_BASE_P(NUM) \
+ || HARD_REGNO_OK_FOR_BASE_P(reg_renumber[(NUM)]))
+#else
+#define REGNO_OK_FOR_BASE_P(NUM) \
+ ((NUM) >= FIRST_PSEUDO_REGISTER || HARD_REGNO_OK_FOR_BASE_P(NUM))
+#endif
+
+/* A C expression which is nonzero if register number NUM is suitable
+ for use as an index register in operand addresses.
+ NOTE(review): FT32_FP is 0, so this is always false (no index
+ registers, consistent with INDEX_REG_CLASS == NO_REGS); writing a
+ literal 0 would be clearer. */
+#define REGNO_OK_FOR_INDEX_P(NUM) FT32_FP
+
+/* The maximum number of bytes that a single instruction can move
+ quickly between memory and registers or between two memory
+ locations. */
+#define MOVE_MAX 4
+#define TRULY_NOOP_TRUNCATION(op,ip) 1
+
+/* Define this to be nonzero if shift instructions ignore all but the low-order
+ few bits. */
+#define SHIFT_COUNT_TRUNCATED 1
+
+/* All load operations zero extend. */
+#define LOAD_EXTEND_OP(MEM) ZERO_EXTEND
+
+/* A number, the maximum number of registers that can appear in a
+ valid memory address. */
+#define MAX_REGS_PER_ADDRESS 1
+
+/* An alias for a machine mode name. This is the machine mode that
+ elements of a jump-table should have. */
+#define CASE_VECTOR_MODE SImode
+
+/* Run-time Target Specification */
+
+#define TARGET_CPU_CPP_BUILTINS() \
+ { \
+ builtin_define ("__FT32__"); \
+ }
+
+#define HAS_LONG_UNCOND_BRANCH true
+
+#define NO_FUNCTION_CSE 1
+
+/* Address space 1 is program memory (flash), see __flash__ below. */
+#define ADDR_SPACE_PM 1
+
+/* Register the __flash__ named address space (program memory).
+   The trailing semicolon after "while (0)" has been dropped: a
+   statement-like macro must not supply its own semicolon, otherwise
+   every use site expands to a double statement.  */
+#define REGISTER_TARGET_PRAGMAS() do { \
+ c_register_addr_space ("__flash__", ADDR_SPACE_PM); \
+} while (0)
+
+/* Nonzero if RTX O is a memory reference in program memory (flash);
+   implemented in ft32.c.  */
+extern int ft32_is_mem_pm(rtx o);
+
+#endif /* GCC_FT32_H */
--- /dev/null
+;; Machine description for FT32
+;; Copyright (C) 2015 Free Software Foundation, Inc.
+;; Contributed by FTDI <support@ftdi.com>
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; -------------------------------------------------------------------------
+;; FT32 specific constraints, predicates and attributes
+;; -------------------------------------------------------------------------
+
+(include "constraints.md")
+(include "predicates.md")
+
+;; Hard register numbers, matching REGISTER_NAMES in ft32.h:
+;; 0 = $fp, 1 = $sp, 35 = ?cc.
+(define_constants [
+ (FP_REG 0)
+ (SP_REG 1)
+ (CC_REG 35)
+])
+
+;; NOTE(review): several of these unspecs (UNSPEC_FMUL*, UNSPEC_LPM,
+;; UNSPEC_INSERT_BITS, ...) are not referenced by any pattern visible
+;; in this file -- possibly carried over from another port; confirm
+;; and prune the unused ones.
+(define_c_enum "unspec"
+ [UNSPEC_STRLEN
+ UNSPEC_MOVMEM
+ UNSPEC_SETMEM
+ UNSPEC_STPCPY
+ UNSPEC_INDEX_JMP
+ UNSPEC_LPM
+ UNSPEC_FMUL
+ UNSPEC_FMULS
+ UNSPEC_FMULSU
+ UNSPEC_COPYSIGN
+ UNSPEC_IDENTITY
+ UNSPEC_INSERT_BITS
+ UNSPEC_JMP_EPILOG
+ UNSPEC_JMP_EPILOG24
+ UNSPEC_JMP_PROLOG
+ UNSPEC_XCHG
+ ])
+
+;; -------------------------------------------------------------------------
+;; nop instruction
+;; -------------------------------------------------------------------------
+
+(define_insn "nop"
+ [(const_int 0)]
+ ""
+ "nop")
+
+;; -------------------------------------------------------------------------
+;; Arithmetic instructions
+;; -------------------------------------------------------------------------
+
+;; In the constraint strings below, "KA" in one alternative means the
+;; operand satisfies constraint K or A (immediate forms defined in
+;; constraints.md), paired with the ft32_rimm_operand predicate
+;; (register or immediate).
+
+(define_insn "addsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (plus:SI
+ (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "ft32_rimm_operand" "KA,r")))
+ ]
+ ""
+ "add.l %0,%1,%2")
+
+(define_insn "subsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (minus:SI
+ (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "ft32_rimm_operand" "KA,r")))]
+ ""
+ "sub.l %0,%1,%2")
+
+(define_insn "mulsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (mult:SI
+ (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "ft32_rimm_operand" "KA,r")))]
+ ""
+ "mul.l %0,%1,%2")
+
+;; 32x32 -> 64 widening multiply.  Three instructions: the low product
+;; is staged through $cc (hence the CC clobber), the high product goes
+;; directly to the high half of the DI destination.
+(define_insn "umulsidi3"
+ [(set (match_operand:DI 0 "register_operand" "=r,r")
+ (mult:DI (zero_extend:DI (match_operand:SI 1 "register_operand" "r,r"))
+ (zero_extend:DI (match_operand:SI 2 "ft32_rimm_operand" "r,KA"))))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "mul.l $cc,%1,%2\;muluh.l %h0,%1,%2\;move.l %0,$cc")
+
+(define_insn "divsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (div:SI
+ (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "ft32_rimm_operand" "r,KA")))]
+ ""
+ "div.l %0,%1,%2")
+
+(define_insn "modsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (mod:SI
+ (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "ft32_rimm_operand" "r,KA")))]
+ ""
+ "mod.l %0,%1,%2")
+
+(define_insn "udivsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (udiv:SI
+ (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "ft32_rimm_operand" "r,KA")))]
+ ""
+ "udiv.l %0,%1,%2")
+
+;; Unsigned modulo.  Operand 2's predicate was "register_operand",
+;; which contradicts the "r,KA" constraint string: the predicate would
+;; reject immediates, leaving the KA alternative dead.  Use
+;; ft32_rimm_operand to match divsi3/modsi3/udivsi3 above.
+(define_insn "umodsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (umod:SI
+ (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "ft32_rimm_operand" "r,KA")))]
+ ""
+ "umod.l %0,%1,%2")
+
+;; Bit-field extract/insert.  The hardware encodes a field as
+;; ((width & 15) << 5) | position; a width field of 0 encodes 16 bits.
+
+(define_insn "extvsi"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (sign_extract:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "ft32_bwidth_operand" "b")
+ (match_operand:SI 3 "const_int_operand" "i")))]
+ ""
+ "bexts.l %0,%1,((15 & %2) << 5) | (%3)")
+
+(define_insn "extzvsi"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (zero_extract:SI (match_operand:SI 1 "register_operand" "r")
+ (match_operand:SI 2 "ft32_bwidth_operand" "b")
+ (match_operand:SI 3 "const_int_operand" "i")))]
+ ""
+ "bextu.l %0,%1,((15 & %2) << 5) | (%3)")
+
+;; Bit-field insert.  Alternative 0 stages the field descriptor in a
+;; scratch register; alternative 1 (constant source) can sometimes
+;; encode the value directly in the bins.l immediate.
+(define_insn "insvsi"
+ [(set (zero_extract:SI (match_operand:SI 0 "register_operand" "+r,r")
+ (match_operand:SI 1 "ft32_bwidth_operand" "b,b")
+ (match_operand:SI 2 "const_int_operand" "i,i"))
+ (match_operand:SI 3 "general_operand" "r,O"))
+ (clobber (match_scratch:SI 4 "=&r,r"))]
+ ""
+ {
+ if (which_alternative == 0)
+ {
+ return \"ldl.l %4,%3,((%1&15)<<5)|(%2)\;bins.l %0,%0,%4\";
+ }
+ else
+ {
+ if ((INTVAL(operands[3]) == 0) || (INTVAL(operands[1]) == 1))
+ return \"bins.l %0,%0,(%3<<9)|((%1&15)<<5)|(%2)\";
+ else
+ return \"ldk.l %4,(%3<<10)|((%1&15)<<5)|(%2)\;bins.l %0,%0,%4\";
+ }
+ })
+
+;; -------------------------------------------------------------------------
+;; Unary arithmetic instructions
+;; -------------------------------------------------------------------------
+
+;; Bitwise NOT, implemented as XOR with all-ones.
+(define_insn "one_cmplsi2"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (not:SI (match_operand:SI 1 "register_operand" "r")))]
+ ""
+ "xor.l %0,%1,-1")
+
+;; -------------------------------------------------------------------------
+;; Logical operators
+;; -------------------------------------------------------------------------
+
+;; The middle alternative of and/ior uses bins.l with the %g/%f output
+;; modifiers (see ft32_print_operand) for mask constants matching the
+;; x/w constraints.
+
+(define_insn "andsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (and:SI (match_operand:SI 1 "register_operand" "r,r,r")
+ (match_operand:SI 2 "general_operand" "r,x,KA")))]
+ ""
+ "@
+ and.l %0,%1,%2
+ bins.l %0,%1,%g2
+ and.l %0,%1,%2")
+
+(define_insn "andqi3"
+ [(set (match_operand:QI 0 "register_operand" "=r,r,r")
+ (and:QI (match_operand:QI 1 "register_operand" "r,r,r")
+ (match_operand:QI 2 "general_operand" "r,x,KA")))]
+ ""
+ "@
+ and.b %0,%1,%2
+ bins.b %0,%1,%g2
+ and.b %0,%1,%2")
+
+(define_insn "xorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (xor:SI (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "ft32_rimm_operand" "r,KA")))]
+ ""
+{
+ return "xor.l %0,%1,%2";
+})
+
+(define_insn "iorsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r,r")
+ (ior:SI (match_operand:SI 1 "register_operand" "r,r,r")
+ (match_operand:SI 2 "general_operand" "r,w,KA")))]
+ ""
+ "@
+ or.l %0,%1,%2
+ bins.l %0,%1,%f2
+ or.l %0,%1,%2")
+
+;; -------------------------------------------------------------------------
+;; Shifters
+;; -------------------------------------------------------------------------
+
+(define_insn "ashlsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (ashift:SI (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "ft32_rimm_operand" "r,KA")))]
+ ""
+{
+ return "ashl.l %0,%1,%2";
+})
+
+(define_insn "ashrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (ashiftrt:SI (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "ft32_rimm_operand" "r,KA")))]
+ ""
+{
+ return "ashr.l %0,%1,%2";
+})
+
+(define_insn "lshrsi3"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (lshiftrt:SI (match_operand:SI 1 "register_operand" "r,r")
+ (match_operand:SI 2 "ft32_rimm_operand" "r,KA")))]
+ ""
+{
+ return "lshr.l %0,%1,%2";
+})
+
+;; -------------------------------------------------------------------------
+;; Move instructions
+;; -------------------------------------------------------------------------
+
+;; SImode
+
+;; Push a register onto the stack
+(define_insn "movsi_push"
+ [(set (mem:SI (pre_dec:SI (reg:SI SP_REG)))
+ (match_operand:SI 0 "register_operand" "r"))]
+ ""
+ "push.l %0")
+
+;; Pop a register from the stack
+(define_insn "movsi_pop"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (mem:SI (post_inc:SI (reg:SI SP_REG))))]
+ ""
+ "pop.l %0")
+
+;; Legitimize SImode moves before reload: stores take a register
+;; source, and mem-of-mem addresses are forced into a register so the
+;; *movsi alternatives below always see a simple address.
+(define_expand "movsi"
+ [(set (match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "general_operand" ""))]
+ ""
+{
+ /* If this is a store, force the value into a register. */
+ if (!(reload_in_progress || reload_completed))
+ {
+ if (MEM_P (operands[0]))
+ {
+ operands[1] = force_reg (SImode, operands[1]);
+ if (MEM_P (XEXP (operands[0], 0)))
+ operands[0] = gen_rtx_MEM (SImode, force_reg (SImode, XEXP (operands[0], 0)));
+ }
+ else
+ {
+ if (MEM_P (operands[1]) && MEM_P (XEXP (operands[1], 0)))
+ operands[1] = gen_rtx_MEM (SImode, force_reg (SImode, XEXP (operands[1], 0)));
+ }
+ /* NOTE(review): dead commented-out legitimization below -- the
+ movhi expander has a live copy of the same logic; either enable
+ it here for consistency or delete it. */
+ /*
+ if (MEM_P (operands[0])) {
+ rtx o = XEXP (operands[0], 0);
+ if (!REG_P(o) &&
+ !CONST_INT_P(o) &&
+ GET_CODE(o) != SYMBOL_REF &&
+ GET_CODE(o) != LABEL_REF) {
+ operands[0] = gen_rtx_MEM (SImode, force_reg (SImode, XEXP (operands[0], 0)));
+ }
+ }
+ */
+ }
+})
+
+;; Set CC directly from a register value (compare against zero).
+(define_insn "*rtestsi"
+ [(set (reg:SI CC_REG)
+ (match_operand:SI 0 "register_operand" "r"))]
+ ""
+ "cmp.l %0,0"
+)
+
+(define_insn "*rtestqi"
+ [(set (reg:QI CC_REG)
+ (match_operand:QI 0 "register_operand" "r"))]
+ ""
+ "cmp.b %0,0"
+)
+
+;; SImode move alternatives: register moves, indexed store/load
+;; (sti/ldi), absolute store/load (sta/lda), small constants (ldk),
+;; arbitrary constants (via ft32_load_immediate), and program-memory
+;; loads (lpm/lpmi for the e/f constraints).
+(define_insn "*movsi"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,BW,r,r,r,r,A,r,r")
+ (match_operand:SI 1 "ft32_general_movsrc_operand" "r,r,BW,A,S,i,r,e,f"))]
+ "register_operand (operands[0], SImode) || register_operand (operands[1], SImode)"
+ "@
+ move.l %0,%1
+ sti.l %0,%1
+ ldi.l %0,%1
+ lda.l %0,%1
+ ldk.l %0,%1
+ *return ft32_load_immediate(operands[0], INTVAL(operands[1]));
+ sta.l %0,%1
+ lpm.l %0,%1
+ lpmi.l %0,%1"
+)
+
+;; QImode legitimization mirrors movsi, and additionally forces any
+;; non-register store address into a register.
+(define_expand "movqi"
+ [(set (match_operand:QI 0 "general_operand" "")
+ (match_operand:QI 1 "general_operand" ""))]
+ ""
+{
+ /* If this is a store, force the value into a register. */
+ if (!(reload_in_progress || reload_completed))
+ {
+ if (MEM_P (operands[0]))
+ {
+ operands[1] = force_reg (QImode, operands[1]);
+ if (MEM_P (XEXP (operands[0], 0)))
+ operands[0] = gen_rtx_MEM (QImode, force_reg (SImode, XEXP (operands[0], 0)));
+ }
+ else
+ {
+ if (MEM_P (operands[1]) && MEM_P (XEXP (operands[1], 0)))
+ operands[1] = gen_rtx_MEM (QImode, force_reg (SImode, XEXP (operands[1], 0)));
+ }
+ if (MEM_P (operands[0]) && !REG_P(XEXP (operands[0], 0)))
+ {
+ operands[0] = gen_rtx_MEM (QImode, force_reg (SImode, XEXP (operands[0], 0)));
+ }
+ }
+})
+
+(define_insn "zero_extendqisi2"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r")
+ (zero_extend:SI (match_operand:QI 1 "nonimmediate_operand" "BW,r,f")))]
+ ""
+ "@
+ ldi.b %0,%1
+ and.l %0,%1,255
+ lpmi.b %0,%1"
+)
+
+;; Sign extension via bit-field extract: field descriptor (8<<5)|0
+;; means width 8, position 0.
+(define_insn "extendqisi2"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r")
+ (sign_extend:SI (match_operand:QI 1 "nonimmediate_operand" "r")))]
+ ""
+ "bexts.l %0,%1,(8<<5)|0"
+)
+
+;; For 16-bit fields the width code 0 encodes 16 (see insvsi above).
+(define_insn "zero_extendhisi2"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r")
+ (zero_extend:SI (match_operand:HI 1 "nonimmediate_operand" "BW,r,f")))]
+ ""
+ "@
+ ldi.s %0,%1
+ bextu.l %0,%1,(0<<5)|0
+ lpmi.s %0,%1"
+)
+
+(define_insn "extendhisi2"
+ [(set (match_operand:SI 0 "nonimmediate_operand" "=r")
+ (sign_extend:SI (match_operand:HI 1 "nonimmediate_operand" "r")))]
+ ""
+ "bexts.l %0,%1,(0<<5)|0"
+)
+
+(define_insn "*movqi"
+ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,BW,r,r,A,r,r,r")
+ (match_operand:QI 1 "ft32_general_movsrc_operand" "r,r,BW,A,r,I,e,f"))]
+ "register_operand (operands[0], QImode)
+ || register_operand (operands[1], QImode)"
+ "@
+ move.b %0,%1
+ sti.b %0,%1
+ ldi.b %0,%1
+ lda.b %0,%1
+ sta.b %0,%1
+ ldk.b %0,%1
+ lpm.b %0,%1
+ lpmi.b %0,%1"
+)
+
+;; HImode legitimization; like movsi, plus the (live) extra pass that
+;; forces any complex store address into a register.
+(define_expand "movhi"
+ [(set (match_operand:HI 0 "general_operand" "")
+ (match_operand:HI 1 "general_operand" ""))]
+ ""
+{
+ /* If this is a store, force the value into a register. */
+ if (!(reload_in_progress || reload_completed))
+ {
+ if (MEM_P (operands[0]))
+ {
+ operands[1] = force_reg (HImode, operands[1]);
+ if (MEM_P (XEXP (operands[0], 0)))
+ operands[0] = gen_rtx_MEM (HImode, force_reg (SImode, XEXP (operands[0], 0)));
+ }
+ else
+ {
+ if (MEM_P (operands[1]) && MEM_P (XEXP (operands[1], 0)))
+ operands[1] = gen_rtx_MEM (HImode, force_reg (SImode, XEXP (operands[1], 0)));
+ }
+ if (MEM_P (operands[0]))
+ {
+ rtx o = XEXP (operands[0], 0);
+ if (!REG_P(o) &&
+ !CONST_INT_P(o) &&
+ GET_CODE(o) != SYMBOL_REF &&
+ GET_CODE(o) != LABEL_REF) {
+ operands[0] = gen_rtx_MEM (HImode, force_reg (SImode, XEXP (operands[0], 0)));
+ }
+ }
+ }
+})
+
+(define_insn "*movhi"
+ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,BW,r,r,A,r,r,r")
+ (match_operand:HI 1 "ft32_general_movsrc_operand" "r,r,BW,A,r,I,e,f"))]
+ "(register_operand (operands[0], HImode)
+ || register_operand (operands[1], HImode))"
+ "@
+ move.s %0,%1
+ sti.s %0,%1
+ ldi.s %0,%1
+ lda.s %0,%1
+ sta.s %0,%1
+ ldk.s %0,%1
+ lpm.s %0,%1
+ lpmi.s %0,%1"
+)
+
+;; SFmode: float constants are spilled to the constant pool; moves are
+;; otherwise plain 32-bit moves.
+(define_expand "movsf"
+ [(set (match_operand:SF 0 "general_operand" "")
+ (match_operand:SF 1 "general_operand" ""))]
+ ""
+{
+ /* If this is a store, force the value into a register. */
+ if (MEM_P (operands[0]))
+ operands[1] = force_reg (SFmode, operands[1]);
+ if (CONST_DOUBLE_P(operands[1]))
+ operands[1] = force_const_mem(SFmode, operands[1]);
+})
+
+(define_insn "*movsf"
+ [(set (match_operand:SF 0 "nonimmediate_operand" "=r,BW,r,r,A,r,r")
+ (match_operand:SF 1 "ft32_general_movsrc_operand" "r,r,BW,A,r,I,f"))]
+ "(register_operand (operands[0], SFmode)
+ || register_operand (operands[1], SFmode))"
+ "@
+ move.l %0,%1
+ sti.l %0,%1
+ ldi.l %0,%1
+ lda.l %0,%1
+ sta.l %0,%1
+ ldk.l %0,%1
+ lpmi.l %0,%1"
+)
+
+;; -------------------------------------------------------------------------
+;; Compare instructions
+;; -------------------------------------------------------------------------
+
+;; Conditional branch: compare sets CC, then the *b<cond> patterns
+;; below emit the jmpc.
+(define_expand "cbranchsi4"
+ [(set (reg:CC CC_REG)
+ (compare:CC
+ (match_operand:SI 1 "register_operand" "")
+ (match_operand:SI 2 "ft32_rimm_operand" "")))
+ (set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(reg:CC CC_REG) (const_int 0)])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_insn "cmpsi"
+ [(set (reg:CC CC_REG)
+ (compare:CC
+ (match_operand:SI 0 "register_operand" "r,r")
+ (match_operand:SI 1 "ft32_rimm_operand" "r,KA")))]
+ ""
+ "cmp.l %0,%1")
+
+;; Branch on a single bit of a register: btst.l tests the bit
+;; (descriptor (1<<5)|pos = width 1 at position %1), then jmpc.
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (ne (zero_extract:SI (match_operand:SI 0 "register_operand" "r")
+ (const_int 1)
+ (match_operand:SI 1 "const_int_operand" "i"))
+ (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "btst.l %0,(1<<5)|%1\;jmpc nz,%l2")
+
+(define_insn ""
+ [(set (pc)
+ (if_then_else
+ (eq (zero_extract:SI (match_operand:SI 0 "register_operand" "r")
+ (const_int 1)
+ (match_operand:SI 1 "const_int_operand" "i"))
+ (const_int 0))
+ (label_ref (match_operand 2 "" ""))
+ (pc)))
+ (clobber (reg:CC CC_REG))]
+ ""
+ "btst.l %0,(1<<5)|%1\;jmpc z,%l2")
+
+(define_expand "cbranchqi4"
+ [(set (reg:CC CC_REG)
+ (compare:CC
+ (match_operand:QI 1 "register_operand" "")
+ (match_operand:QI 2 "ft32_rimm_operand" "")))
+ (set (pc)
+ (if_then_else (match_operator 0 "comparison_operator"
+ [(reg:CC CC_REG) (const_int 0)])
+ (label_ref (match_operand 3 "" ""))
+ (pc)))]
+ ""
+ "")
+
+(define_insn "*cmpqi"
+ [(set (reg:CC CC_REG)
+ (compare:CC
+ (match_operand:QI 0 "register_operand" "r,r")
+ (match_operand:QI 1 "ft32_rimm_operand" "r,KA")))]
+ ""
+ "cmp.b %0,%1")
+
+;; -------------------------------------------------------------------------
+;; Branch instructions
+;; -------------------------------------------------------------------------
+
+;; Map RTL comparison codes to FT32 jmpc condition mnemonics; rCC is
+;; the inverse condition of CC.
+(define_code_iterator cond [ne eq lt ltu gt gtu ge le geu leu])
+(define_code_attr CC [(ne "nz") (eq "z") (lt "lt") (ltu "b")
+ (gt "gt") (gtu "a") (ge "gte") (le "lte")
+ (geu "ae") (leu "be") ])
+(define_code_attr rCC [(ne "z") (eq "nz") (lt "gte") (ltu "ae")
+ (gt "lte") (gtu "be") (ge "lt") (le "gt")
+ (geu "b") (leu "a") ])
+
+(define_insn "*b<cond:code>"
+ [(set (pc)
+ (if_then_else (cond (reg:CC CC_REG)
+ (const_int 0))
+ (label_ref (match_operand 0 "" ""))
+ (pc)))]
+ ""
+{
+ return "jmpc <CC>,%l0";
+}
+)
+
+;; Store-condition: only EQ, LTU, GE, GT and GTU have direct bextu
+;; patterns below; the remaining codes are rewritten here as the
+;; reverse comparison followed by an XOR with 1.
+(define_expand "cstoresi4"
+ [(set (reg:CC CC_REG)
+ (compare:CC (match_operand:SI 2 "register_operand" "r,r")
+ (match_operand:SI 3 "ft32_rimm_operand" "r,KA")))
+ (set (match_operand:SI 0 "register_operand")
+ (match_operator:SI 1 "ordered_comparison_operator"
+ [(reg:CC CC_REG) (const_int 0)]))]
+ ""
+{
+ rtx test;
+
+ switch (GET_CODE (operands[1])) {
+ case NE:
+ case GEU:
+ case LT:
+ case LE:
+ case LEU:
+ test = gen_rtx_fmt_ee (reverse_condition (GET_CODE (operands[1])),
+ SImode, operands[2], operands[3]);
+ emit_insn(gen_cstoresi4(operands[0], test, operands[2], operands[3]));
+ emit_insn(gen_xorsi3(operands[0], operands[0], gen_int_mode(1, SImode)));
+ DONE;
+ default:
+ ;
+ }
+})
+
+;; Extract a single CC flag bit into a register.  The operand 32|N is
+;; the bextu field descriptor selecting flag bit N of $cc.
+(define_insn "*seq"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (eq:SI (reg CC_REG) (const_int 0)))]
+ ""
+ "bextu.l %0,$cc,32|0"
+)
+
+(define_insn "*sltu"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ltu:SI (reg CC_REG) (const_int 0)))]
+ ""
+ "bextu.l %0,$cc,32|1"
+)
+
+(define_insn "*sge"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (ge:SI (reg CC_REG) (const_int 0)))]
+ ""
+ "bextu.l %0,$cc,32|4"
+)
+
+(define_insn "*sgt"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (gt:SI (reg CC_REG) (const_int 0)))]
+ ""
+ "bextu.l %0,$cc,32|5"
+)
+
+(define_insn "*sgtu"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (gtu:SI (reg CC_REG) (const_int 0)))]
+ ""
+ "bextu.l %0,$cc,32|6"
+)
+
+;; -------------------------------------------------------------------------
+;; Call and Jump instructions
+;; -------------------------------------------------------------------------
+
+(define_expand "call"
+ [(call (match_operand:QI 0 "memory_operand" "")
+ (match_operand 1 "general_operand" ""))]
+ ""
+{
+ gcc_assert (MEM_P (operands[0]));
+})
+
+;; Direct call (immediate target) or indirect call through a register.
+(define_insn "*call"
+ [(call (mem:QI (match_operand:SI
+ 0 "nonmemory_operand" "i,r"))
+ (match_operand 1 "" ""))]
+ ""
+ "@
+ call %0
+ calli %0"
+)
+
+(define_expand "call_value"
+ [(set (match_operand 0 "" "")
+ (call (match_operand:QI 1 "memory_operand" "")
+ (match_operand 2 "" "")))]
+ ""
+{
+ gcc_assert (MEM_P (operands[1]));
+})
+
+(define_insn "*call_value"
+ [(set (match_operand 0 "register_operand" "=r")
+ (call (mem:QI (match_operand:SI
+ 1 "immediate_operand" "i"))
+ (match_operand 2 "" "")))]
+ ""
+ "call %1"
+)
+
+(define_insn "*call_value_indirect"
+ [(set (match_operand 0 "register_operand" "=r")
+ (call (mem:QI (match_operand:SI
+ 1 "register_operand" "r"))
+ (match_operand 2 "" "")))]
+ ""
+ "calli %1"
+)
+
+(define_insn "indirect_jump"
+ [(set (pc) (match_operand:SI 0 "nonimmediate_operand" "r"))]
+ ""
+ "jmpi %0")
+
+(define_insn "jump"
+ [(set (pc)
+ (label_ref (match_operand 0 "" "")))]
+ ""
+ "jmp %l0"
+)
+
+;; Shared prologue/epilogue helper calls (register save/restore
+;; sequences factored into __prolog_* / __epilog_* routines).
+(define_insn "call_prolog"
+ [(unspec:SI [(match_operand 0 "" "")]
+ UNSPEC_JMP_PROLOG)]
+ ""
+ "call __prolog_%0"
+)
+
+(define_insn "jump_epilog"
+ [(unspec:SI [(match_operand 0 "" "")]
+ UNSPEC_JMP_EPILOG)]
+ ""
+ "jmp __epilog_%0"
+)
+
+(define_insn "jump_epilog24"
+ [(unspec:SI [(match_operand 0 "" "")]
+ UNSPEC_JMP_EPILOG24)]
+ ""
+ "jmp __epilog24_%0"
+)
+
+
+;; Subroutines of "casesi".
+;; operand 0 is index
+;; operand 1 is the minimum bound
+;; operand 2 is the maximum bound - minimum bound + 1
+;; operand 3 is CODE_LABEL for the table;
+;; operand 4 is the CODE_LABEL to go to if index out of range.
+;; NOTE(review): GCC's internals document casesi operand 2 as the range
+;; (maximum bound minus minimum bound); the "+ 1" in the comment above
+;; looks inaccurate -- confirm against expand_case in the middle end.
+
+(define_expand "casesi"
+ [(match_operand:SI 0 "general_operand" "")
+ (match_operand:SI 1 "const_int_operand" "")
+ (match_operand:SI 2 "const_int_operand" "")
+ (match_operand 3 "" "")
+ (match_operand 4 "" "")]
+ ""
+ "
+{
+ if (GET_CODE (operands[0]) != REG)
+ operands[0] = force_reg (SImode, operands[0]);
+
+ /* Bias the index down by the minimum bound so the table is 0-based. */
+ if (operands[1] != const0_rtx)
+ {
+ rtx index = gen_reg_rtx (SImode);
+ rtx offset = gen_reg_rtx (SImode);
+
+ emit_insn (gen_movsi (offset, operands[1]));
+ emit_insn (gen_subsi3 (index, operands[0], offset));
+ operands[0] = index;
+ }
+
+ /* Unsigned compare also catches indices below the minimum bound. */
+ {
+ rtx test = gen_rtx_GTU (VOIDmode, operands[0], operands[2]);
+ emit_jump_insn (gen_cbranchsi4 (test, operands[0], operands[2], operands[4]));
+ }
+
+ emit_jump_insn (gen_casesi0 (operands[0], operands[3]));
+ DONE;
+}")
+
+;; Dispatch through the table: the table label is loaded into $cc (used
+;; here as a plain data temporary), the scaled index is added in the
+;; scratch, and jmpi transfers to table + 4*index.
+;; NOTE(review): the asm jumps to the table slot address itself rather
+;; than loading a word from it as the (mem:SI ...) RTL suggests -- this
+;; presumably relies on how FT32 emits dispatch tables; confirm.
+(define_insn "casesi0"
+ [(set (pc) (mem:SI (plus:SI
+ (mult:SI (match_operand:SI 0 "register_operand" "r")
+ (const_int 4))
+ (label_ref (match_operand 1 "" "")))))
+ (clobber (match_scratch:SI 2 "=&r"))
+ ]
+ ""
+ "ldk.l\t$cc,%l1\;ashl.l\t%2,%0,2\;add.l\t%2,%2,$cc\;jmpi\t%2"
+ )
+
+;; -------------------------------------------------------------------------
+;; Atomic exchange instruction
+;; -------------------------------------------------------------------------
+
+;; Atomic exchange in three widths. Alternative 0 uses exi (register
+;; plus offset addressing, constraints B/W); alternative 1 uses exa
+;; (absolute address, constraint A). Operand 2 is tied to operand 0.
+;; NOTE(review): operand 3 (the memory model) is accepted but never
+;; inspected -- presumably every model maps to the same instruction on
+;; this single-core target; confirm no fence is required.
+(define_insn "atomic_exchangesi"
+ [(set (match_operand:SI 0 "register_operand" "=&r,r") ;; output
+ (match_operand:SI 1 "memory_operand" "+BW,A")) ;; memory
+ (set (match_dup 1)
+ (unspec:SI
+ [(match_operand:SI 2 "register_operand" "0,0") ;; input
+ (match_operand:SI 3 "const_int_operand")] ;; model
+ UNSPEC_XCHG))]
+ ""
+ "@
+ exi.l %0,%1
+ exa.l %0,%1")
+
+(define_insn "atomic_exchangehi"
+ [(set (match_operand:HI 0 "register_operand" "=&r,r") ;; output
+ (match_operand:HI 1 "memory_operand" "+BW,A")) ;; memory
+ (set (match_dup 1)
+ (unspec:HI
+ [(match_operand:HI 2 "register_operand" "0,0") ;; input
+ (match_operand:HI 3 "const_int_operand")] ;; model
+ UNSPEC_XCHG))]
+ ""
+ "@
+ exi.s %0,%1
+ exa.s %0,%1")
+
+(define_insn "atomic_exchangeqi"
+ [(set (match_operand:QI 0 "register_operand" "=&r,r") ;; output
+ (match_operand:QI 1 "memory_operand" "+BW,A")) ;; memory
+ (set (match_dup 1)
+ (unspec:QI
+ [(match_operand:QI 2 "register_operand" "0,0") ;; input
+ (match_operand:QI 3 "const_int_operand")] ;; model
+ UNSPEC_XCHG))]
+ ""
+ "@
+ exi.b %0,%1
+ exa.b %0,%1")
+
+;; -------------------------------------------------------------------------
+;; String instructions
+;; -------------------------------------------------------------------------
+
+;; Block/string operations mapped directly onto FT32 hardware string
+;; instructions (strcmp/memcpy/memset/strlen).
+(define_insn "cmpstrsi"
+ [(set (match_operand:SI 0 "register_operand" "=r,r")
+ (compare:SI (match_operand:BLK 1 "memory_operand" "W,BW")
+ (match_operand:BLK 2 "memory_operand" "W,BW")))
+ (clobber (match_operand:SI 3))
+ ]
+ ""
+ "strcmp.%d3 %0,%b1,%b2"
+)
+
+;; movstr is deliberately disabled: the insn condition is the constant
+;; "0", so it never matches. Kept for reference.
+(define_insn "movstr"
+[(set (match_operand:BLK 1 "memory_operand" "=W")
+ (match_operand:BLK 2 "memory_operand" "W"))
+ (use (match_operand:SI 0))
+ (clobber (match_dup 0))
+ ]
+"0"
+"stpcpy %b1,%b2 # %0 %b1 %b2"
+)
+
+;; Block copy; operand 2 is the byte count, operand 3 the alignment
+;; (printed via %d3 as the operation width suffix).
+(define_insn "movmemsi"
+ [(set (match_operand:BLK 0 "memory_operand" "=W,W,BW")
+ (match_operand:BLK 1 "memory_operand" "W,W,BW"))
+ (use (match_operand:SI 2 "ft32_rimm_operand" "r,KA,rKA"))
+ (use (match_operand:SI 3))
+ ]
+ ""
+ "memcpy.%d3 %b0,%b1,%2 # %3!"
+)
+
+;; Block fill: operand 1 is the byte count, operand 2 the fill byte.
+(define_insn "setmemsi"
+ [(set (match_operand:BLK 0 "memory_operand" "=BW,BW") (unspec:BLK [
+ (use (match_operand:QI 2 "register_operand" "r,r"))
+ (use (match_operand:SI 1 "ft32_rimm_operand" "r,KA"))
+ ] UNSPEC_SETMEM))
+ (use (match_operand:SI 3))
+ ]
+ ""
+ "memset.%d3 %b0,%2,%1"
+)
+
+(define_insn "strlensi"
+ [(set (match_operand:SI 0 "register_operand" "=r")
+ (unspec:SI [(match_operand:BLK 1 "memory_operand" "W")
+ (match_operand:QI 2 "const_int_operand" "")
+ (match_operand:SI 3 "ft32_rimm_operand" "")]
+ UNSPEC_STRLEN))]
+ ""
+ "strlen.%d3 %0,%b1 # %2 %3"
+)
+
+;; -------------------------------------------------------------------------
+;; Prologue & Epilogue
+;; -------------------------------------------------------------------------
+
+;; Function prologue/epilogue are expanded entirely in C code.
+;; NOTE(review): the extern declarations below would conventionally
+;; live in ft32-protos.h rather than inline in the .md file.
+(define_expand "prologue"
+ [(clobber (const_int 0))]
+ ""
+{
+ extern void ft32_expand_prologue();
+ ft32_expand_prologue ();
+ DONE;
+})
+
+(define_expand "epilogue"
+ [(return)]
+ ""
+{
+ extern void ft32_expand_epilogue();
+ ft32_expand_epilogue ();
+ DONE;
+})
+
+;; link: save the frame pointer, establish the new frame, and adjust the
+;; stack by operand 1 (constraint L) in a single instruction.
+(define_insn "link"
+ [
+;; (set (mem:SI (pre_dec:SI (reg:SI SP_REG)))
+;; (reg:SI FP_REG))
+ (set (match_operand:SI 0)
+ (reg:SI SP_REG))
+ (set (reg:SI SP_REG)
+ (plus:SI (reg:SI SP_REG)
+ (match_operand:SI 1 "general_operand" "L")))]
+ ""
+ "link %0,%m1"
+)
+
+;; unlink: restore the caller's frame pointer ($r29) and stack pointer.
+(define_insn "unlink"
+ [(set (reg:SI FP_REG)
+ (mem:SI (reg:SI FP_REG)))
+ (set (reg:SI SP_REG)
+ (plus:SI (reg:SI FP_REG)
+ (const_int 4)))]
+ ""
+ "unlink $r29"
+)
+
+(define_insn "returner"
+ [(return)]
+ "reload_completed"
+ "return")
+
+;; Return that also pops the 24-byte outgoing-argument area via the
+;; shared __epilog24 helper (see libgcc epilog.S).
+(define_insn "returner24"
+ [
+ (set (reg:SI SP_REG)
+ (plus:SI
+ (reg:SI SP_REG)
+ (const_int 24)))
+ (return)]
+ ""
+ "jmp __epilog24")
--- /dev/null
+; Options for the FT32 port of the compiler.
+
+; Copyright (C) 2015 Free Software Foundation, Inc.
+;
+; This file is part of GCC.
+;
+; GCC is free software; you can redistribute it and/or modify it under
+; the terms of the GNU General Public License as published by the Free
+; Software Foundation; either version 3, or (at your option) any later
+; version.
+;
+; GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+; WARRANTY; without even the implied warranty of MERCHANTABILITY or
+; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+; for more details.
+;
+; You should have received a copy of the GNU General Public License
+; along with GCC; see the file COPYING3. If not see
+; <http://www.gnu.org/licenses/>.
+
+msim
+Target Report Mask(SIM)
+Target the software simulator.
+
+mlra
+Target Report Var(ft32_lra_flag) Init(0) Save
+Use LRA instead of reload.
--- /dev/null
+;; Predicate definitions for FT32
+;; Copyright (C) 2015 Free Software Foundation, Inc.
+;; Contributed by FTDI <support@ftdi.com>
+
+;; This file is part of GCC.
+
+;; GCC is free software; you can redistribute it and/or modify it
+;; under the terms of the GNU General Public License as published
+;; by the Free Software Foundation; either version 3, or (at your
+;; option) any later version.
+
+;; GCC is distributed in the hope that it will be useful, but WITHOUT
+;; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+;; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+;; License for more details.
+
+;; You should have received a copy of the GNU General Public License
+;; along with GCC; see the file COPYING3. If not see
+;; <http://www.gnu.org/licenses/>.
+
+;; -------------------------------------------------------------------------
+;; Predicates
+;; -------------------------------------------------------------------------
+
+;; Nonzero if OP can be source of a simple move operation.
+
+(define_predicate "ft32_general_movsrc_operand"
+ (match_code "mem,const_int,reg,subreg,symbol_ref,label_ref,const")
+{
+ /* Any (MEM LABEL_REF) is OK. That is a pc-relative load. */
+ if (MEM_P (op) && GET_CODE (XEXP (op, 0)) == LABEL_REF)
+ return 1;
+
+ /* Also allow reg + symbolic-constant offset addressing. */
+ if (MEM_P (op)
+ && GET_CODE (XEXP (op, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (op, 0), 0)) == REG
+ && GET_CODE (XEXP (XEXP (op, 0), 1)) == CONST)
+ return 1;
+
+ return general_operand (op, mode);
+})
+
+;; Destination of a simple move: a register, or memory addressed by a
+;; plain register, reg+const offset, or a bare symbol.
+(define_predicate "ft32_general_movdst_operand"
+ (match_code "mem,const_int,reg,subreg,symbol_ref,label_ref,const")
+{
+ if (MEM_P (op)
+ && GET_CODE (XEXP (op, 0)) == PLUS
+ && GET_CODE (XEXP (XEXP (op, 0), 0)) == REG
+ && CONST_INT_P (XEXP (XEXP (op, 0), 1)))
+ return 1;
+ if (MEM_P (op) && GET_CODE (XEXP (op, 0)) == SYMBOL_REF)
+ return 1;
+ return REG_P(op) ||
+ (MEM_P(op) && REG_P(XEXP (op, 0)));
+})
+
+;; Memory addressed by a single register, no offset.
+(define_predicate "reg_indirect"
+ (match_code "mem")
+{
+ return (MEM_P(op) && REG_P(XEXP (op, 0)));
+})
+
+;; Nonzero if OP can be an operand to an add/inc/dec instruction.
+
+(define_predicate "ft32_add_operand"
+ (ior (match_code "reg")
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), -32768, 32767)"))))
+
+;; Nonzero if OP can be an operand to an sub/dec instruction.
+;; NOTE(review): the range is the negation of the add range (-32767 ..
+;; 32768), presumably because the immediate is negated internally.
+
+(define_predicate "ft32_sub_operand"
+ (ior (match_code "reg")
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), -32767, 32768)"))))
+
+
+;; Register, or 10-bit signed immediate (the "KA" constraint).
+(define_predicate "ft32_rimm_operand"
+ (ior (match_code "reg")
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), -512, 511)"))))
+
+;; Bitfield width for bext/bins: 1 to 16 bits.
+(define_predicate "ft32_bwidth_operand"
+ (and (match_code "const_int")
+ (match_test "IN_RANGE (INTVAL (op), 1, 16)")))
--- /dev/null
+# Target Makefile Fragment for ft32
+# Copyright (C) 2015 Free Software Foundation, Inc.
+# Contributed by FTDI <support@ftdi.com>
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published
+# by the Free Software Foundation; either version 3, or (at your
+# option) any later version.
+#
+# GCC is distributed in the hope that it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
+# License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
@item
Don Bowman for mips-vxworks contributions.
+@item
+James Bowman for the FT32 port.
+
@item
Dave Brolley for work on cpplib and Chill.
is required for java) may not configure properly on FreeBSD prior to
the FreeBSD 7.0 release with GNU binutils after 2.16.1.
+@html
+<hr />
+@end html
+@anchor{ft32-x-elf}
+@heading ft32-*-elf
+The FT32 processor.
+This configuration is intended for embedded systems.
+
@html
<hr />
@end html
@emph{FR30 Options}
@gccoptlist{-msmall-model -mno-lsim}
+@emph{FT32 Options}
+@gccoptlist{-msim -mlra}
+
@emph{FRV Options}
@gccoptlist{-mgpr-32 -mgpr-64 -mfpr-32 -mfpr-64 @gol
-mhard-float -msoft-float @gol
* Darwin Options::
* DEC Alpha Options::
* FR30 Options::
+* FT32 Options::
* FRV Options::
* GNU/Linux Options::
* H8/300 Options::
@end table
+@node FT32 Options
+@subsection FT32 Options
+@cindex FT32 Options
+
+These options are defined specifically for the FT32 port.
+
+@table @gcctabopt
+
+@item -msim
+@opindex msim
+Specifies that the program will be run on the simulator. This causes
+an alternate runtime startup and library to be linked.
+You must not use this option when generating programs that will run on
+real hardware; you must provide your own runtime library for whatever
+I/O functions are needed.
+
+@item -mlra
+@opindex mlra
+Enable Local Register Allocation. This is still experimental for FT32,
+so by default the compiler uses standard reload.
+
+@end table
+
@node FRV Options
@subsection FRV Options
@cindex FRV Options
@end table
+@item FT32---@file{config/ft32/constraints.md}
+@table @code
+@item A
+An absolute address.
+
+@item B
+An offset address.
+
+@item W
+A register indirect memory operand.
+
+@item e
+An offset address.
+
+@item f
+An offset address.
+
+@item O
+The constant zero or one.
+
+@item I
+A 16-bit signed constant (@minus{}32768 @dots{} 32767).
+
+@item w
+A bitfield mask suitable for bext or bins.
+
+@item x
+An inverted bitfield mask suitable for bext or bins.
+
+@item L
+A 16-bit unsigned constant, multiple of 4 (0 @dots{} 65532).
+
+@item S
+A 20-bit signed constant (@minus{}524288 @dots{} 524287).
+
+@item b
+A constant for a bitfield width (1 @dots{} 16).
+
+@item KA
+A 10-bit signed constant (@minus{}512 @dots{} 511).
+
+@end table
+
@item Hewlett-Packard PA-RISC---@file{config/pa/pa.h}
@table @code
@item a
;;
frv*) cpu_type=frv
;;
+ft32*) cpu_type=ft32
+ ;;
moxie*) cpu_type=moxie
;;
i[34567]86-*-*)
tmake_file="$tmake_file frv/t-frv frv/t-linux t-fdpbit"
tm_file="$tm_file frv/elf-lib.h frv/frv-abi.h"
;;
+ft32-*-elf)
+ tmake_file="ft32/t-ft32 t-softfp-sfdf t-softfp-excl t-softfp"
+ extra_parts="$extra_parts crti.o crti-hw.o crtn.o"
+ ;;
h8300-*-rtems*)
tmake_file="$tmake_file h8300/t-h8300 t-fpbit"
tm_file="$tm_file h8300/h8300-lib.h"
--- /dev/null
+# FT32 C runtime startup: interrupt vector table, data/bss initialization,
+# call to main, and the shared interrupt dispatch trampoline.
+.equ SYS_REGMSC0CFG_B3 , 0x1001b # NOTE(review): defined but unused here
+.equ SYS_REGIRQCTL_B3 , 0x100e3
+
+.global _start
+_start:
+# START Interrupt Vector Table [[
+ jmp 0x3fffc # RESET Vector
+ jmp _watchdog_isr # WATCHDOG Vector # TODO: Change me to reset the chip properly
+ jmp interrupt_0
+ jmp interrupt_1
+ jmp interrupt_2
+ jmp interrupt_3
+ jmp interrupt_4
+ jmp interrupt_5
+ jmp interrupt_6
+ jmp interrupt_7
+ jmp interrupt_8
+ jmp interrupt_9
+ jmp interrupt_10
+ jmp interrupt_11
+ jmp interrupt_12
+ jmp interrupt_13
+ jmp interrupt_14
+ jmp interrupt_15
+ jmp interrupt_16
+ jmp interrupt_17
+ jmp interrupt_18
+ jmp interrupt_19
+ jmp interrupt_20
+ jmp interrupt_21
+ jmp interrupt_22
+ jmp interrupt_23
+ jmp interrupt_24
+ jmp interrupt_25
+ jmp interrupt_26
+ jmp interrupt_27
+ jmp interrupt_28
+ jmp interrupt_29
+ jmp interrupt_30
+ jmp interrupt_31
+ jmp 0x3fff8
+# ]] END Interrupt Vector Table
+
+codestart:
+ jmp init
+
+
+.global _exithook
+_exithook: # Debugger uses '_exithook' at 0x90 to catch program exit
+ return
+
+init:
+ # Disable all interrupts
+ ldk $r0,0x80
+ sta.b SYS_REGIRQCTL_B3,$r0 # was a raw 0x100e3; use the equate above
+
+ # Reset all peripherals
+ # lda.l $r0, 0x10018
+ # bins.l $r0, $r0, 0x23F # Set bit 31
+ # sta.l 0x10018, $r0
+
+ # Initialize DATA by copying from program memory
+ ldk.l $r0,__data_load_start
+ ldk.l $r1,__data_load_end
+ ldk.l $r2,0 # Will use __data after binutils patch
+
+ jmp .dscopy
+.dsloop:
+ # Copy PM[$r0] to RAM $r2
+ lpmi.l $r3,$r0,0
+ sti.l $r2,0,$r3
+ add.l $r0,$r0,4
+ add.l $r2,$r2,4
+.dscopy:
+ cmp.l $r0,$r1
+ jmpc lt,.dsloop
+
+ # Zero BSS
+ ldk.l $r0,_bss_start
+ ldk.l $r2,_end
+ sub.l $r2,$r2,$r0
+ ldk.l $r1,0
+ memset.l $r0,$r1,$r2
+
+ sub.l $sp,$sp,24 # Space for the caller argument frame
+ call main
+
+.equ EXITEXIT , 0x1fffc
+
+.global _exit
+_exit:
+ sta.l EXITEXIT,$r0 # simulator end of test
+ jmp _exithook
+
+_watchdog_isr:
+ ldk.l $sp, 0x80FFFF # Reset the stack pointer so it doesn't grow to a huge size
+ jmp 0
+
+# Macro to construct the interrupt stub code.
+# it just saves r0, loads r0 with the int vector
+# and branches to interrupt_common.
+
+.macro inth i=0
+interrupt_\i:
+ push $r0 # { matching pop is at the end of interrupt_common
+ lda $r0,(vector_table + 4 * \i)
+ jmp interrupt_common
+.endm
+
+# NOTE(review): 33 stubs are generated but the vector table above only
+# references interrupt_0..interrupt_31; interrupt_32 matches the 33rd
+# vector_table slot below -- confirm intended use.
+ inth 0
+ inth 1
+ inth 2
+ inth 3
+ inth 4
+ inth 5
+ inth 6
+ inth 7
+ inth 8
+ inth 9
+ inth 10
+ inth 11
+ inth 12
+ inth 13
+ inth 14
+ inth 15
+ inth 16
+ inth 17
+ inth 18
+ inth 19
+ inth 20
+ inth 21
+ inth 22
+ inth 23
+ inth 24
+ inth 25
+ inth 26
+ inth 27
+ inth 28
+ inth 29
+ inth 30
+ inth 31
+ inth 32
+
+ # On entry: r0, already saved, holds the handler function
+interrupt_common:
+ push $r1 # {
+ push $r2 # {
+ push $r3 # {
+ push $r4 # {
+ push $r5 # {
+ push $r6 # {
+ push $r7 # {
+ push $r8 # {
+ push $r9 # {
+ push $r10 # {
+ push $r11 # {
+ push $r12 # {
+ push $cc # {
+
+ calli $r0
+
+ pop $cc # }
+ pop $r12 # }
+ pop $r11 # }
+ pop $r10 # }
+ pop $r9 # }
+ pop $r8 # }
+ pop $r7 # }
+ pop $r6 # }
+ pop $r5 # }
+ pop $r4 # }
+ pop $r3 # }
+ pop $r2 # }
+ pop $r1 # }
+ pop $r0 # } matching push in interrupt_0-31 above
+ reti
+
+ # Null function for unassigned interrupt to point at
+.global nullvector
+nullvector:
+ return
+
+.section .data
+.global vector_table
+vector_table:
+ .rept 33
+ .long nullvector
+ .endr
+
+
+# Empty stub so sjlj-exception links resolve without the C++ runtime.
+.section .text
+.global __gxx_personality_sj0
+__gxx_personality_sj0:
--- /dev/null
+# crti.S for FT32
+#
+# Copyright (C) 2009-2013 Free Software Foundation, Inc.
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3, or (at your option) any
+# later version.
+#
+# This file is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# Under Section 7 of GPL version 3, you are granted additional
+# permissions described in the GCC Runtime Library Exception, version
+# 3.1, as published by the Free Software Foundation.
+#
+# You should have received a copy of the GNU General Public License and
+# a copy of the GCC Runtime Library Exception along with this program;
+# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+# <http://www.gnu.org/licenses/>.
+
+# This file just makes a stack frame for the contents of the .fini and
+# .init sections. Users may put any desired instructions in those
+# sections.
+
+ .file "crti.S"
+
+ .section ".init"
+ .global _init
+ .type _init, @function
+ .p2align 2
+_init:
+# _init deliberately has no body here: other objects contribute .init
+# fragments, and crtn.S supplies the terminating "return".
+
+ .section ".fini"
+ .global _fini
+ .type _fini,@function
+ .p2align 2
+_fini:
+# Same layout as _init: the body is open-ended and closed by crtn.S.
--- /dev/null
+# crtn.S for FT32
+#
+# Copyright (C) 2009-2013 Free Software Foundation, Inc.
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3, or (at your option) any
+# later version.
+#
+# This file is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# Under Section 7 of GPL version 3, you are granted additional
+# permissions described in the GCC Runtime Library Exception, version
+# 3.1, as published by the Free Software Foundation.
+#
+# You should have received a copy of the GNU General Public License and
+# a copy of the GCC Runtime Library Exception along with this program;
+# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+# <http://www.gnu.org/licenses/>.
+
+# This file just makes sure that the .fini and .init sections do in
+# fact return. Users may put any desired instructions in those sections.
+# This file is the last thing linked into any executable.
+
+ .file "crtn.S"
+
+ .section ".init"
+# Terminates the _init body opened in crti.S.
+ return
+
+ .section ".fini"
+# Terminates the _fini body opened in crti.S.
+ return
--- /dev/null
+# Shared function epilogues. Each __epilog_$rN label pops $rN and then
+# falls through into __epilog_$r(N-1) below it, so "jmp __epilog_$rN"
+# restores callee-saved registers $rN down to $r13 and returns.
+ .macro e r=0
+ .global __epilog_$r\r
+__epilog_$r\r:
+ pop $r\r
+ .endm
+
+ e 28
+ e 27
+ e 26
+ e 25
+ e 24
+ e 23
+ e 22
+ e 21
+ e 20
+ e 19
+ e 18
+ e 17
+ e 16
+ e 15
+ e 14
+ e 13
+ return
+
+# Variant that first discards the 24-byte outgoing-argument area.
+ .global __epilog24
+__epilog24:
+ add $sp,$sp,24
+ return
+
+# __epilog24_$rN: drop the argument area, then restore $rN..$r13.
+ .macro f r=0
+ .global __epilog24_$r\r
+__epilog24_$r\r:
+ add $sp,$sp,24
+ jmp __epilog_$r\r
+ .endm
+
+ f 13
+ f 14
+ f 15
+ f 16
+ f 17
+ f 18
+ f 19
+ f 20
+ f 21
+ f 22
+ f 23
+ f 24
+ f 25
+ f 26
+ f 27
+ f 28
--- /dev/null
+# ieee754 sf routines for FT32
+
+/* Copyright (C) 1995-2014 Free Software Foundation, Inc.
+
+This file is free software; you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the
+Free Software Foundation; either version 3, or (at your option) any
+later version.
+
+This file is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+General Public License for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+# See http://www.ens-lyon.fr/LIP/Pub/Rapports/PhD/PhD2006/PhD2006-02.pdf
+# for implementation details of all except division which is detailed below
+#
+
+// .global __cmpsf2_
+
+# IEEE-754 single-precision bit-pattern constants, loaded with lpm.
+nan: .long 0x7FFFFFFF # also abs mask
+inf: .long 0x7F800000
+sign_mask: .long 0x80000000
+m_mask: .long 0x007FFFFF
+exp_bias: .long 127
+edge_case: .long 0x00FFFFFF
+smallest_norm: .long 0x00800000 # implicit bit
+high_FF: .long 0xFF000000
+high_uint: .long 0xFFFFFFFF
+
+# Supply a few 'missing' instructions
+
+# not
+.macro not rd,r1
+ xor \rd,\r1,-1
+.endm
+
+# negate
+.macro neg x
+ not \x, \x
+ add \x, \x, 1
+.endm
+
+# set $cc from the result of "ashl reg,dist"
+# NOTE(review): emits a raw instruction word (an ashl encoding with the
+# condition-code write enabled) -- confirm against the FT32 opcode map.
+.macro ashlcc reg,dist
+ .long 0x5de04008 | (\reg << 15) | (\dist << 4)
+.endm
+
+
+# converts an unsigned number x to a signed rep based on the bits in sign
+# sign should be 0x00000000 or 0xffffffff.
+.macro to_signed x, sign
+ add \x,\x,\sign # conditionally decrement x
+ xor \x,\x,\sign # conditionally complement x
+.endm
+
+
+# load a full 32-bit constant via ldk (high 22 bits) + ldl (low 10 bits)
+.macro ld32 r,v
+ ldk \r,(\v>>10)
+ ldl \r,\r,(\v & 1023)
+.endm
+
+# calculate trailing zero count in x, also uses scr.
+# Using Seal's algorithm
+.macro ntz x, scr
+ not \scr, \x
+ add \scr, \scr, 1
+ and \x, \x, \scr # isolate lowest set bit: x & -x
+ ashl \scr, \x, 4
+ add \x, \scr, \x
+ ashl \scr, \x, 6
+ add \x, \scr, \x
+ ashl \scr, \x, 16
+ sub \x, \scr, \x
+ lshr \x, \x, 26 # 6-bit hash indexes the lookup table
+ ldk \scr, ntz_table
+ add \x, \x, \scr
+ lpmi.b \x, \x, 0
+.endm
+
+ntz_table:
+ .byte 32,0,1,12,2,6,0,13,3,0,7,0,0,0,0,14
+ .byte 10,4,0,0,8,0,0,25,0,0,0,0,0,21,27,15
+ .byte 31,11,5,0,0,0,0,0,9,0,0,24,0,0,20,26
+ .byte 30,0,0,0,0,23,0,19,29,0,22,18,28,17,16,0
+
+# calculate leading zero count
+.macro nlz x, scr
+ flip \x, \x, 31 # bit-reverse, then count trailing zeros
+ ntz \x, \scr
+.endm
+
+
+# Round 26 bit mantissa to nearest
+# | 23 bits frac | G | R | S |
+# The 0xc8 table (0b11001000) selects round-to-nearest-even from the
+# low three bits (G,R,S) before the mantissa is shifted down by 2.
+.macro round m, s1, s2
+ ldk \s1,0xc8
+ and \s2,\m,7
+ lshr \s1,\s1,\s2
+ and \s1,\s1,1
+ lshr \m,\m,2
+ add \m,\m,\s1
+.endm
+
+# If NZ, set the LSB of reg
+.macro sticky reg
+ jmpc z,1f
+ or \reg,\reg,1 # set the sticky bit to 1
+1:
+.endm
+
+##########################################################################
+##########################################################################
+## addition & subtraction
+
+# Single-precision subtract: flip the sign of y, then fall into add.
+#if defined(L_subsf3) || defined(L_addsub_sf)
+.global __subsf3
+__subsf3:
+ # this is subtraction, so we just change the sign of r1
+ lpm $r2,sign_mask
+ xor $r1,$r1,$r2
+ jmp __addsf3
+#endif
+
+# Single-precision add. Register use: r2/r3 = exponents ex/ey,
+# r5 = exponent difference d, r4 = sign scratch, r0/r1 = mantissas.
+#if defined(L_addsf3) || defined(L_addsub_sf)
+.global __addsf3
+__addsf3:
+ # x in $r0, y in $r1, result z in $r0 --||| 100 instructions +/- |||--
+ # unpack e, calc d
+ bextu $r2,$r0,(8<<5)|23 # ex in r2
+ bextu $r3,$r1,(8<<5)|23 # ey in r3
+ sub $r5,$r2,$r3 # d = ex - ey
+
+ # Special values are 0x00 and 0xff in ex and ey.
+ # If (ex&ey) != 0 or (ex|ey)=255 then there may be
+ # a special value.
+ tst $r2,$r3
+ jmpc nz,1f
+ jmp slow
+1: or $r4,$r2,$r3
+ cmp $r4,255
+ jmpc nz,no_special_vals
+slow:
+ # Check for early exit
+ cmp $r2,0
+ jmpc z,test_if_not_255
+ cmp $r3,0
+ jmpc nz,no_early_exit
+test_if_not_255:
+ cmp $r2,255
+ jmpc z,no_early_exit
+ cmp $r3,255
+ jmpc z,no_early_exit
+ # Both operands zero: result is +0 unless both signs are negative.
+ or $r6,$r2,$r3
+ cmp $r6,0
+ jmpc nz,was_not_zero
+ and $r0,$r0,$r1
+ lpm $r1,sign_mask
+ and $r0,$r0,$r1
+ return
+was_not_zero:
+ cmp $r2,0
+ jmpc nz,ret_x
+ move $r0,$r1
+ return
+ret_x:
+ return
+no_early_exit:
+ # setup to test for special values
+ sub $r6,$r2,1
+ and $r6,$r6,0xFE
+ sub $r7,$r3,1
+ and $r7,$r7,0xFE
+ # test for special values
+ cmp $r6,$r7
+ jmpc gte,ex_spec_is_gte
+ move $r6,$r7
+ex_spec_is_gte:
+ cmp $r6,0xFE
+ jmpc nz,no_special_vals
+ cmp $r5,0
+ jmpc ns,d_gte_0
+ # NOTE(review): both conditional jumps below target the very next
+ # label, so control reaches ret_y regardless of the tests.
+ cmp $r3,0xFF
+ jmpc z,ret_y
+ cmp $r2,0
+ jmpc z,ret_y
+ret_y:
+ move $r0,$r1
+ return
+d_gte_0:
+ cmp $r5,0
+ jmpc z,d_is_0
+ cmp $r2,0xFF
+ jmpc z,ret_x
+ cmp $r3,0
+ jmpc z,ret_x
+d_is_0:
+ cmp $r2,0xFF
+ jmpc nz,no_special_vals
+ # Both exponents are 0xFF: inf/nan handling.
+ ashl $r6,$r0,9 # clear all except x frac
+ ashl $r7,$r1,9 # clear all except y frac
+ or $r6,$r6,$r7
+ cmp $r6,0
+ jmpc nz,ret_nan
+ lshr $r4,$r0,31 # sx in r4
+ lshr $r5,$r1,31 # sy in r5
+ cmp $r4,$r5
+ jmpc nz,ret_nan # inf + -inf -> nan
+ return
+ret_nan:
+ lpm $r0,nan
+ return
+no_special_vals:
+ ldk $r8,(1<<10)|(9<<5)|26 # setup implicit bit and mask for e
+ #----------------------
+ ashr $r4,$r0,31 # sx in r4
+ ashl $r0,$r0,3 # shift mx 3 for GRS bits
+ bins $r0,$r0,$r8 # clear sx, ex and add implicit bit mx
+ # change mx to signed mantissa
+ to_signed $r0,$r4
+ #----------------------
+ ashr $r4,$r1,31 # sy in r4
+ ashl $r1,$r1,3 # shift my 3 for GRS bits
+ bins $r1,$r1,$r8 # clear sy, ey and add implicit bit my
+ # change my to signed mantissa
+ to_signed $r1,$r4
+ #----------------------
+ # test if we swap ms based on d sign
+ cmp $r5,0
+ jmpc gte,noswap
+ # swap mx & my
+ xor $r0,$r0,$r1
+ xor $r1,$r0,$r1
+ xor $r0,$r0,$r1
+ # d positive means that ex>=ey, so ez = ex
+ # d negative means that ey>ex, so ez = ey
+ move $r2,$r3
+ # |d|
+ neg $r5
+noswap:
+ # now $r2 = ez = max(ex,ey)
+ cmp $r5,26 # max necessary alignment shift is 26
+ jmpc lt,under_26
+ ldk $r5,26
+under_26:
+ ldk $r7,-1
+ ashl $r7,$r7,$r5 # create inverse of mask for test of S bit value in discarded my
+ not $r7,$r7
+ tst $r1,$r7 # determine value of sticky bit
+ # shift my >> |d|
+ ashr $r1,$r1,$r5
+ sticky $r1
+
+ # add ms
+ add $r0,$r0,$r1
+
+ # $r4 = sign(mx), mx = |mx|
+ ashr $r4,$r0,31
+ xor $r0,$r0,$r4
+ sub $r0,$r0,$r4
+
+ # realign mantissa using leading zero count
+ flip $r7,$r0,31
+ ntz $r7,$r8
+ ashl $r0,$r0,$r7
+ btst $r0,(6<<5)|0 # test low bits for sticky again
+ lshr $r0,$r0,6
+ sticky $r0
+
+ # update exponent
+ add $r2,$r2,5
+ sub $r2,$r2,$r7
+
+ # Round to nearest
+ round $r0,$r7,$r6
+
+ # detect_exp_update
+ lshr $r6,$r0,24
+ add $r2,$r2,$r6
+
+ # final tests
+ # mz == 0? if so, we just bail with a +0
+ cmp $r0,0
+ jmpc nz,msum_not_zero
+ ldk $r0,0
+ return
+msum_not_zero:
+ # Combined check that (1 <= ez <= 254)
+ sub $r3,$r2,1
+ cmp $r3,254
+ jmpc b,no_special_ret
+ # underflow?
+ cmp $r2,0
+ jmpc gt,no_under
+ ldk $r0,0
+ jmp pack_sz
+no_under:
+ # overflow?
+ cmp $r2,255
+ jmpc lt,no_special_ret
+ ldk $r0,0x7F8
+ ashl $r0,$r0,20
+ jmp pack_sz
+no_special_ret:
+ # Pack ez
+ ldl $r2,$r2,(8<<5)|23
+ bins $r0,$r0,$r2 # width = 8, pos = 23 pack ez
+ # Pack sz
+pack_sz:
+ ldl $r4,$r4,(1<<5)|31
+ bins $r0,$r0,$r4 # width = 1, pos = 31 pack result sign sz
+ return
+#endif
+
+##########################################################################
+##########################################################################
+## multiplication
+
+# Single-precision multiply. Register use: r2/r3 = exponents (r3 later
+# holds the result exponent ez), r4 = result sign, (r1,r2) = 48-bit
+# mantissa product from mul/muluh.
+#ifdef L_mulsf3
+.global __mulsf3
+__mulsf3:
+ # x in $r0, y in $r1, result z in $r0 --||| 61 instructions +/- |||--
+
+ # unpack e
+ bextu $r2,$r0,(8<<5)|23 # ex in r2
+ bextu $r3,$r1,(8<<5)|23 # ey in r3
+ # calc result sign
+ xor $r4,$r0,$r1
+ lpm $r5,sign_mask
+ and $r4,$r4,$r5 # sz in r4
+
+ # unpack m add implicit bit
+ ldk $r5,(1<<10)|(9<<5)|23 # setup implicit bit and mask for e
+ #----------------------
+ bins $r0,$r0,$r5 # clear sx, ex and add implicit bit mx
+
+ # Fast path: both exponents in 1..254 (no zero/denormal/inf/nan).
+ sub $r6,$r2,1
+ cmp $r6,254
+ jmpc b,1f
+ jmp slow_mul
+1: sub $r6,$r3,1
+ cmp $r6,254
+ jmpc b,no_special_vals_mul
+
+slow_mul:
+ # Check for early exit
+ cmp $r2,0
+ jmpc z,op_is_zero
+ cmp $r3,0
+ jmpc nz,no_early_exit_mul
+op_is_zero:
+ cmp $r2,255
+ jmpc z,no_early_exit_mul
+ cmp $r3,255
+ jmpc z,no_early_exit_mul
+ move $r0,$r4 # zero operand, finite other: signed zero
+ return
+no_early_exit_mul:
+ # setup to test for special values
+ sub $r6,$r2,1
+ and $r6,$r6,0xFE
+ sub $r7,$r3,1
+ and $r7,$r7,0xFE
+ # test for special values
+ cmp $r6,$r7
+ jmpc gte,ex_spec_is_gte_ey_mul
+ move $r6,$r7
+ex_spec_is_gte_ey_mul:
+ cmp $r6,0xFE
+ jmpc nz,no_special_vals_mul
+ cmp $r2,0xFF
+ jmpc nz,ex_not_FF_mul
+ # x is inf or nan
+ ashl $r6,$r0,9
+ cmp $r6,0
+ jmpc nz,ret_nan
+ cmp $r3,0
+ jmpc z,ret_nan # inf * 0 -> nan
+ ashl $r6,$r1,1
+ lpm $r7,high_FF
+ cmp $r6,$r7
+ jmpc a,ret_nan # y is nan
+ cmp $r6,0
+ jmpc z,ret_nan
+ # infinity
+ lpm $r0,inf
+ or $r0,$r0,$r4
+ return
+ex_not_FF_mul:
+ cmp $r2,0
+ jmpc nz,no_nan_mul
+ cmp $r3,0xFF
+ jmpc nz,no_nan_mul
+ jmp ret_nan # 0 * inf/nan -> nan
+no_nan_mul:
+ lpm $r0,nan
+ and $r0,$r0,$r1
+ or $r0,$r0,$r4
+ return
+
+ret_nan:
+ lpm $r0,nan
+ return
+
+no_special_vals_mul:
+ bins $r1,$r1,$r5 # clear sy, ey and add implicit bit my
+ # calc ez
+ add $r3,$r2,$r3
+ sub $r3,$r3,127 # ez in r3
+
+ # (r1,r2) = R0 * R1
+ mul $r2,$r0,$r1
+ muluh $r1,$r0,$r1
+
+ btst $r1,(1<<5)|15 # XXX use jmpx
+ jmpc z,mul_z0
+
+ # mz is 1X.XX...X
+ # 48-bit product is in (r1,r2). The low 22 bits of r2
+ # are discarded.
+ lshr $r0,$r2,22
+ ashl $r1,$r1,10
+ or $r0,$r0,$r1 # r0 = (r1,r2) >> 22
+ ashlcc 2,10 # set $cc from the discarded low bits of r2
+ sticky $r0
+ add $r3,$r3,1 # bump exponent
+
+ # Round to nearest
+ round $r0, $r1, $r2
+ lshr $r6,$r0,24
+ add $r3,$r3,$r6
+
+ sub $r6,$r3,1
+ cmp $r6,254
+ jmpc b,no_special_ret_mul
+
+special_ret_mul:
+ # When the final exponent <= 0, result is flushed to 0 except
+ # for the border case 0x00FFFFFF which is promoted to next higher
+ # FP no., that is, the smallest "normalized" number.
+ cmp $r3,0
+ jmpc gt,exp_normal
+ # Pack ez
+ ldl $r3,$r3,(8<<5)|23
+ bins $r0,$r0,$r3 # width = 8, pos = 23 pack ez
+ lpm $r2,edge_case
+ cmp $r0,$r2
+ jmpc nz,no_edge_case
+ lpm $r0,smallest_norm
+ jmp pack_sz_mul
+no_edge_case:
+ ldk $r0,0
+ jmp pack_sz_mul
+exp_normal:
+ # overflow?
+ cmp $r3,255
+ jmpc lt,no_special_ret_mul
+ ldk $r0,0x7F8
+ ashl $r0,$r0,20
+ jmp pack_sz_mul
+no_special_ret_mul:
+ # Pack ez
+ ldl $r3,$r3,(8<<5)|23
+ bins $r0,$r0,$r3 # width = 8, pos = 23 pack ez
+ # Pack sz
+pack_sz_mul:
+ or $r0,$r0,$r4
+ return
+
+mul_z0:
+ # mz is 0X.XX...X
+ # 48-bit product is in (r1,r2). The low 21 bits of r2
+ # are discarded.
+ lshr $r0,$r2,21
+ ashl $r1,$r1,11
+ or $r0,$r0,$r1 # r0 = (r1,r2) >> 21
+ ashlcc 2,11 # set $cc from the discarded low bits of r2
+ sticky $r0
+ # Round to nearest
+ round $r0, $r1, $r2
+ lshr $r6,$r0,24
+ add $r3,$r3,$r6
+
+ sub $r6,$r3,1
+ cmp $r6,254
+ jmpc b,no_special_ret_mul
+ jmp special_ret_mul
+#endif
+
+##########################################################################
+##########################################################################
+## division
+
+## See http://perso.ens-lyon.fr/gilles.villard/BIBLIOGRAPHIE/PDF/arith19.pdf
+## for implementation details
+
+
+# Constants used by __divsf3's reciprocal approximation -- presumably the
+# polynomial/iteration coefficients from the paper cited above (Villard,
+# arith19); TODO confirm the derivation.
+dc_1: .long 0xffffe7d7
+dc_2: .long 0xffffffe8
+dc_3: .long 0xffbad86f
+dc_4: .long 0xfffbece7
+dc_5: .long 0xf3672b51
+dc_6: .long 0xfd9d3a3e
+dc_7: .long 0x9a3c4390
+dc_8: .long 0xd4d2ce9b
+dc_9: .long 0x1bba92b3
+dc_10: .long 0x525a1a8b
+dc_11: .long 0x0452b1bf
+dc_12: .long 0xFFFFFFC0
+spec_val_test: .long 0x7F7FFFFF # largest finite float bit pattern
+
+
+
+#ifdef L_divsf3
+.global __divsf3
+# Single-precision divide: $r0 = $r0 / $r1.
+# Computes an approximation of 1/y by polynomial evaluation (the dc_*
+# coefficients above), multiplies by x, then applies one correction
+# and rounding step.  Special values (0, inf, NaN) are filtered first.
+__divsf3:
+ push $r13
+ # x in $r0, y in $r1, result z in $r0 --||| 73 instructions +/- |||-
+ bextu $r10,$r0,(8<<5)|23 # ex in r10
+ bextu $r11,$r1,(8<<5)|23 # ey in r11
+ lpm $r6, m_mask
+ and $r2, $r0, $r6 # mx
+ and $r3, $r1, $r6 # my
+ cmp $r2,$r3
+ bextu $r2,$r30,(1<<5)|4 # c = Tx >= T;  ($r30 presumably holds the flags -- extracts the compare result)
+ ashl $r3,$r3,9 # T = my << 9;
+ lpm $r13, sign_mask
+ ashl $r4,$r0,8 # X8 = X << 8;
+ or $r4,$r4,$r13 # Mx = X8 | 0x80000000;
+ lshr $r5,$r4,$r2 # S = Mx >> c;
+ # calc D
+ sub $r2, $r11, $r2
+ add $r12, $r10, 125
+ sub $r2, $r12, $r2 # int D = (Ex + 125) - (Ey - c);
+ # calc result sign
+ xor $r12,$r0,$r1
+ and $r12,$r12,$r13 # Sr = ( X ^ Y ) & 0x80000000;
+ # check early exit: Ex == 0 (x zero/denormal) with ordinary y
+ # returns a signed zero; the 0/0 and 0/inf cases fall through to
+ # the special-value handling below.
+ cmp $r10, 0
+ jmpc nz, no_early_ret_dev
+ cmp $r11, 0
+ jmpc z, no_early_ret_dev
+ cmp $r11, 255
+ jmpc z, no_early_ret_dev
+ move $r0, $r12
+ pop $r13
+ return
+no_early_ret_dev:
+ # setup to test for special values
+ sub $r8,$r10,1
+ and $r8,$r8,0xFE
+ sub $r9,$r11,1
+ and $r9,$r9,0xFE
+ # test for special values: r8 = max(|Ex-1|, |Ey-1|) masked to even;
+ # equals 0xFE only when some exponent is 0 or 255.
+ cmp $r8, $r9
+ jmpc gte, absXm1_gte_absYm1
+ move $r8, $r9
+absXm1_gte_absYm1:
+ cmp $r8, 0xFE
+ jmpc nz, no_spec_ret_div
+ cmp $r10, 0xFF
+ jmpc nz, ex_not_FF_div
+ # Ex == 255: NaN/x -> NaN, inf/inf -> NaN, inf/y -> inf
+ lpm $r6, m_mask
+ and $r2, $r0, $r6 # mx
+ cmp $r2, 0
+ jmpc nz, ret_nan_div
+ cmp $r11, 0xFF
+ jmpc z, ret_nan_div
+ jmp ret_inf_div
+ex_not_FF_div:
+ cmp $r11, 0xFF
+ jmpc nz, ey_not_FF_div
+ # Ey == 255: x/NaN -> NaN, x/inf -> signed zero
+ ashl $r13, $r1, 9
+ cmp $r13, 0
+ jmpc nz, ret_nan_div
+ move $r0, $r12
+ pop $r13
+ return
+ey_not_FF_div:
+ # both exponents zero -> 0/0 -> NaN, otherwise x/0 -> inf
+ or $r10, $r10, $r11
+ cmp $r10, 0
+ jmpc z, ret_nan_div
+ret_inf_div:
+ lpm $r6, inf
+ move $r0, $r6
+ or $r0, $r0, $r12
+ pop $r13
+ return
+ret_nan_div:
+ lpm $r0, nan
+ pop $r13
+ return
+
+no_spec_ret_div:
+# check for overflow: result exponent D >= 0xFE -> signed infinity
+ ldk $r6, 0xFE
+ cmp $r2, $r6
+ jmpc lt, no_overflow_div
+ lpm $r6, inf
+ or $r0, $r12, $r6
+ pop $r13
+ return
+no_overflow_div:
+# check for underflow: result exponent D < 0 -> flush to signed zero
+ cmp $r2, 0
+ jmpc ns, no_underflow_div
+ xnor $r6, $r6, $r6 # -1
+ cmp $r2, $r6
+ jmpc nz, ret_sr_div
+ ldk $r7, 0xFF
+ xor $r6, $r6, $r7 # 0xFF ^ -1 = 0xFFFFFF00
+ # NOTE(review): the two compares below require $r4 to equal both
+ # 0xFFFFFF00 and 0x80000000, so the lshr/or fall-through can never
+ # execute -- one of them was likely meant to be a masked test;
+ # confirm against the reference algorithm.
+ cmp $r4, $r6
+ jmpc nz, ret_sr_div
+ lpm $r6, sign_mask
+ cmp $r4, $r6
+ jmpc nz, ret_sr_div
+ lshr $r0, $r6, 8
+ or $r0, $r0, $r12
+ pop $r13
+ return
+ret_sr_div:
+ move $r0, $r12
+ pop $r13
+ return
+no_underflow_div:
+ # polynomial evaluation of the scaled reciprocal; each muluh is the
+ # high 32 bits of a 32x32 multiply, mirroring the mul() of the paper
+ lpm $r6, dc_1
+ muluh $r7, $r3, $r6 # i0 = mul( T , 0xffffe7d7 );
+ lpm $r6, dc_2
+ sub $r7, $r6, $r7 # i1 = 0xffffffe8 - i0;
+ muluh $r7, $r5, $r7 # i2 = mul( S , i1 );
+ add $r7, $r7, 0x20 # i3 = 0x00000020 + i2;
+ muluh $r8, $r3, $r3 # i4 = mul( T , T );
+ muluh $r9, $r5, $r8 # i5 = mul( S , i4 );
+ lpm $r6, dc_3
+ muluh $r10, $r3, $r6 # i6 = mul( T , 0xffbad86f );
+ lpm $r6, dc_4
+ sub $r10, $r6, $r10 # i7 = 0xfffbece7 - i6;
+ muluh $r10, $r9, $r10 # i8 = mul( i5 , i7 );
+ add $r7, $r7, $r10 # i9 = i3 + i8;
+ muluh $r9, $r8, $r9 # i10 = mul( i4 , i5 );
+ lpm $r6, dc_5
+ muluh $r10, $r3, $r6 # i11 = mul( T , 0xf3672b51 );
+ lpm $r6, dc_6
+ sub $r10, $r6, $r10 # i12 = 0xfd9d3a3e - i11;
+ lpm $r6, dc_7
+ muluh $r11, $r3, $r6 # i13 = mul( T , 0x9a3c4390 );
+ lpm $r6, dc_8
+ sub $r11, $r6, $r11 # i14 = 0xd4d2ce9b - i13
+ muluh $r11, $r8, $r11 # i15 = mul( i4 , i14 );
+ add $r10, $r10, $r11 # i16 = i12 + i15;
+ muluh $r10, $r9, $r10 # i17 = mul( i10 , i16 )
+ add $r7, $r7, $r10 # i18 = i9 + i17;
+ muluh $r10, $r8, $r8 # i19 = mul( i4 , i4 );
+ lpm $r6, dc_9
+ muluh $r11, $r3, $r6 # i20 = mul( T , 0x1bba92b3 );
+ lpm $r6, dc_10
+ sub $r11, $r6, $r11 # i21 = 0x525a1a8b - i20;
+ lpm $r6, dc_11
+ muluh $r8, $r8, $r6 # i22 = mul( i4 , 0x0452b1bf );
+ add $r8, $r11, $r8 # i23 = i21 + i22;
+ muluh $r8, $r10, $r8 # i24 = mul( i19 , i23 );
+ muluh $r8, $r9, $r8 # i25 = mul( i10 , i24 );
+ add $r3, $r7, $r8 # V = i18 + i25;
+# W = V & 0xFFFFFFC0;
+ lpm $r6, dc_12
+ and $r3, $r3, $r6 # W
+# round and pack final values
+ ashl $r0, $r2, 23 # pack D
+ or $r0, $r0, $r12 # pack Sr
+ ashl $r12, $r1, 8
+ or $r12, $r12, $r13 # My ($r13 still holds sign_mask here)
+ muluh $r10, $r3, $r12
+ lshr $r11, $r5, 1
+ cmp $r10, $r11
+ jmpc gte, div_ret_1
+ add $r3, $r3, 0x40 # round up when the remainder test says W is low
+div_ret_1:
+ lshr $r3, $r3, 7
+ add $r0, $r0, $r3
+ pop $r13
+ return
+#endif
+
+##########################################################################
+##########################################################################
+## Negate
+
+#ifdef L_negsf
+.global __negsf
+# Negate the float in $r0 by flipping its sign bit.
+# Works for all inputs including zeros, infinities and NaNs.
+__negsf:
+ lpm $r1, sign_mask
+ xor $r0, $r0, $r1
+ return
+#endif
+
+##########################################################################
+##########################################################################
+## float to int & unsigned int
+
+#ifdef L_fixsfsi
+.global __fixsfsi
+# Convert the float in $r0 to a signed int in $r0, truncating toward
+# zero.  NaN -> 0; values beyond the int range (including +/-inf)
+# saturate to INT_MAX / INT_MIN.
+__fixsfsi: # 20 instructions
+ bextu $r1,$r0,(8<<5)|23 # e in r1
+ lshr $r2,$r0,31 # s in r2
+ lpm $r3, m_mask
+ and $r0,$r0,$r3 # m in r0
+ # test nan (e == 255 with non-zero mantissa)
+ cmp $r1,0xFF
+ jmpc nz, int_not_nan
+ cmp $r0,0
+ jmpc z, int_not_nan
+ ldk $r0,0
+ return
+int_not_nan:
+ # test edges
+ cmp $r1, 127
+ jmpc gte, int_not_zero # lower limit: |x| < 1 -> 0
+ ldk $r0,0
+ return
+int_not_zero:
+ cmp $r1, 158
+ jmpc lt, int_not_max # upper limit: e >= 158 means |x| >= 2^31
+ lpm $r0, nan # nan constant is 0x7FFFFFFF == INT_MAX
+ cmp $r2, 0
+ jmpc z, int_positive
+ xnor $r0, $r0, 0 # ~INT_MAX == INT_MIN for negative input
+ return
+int_not_max:
+ lpm $r3, smallest_norm
+ or $r0, $r0, $r3 # set implicit bit
+ sub $r1, $r1, 150 # shift count: e - (127 + 23)
+ cmp $r1, 0
+ jmpc s, shift_right
+ ashl $r0, $r0, $r1
+ jmp set_int_sign
+shift_right:
+ xnor $r1, $r1, 0 # negate the (negative) shift count
+ add $r1, $r1, 1
+ lshr $r0, $r0, $r1 # discard fraction bits (truncate)
+set_int_sign:
+ cmp $r2, 0
+ jmpc z, int_positive
+ xnor $r0, $r0, 0 # two's-complement negate for negative input
+ add $r0, $r0, 1
+int_positive:
+ return
+#endif
+
+#ifdef L_fixunssfsi
+.global __fixunssfsi
+# Convert the float in $r0 to an unsigned int in $r0, truncating.
+# Negative inputs and NaN return 0.
+# Bug fix: the NaN test must run on the *biased* exponent.  The
+# original subtracted 127 first and then compared against 0xFF, which
+# can never match (max biased e is 255, so e-127 <= 128), leaving the
+# NaN check dead and letting NaN fall through to the shift path.
+__fixunssfsi: # 19 instructions
+ lshr $r2, $r0, 31 # s in r2
+ cmp $r2, 0
+ jmpc z, uint_not_neg
+ ldk $r0, 0 # negative -> 0
+ return
+uint_not_neg:
+ bextu $r1, $r0, (8<<5)|23 # e in r1 (still biased)
+ lpm $r3, m_mask
+ and $r0, $r0, $r3 # m in r0
+ # test nan (biased e == 255 with non-zero mantissa)
+ cmp $r1, 0xFF
+ jmpc nz, uint_not_nan
+ cmp $r0, 0
+ jmpc z, uint_not_nan
+ ldk $r0, 0 # NaN -> 0
+ return
+uint_not_nan:
+ sub $r1, $r1, 127 # unbias the exponent (after the NaN test)
+ # test edges
+ cmp $r1, 0
+ jmpc ns, uint_not_zero # lower limit: |x| < 1 -> 0
+ ldk $r0, 0
+ return
+uint_not_zero:
+ lpm $r3, smallest_norm
+ or $r0, $r0, $r3 # set implicit bit
+ cmp $r1, 23
+ jmpc lt, shift_uint_right
+ sub $r1, $r1, 23
+ ashl $r0, $r0, $r1
+ return
+shift_uint_right:
+ ldk $r3, 23
+ sub $r1, $r3, $r1
+ lshr $r0, $r0, $r1 # discard fraction bits (truncate)
+ return
+#endif
+
+##########################################################################
+##########################################################################
+## int & unsigned int to float
+
+
+# i2f: convert the non-zero unsigned integer in \x into IEEE single
+# precision exponent+mantissa bits (sign left clear), rounding to
+# nearest even with a sticky bit for discarded low bits.
+# \s1-\s3 are scratch registers; \lbl uniquifies the local labels so
+# the macro can be expanded more than once per file.
+.macro i2f x, s1, s2, s3, lbl
+ move \s1, \x
+ nlz \s1, \s2 # leading-zero count into \s1 (nlz is defined elsewhere -- presumably \s2 is its scratch)
+ cmp \s1, 8
+ jmpc s, float_round\lbl
+ # 8 or more leading zeros: value fits the 24-bit mantissa exactly,
+ # left-align it with no rounding needed
+ sub \s2, \s1, 8
+ ashl \x, \x, \s2
+ jmp float_no_round\lbl
+float_round\lbl:
+ cmp \s1, 6
+ jmpc s, float_shift_right\lbl
+ # 6 or 7 leading zeros: shift left into the 26-bit round position
+ sub \s2, \s1, 6
+ ashl \x, \x, \s2
+ jmp float_round_and_pack\lbl
+float_shift_right\lbl:
+ # fewer than 6 leading zeros: bits will be discarded on the right,
+ # so compute a sticky bit from them first
+ ldk \s2, 6
+ sub \s2, \s2, \s1
+ xnor \s3, \s3 ,\s3 # 0xFFFFFFFF
+ ashl \s3, \s3 ,\s2 # create inverse of mask for test of S bit value in discarded my
+ xnor \s3, \s3 ,0 # NOT
+ tst \x, \s3 # determine value of sticky bit
+ lshr \x, \x, \s2
+ jmpc z,float_round_and_pack\lbl
+ or \x, \x, 1 # set the sticky bit to 1
+float_round_and_pack\lbl:
+ # round to nearest even: guard/round/sticky live in the low 2 bits
+ bextu \s2, \x, (1<<5)|2 # extract low bit of m
+ or \x, \x, \s2 # or p into r
+ add \x, \x, 1
+ lshr \x, \x, 2
+ btst \x, (1<<5)|24 # test for carry from round
+ jmpc z, float_no_round\lbl
+ sub \s1, \s1, 1 # inc e for carry (actually dec nlz)
+ lshr \x, \x, 1
+float_no_round\lbl:
+ ldk \s2, 158
+ sub \s1, \s2, \s1 # biased exponent = 158 - nlz
+ # Pack e
+ ldl \s1, \s1, (8<<5)|23
+ bins \x, \x, \s1
+.endm
+
+
+#ifdef L_floatsisf
+.global __floatsisf
+# Convert the signed int in $r0 to a float in $r0.
+# Takes the absolute value, converts via the i2f macro, then packs
+# the original sign bit back in.
+__floatsisf: # 32 instructions
+ cmp $r0, 0
+ jmpc nz, float_not_zero
+ return # 0 -> +0.0 (same bit pattern)
+float_not_zero:
+ ashr $r1, $r0, 31 # s in r1 (0 or -1)
+ xor $r0, $r0, $r1 # cond neg
+ sub $r0, $r0, $r1 # ... $r0 = |$r0|
+ i2f $r0, $r2, $r3, $r4, 1
+ ldl $r1, $r1, (1<<5)|31
+ bins $r0, $r0, $r1 # insert sign bit at position 31
+ return
+#endif
+
+#ifdef L_floatunsisf
+.global __floatunsisf
+# Convert the unsigned int in $r0 to a float in $r0.
+# The sign bit is always clear, so i2f's output is the final result.
+__floatunsisf: # 26 instructions
+ cmp $r0, 0
+ jmpc nz, float_not_zero2
+ return # 0 -> +0.0 (same bit pattern)
+float_not_zero2:
+ i2f $r0, $r1, $r2, $r3, 2
+ return
+#endif
+
+##########################################################################
+##########################################################################
+## float compare
+
+
+# Shared float-compare helper: compares $r0 against $r1.
+# Returns in $r0: 1 if x > y (also if either operand is NaN),
+# 0 if x == y (treating -0 and +0 as equal), and high_uint
+# (presumably -1 -- defined elsewhere) if x < y.
+__cmpsf2_:
+ # calc abs vals
+ lpm $r3, nan # also abs mask (0x7FFFFFFF)
+ and $r2, $r0, $r3
+ and $r3, $r1, $r3
+ # test if either abs is nan (abs value above the inf pattern)
+ lpm $r4, inf
+ cmp $r2, $r4
+ jmpc gt, cmp_is_gt
+ cmp $r3, $r4
+ jmpc gt, cmp_is_gt
+ # test if both are 0 (covers -0 == +0)
+ or $r2, $r2, $r3
+ cmp $r2, 0
+ jmpc z, cmp_is_eq
+ # test if eq
+ cmp $r0, $r1
+ jmpc z, cmp_is_eq
+ # -- if either is pos: sign bit of (x & y) is set only when both
+ # are negative
+ and $r2, $r0, $r1
+ cmp $r2, 0
+ jmpc s, cmp_both_neg
+ cmp $r0, $r1
+ jmpc gt, cmp_is_gt
+ # r0 < r1
+ lpm $r0, high_uint
+ return
+cmp_both_neg:
+ # both negative: larger bit pattern is the more negative float,
+ # so the comparison sense is inverted
+ cmp $r0, $r1
+ jmpc lt, cmp_is_gt
+ # r0 < r1
+ lpm $r0, high_uint
+ return
+cmp_is_gt:
+ ldk $r0, 1
+ return
+cmp_is_eq:
+ ldk $r0, 0
+ return
+
+
+
+
--- /dev/null
+# Shared function-prologue thunks, one per highest callee-saved
+# register ($r13 .. $r28).  A function needing to save $r13..$rN
+# calls __prolog_$rN instead of emitting the pushes inline, saving
+# code size.  NOTE(review): this relies on the call having left the
+# in-function resume address on the stack -- `exi $r13,$sp,0`
+# exchanges $r13 with that slot, so the caller's $r13 ends up saved
+# on the stack while $r13 now holds the resume address, which
+# `jmpi $r13` jumps back to after pushing the remaining registers.
+# Confirm against the FT32 call/exi semantics.
+ .global __prolog_$r13
+__prolog_$r13:
+ exi $r13,$sp,0 # swap caller's $r13 with the saved resume address
+ jmpi $r13
+
+ .global __prolog_$r14
+__prolog_$r14:
+ exi $r13,$sp,0
+ push $r14
+ jmpi $r13
+
+ .global __prolog_$r15
+__prolog_$r15:
+ exi $r13,$sp,0
+ push $r14
+ push $r15
+ jmpi $r13
+
+ .global __prolog_$r16
+__prolog_$r16:
+ exi $r13,$sp,0
+ push $r14
+ push $r15
+ push $r16
+ jmpi $r13
+
+ .global __prolog_$r17
+__prolog_$r17:
+ exi $r13,$sp,0
+ push $r14
+ push $r15
+ push $r16
+ push $r17
+ jmpi $r13
+
+ .global __prolog_$r18
+__prolog_$r18:
+ exi $r13,$sp,0
+ push $r14
+ push $r15
+ push $r16
+ push $r17
+ push $r18
+ jmpi $r13
+
+ .global __prolog_$r19
+__prolog_$r19:
+ exi $r13,$sp,0
+ push $r14
+ push $r15
+ push $r16
+ push $r17
+ push $r18
+ push $r19
+ jmpi $r13
+
+ .global __prolog_$r20
+__prolog_$r20:
+ exi $r13,$sp,0
+ push $r14
+ push $r15
+ push $r16
+ push $r17
+ push $r18
+ push $r19
+ push $r20
+ jmpi $r13
+
+ .global __prolog_$r21
+__prolog_$r21:
+ exi $r13,$sp,0
+ push $r14
+ push $r15
+ push $r16
+ push $r17
+ push $r18
+ push $r19
+ push $r20
+ push $r21
+ jmpi $r13
+
+ .global __prolog_$r22
+__prolog_$r22:
+ exi $r13,$sp,0
+ push $r14
+ push $r15
+ push $r16
+ push $r17
+ push $r18
+ push $r19
+ push $r20
+ push $r21
+ push $r22
+ jmpi $r13
+
+ .global __prolog_$r23
+__prolog_$r23:
+ exi $r13,$sp,0
+ push $r14
+ push $r15
+ push $r16
+ push $r17
+ push $r18
+ push $r19
+ push $r20
+ push $r21
+ push $r22
+ push $r23
+ jmpi $r13
+
+ .global __prolog_$r24
+__prolog_$r24:
+ exi $r13,$sp,0
+ push $r14
+ push $r15
+ push $r16
+ push $r17
+ push $r18
+ push $r19
+ push $r20
+ push $r21
+ push $r22
+ push $r23
+ push $r24
+ jmpi $r13
+
+ .global __prolog_$r25
+__prolog_$r25:
+ exi $r13,$sp,0
+ push $r14
+ push $r15
+ push $r16
+ push $r17
+ push $r18
+ push $r19
+ push $r20
+ push $r21
+ push $r22
+ push $r23
+ push $r24
+ push $r25
+ jmpi $r13
+
+ .global __prolog_$r26
+__prolog_$r26:
+ exi $r13,$sp,0
+ push $r14
+ push $r15
+ push $r16
+ push $r17
+ push $r18
+ push $r19
+ push $r20
+ push $r21
+ push $r22
+ push $r23
+ push $r24
+ push $r25
+ push $r26
+ jmpi $r13
+
+ .global __prolog_$r27
+__prolog_$r27:
+ exi $r13,$sp,0
+ push $r14
+ push $r15
+ push $r16
+ push $r17
+ push $r18
+ push $r19
+ push $r20
+ push $r21
+ push $r22
+ push $r23
+ push $r24
+ push $r25
+ push $r26
+ push $r27
+ jmpi $r13
+
+ .global __prolog_$r28
+__prolog_$r28:
+ exi $r13,$sp,0
+ push $r14
+ push $r15
+ push $r16
+ push $r17
+ push $r18
+ push $r19
+ push $r20
+ push $r21
+ push $r22
+ push $r23
+ push $r24
+ push $r25
+ push $r26
+ push $r27
+ push $r28
+ jmpi $r13
--- /dev/null
+/* Configuration of libgcc's soft-fp software floating point for the
+   FT32 target: 32-bit words, so double uses 2 words and quad uses 4.  */
+#define _FP_W_TYPE_SIZE 32
+#define _FP_W_TYPE unsigned long
+#define _FP_WS_TYPE signed long
+#define _FP_I_TYPE long
+
+/* The type of the result of a floating point comparison. This must
+   match `__libgcc_cmp_return__' in GCC for the target. */
+typedef int __gcc_CMPtype __attribute__ ((mode (__libgcc_cmp_return__)));
+#define CMPtype __gcc_CMPtype
+
+/* Fraction multiplication: umul_ppmm-based wide multiplies sized per
+   format (1, 2, and 4 words for S, D, and Q).  */
+#define _FP_MUL_MEAT_S(R,X,Y) \
+ _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
+#define _FP_MUL_MEAT_D(R,X,Y) \
+ _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
+#define _FP_MUL_MEAT_Q(R,X,Y) \
+ _FP_MUL_MEAT_4_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
+
+/* Fraction division: bitwise loop for single, udiv-based for the
+   wider formats.  */
+#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_loop(S,R,X,Y)
+#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y)
+#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_4_udiv(Q,R,X,Y)
+
+/* Default NaN: all fraction bits set, sign clear.  */
+#define _FP_NANFRAC_S ((_FP_QNANBIT_S << 1) - 1)
+#define _FP_NANFRAC_D ((_FP_QNANBIT_D << 1) - 1), -1
+#define _FP_NANFRAC_Q ((_FP_QNANBIT_Q << 1) - 1), -1, -1, -1
+#define _FP_NANSIGN_S 0
+#define _FP_NANSIGN_D 0
+#define _FP_NANSIGN_Q 0
+
+#define _FP_KEEPNANFRACP 1
+#define _FP_QNANNEGATEDP 0
+
+/* Someone please check this. */
+/* NaN propagation: prefer the operand that is a quiet NaN; mark the
+   result as NaN.  */
+#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
+ do { \
+ if ((_FP_FRAC_HIGH_RAW_##fs(X) & _FP_QNANBIT_##fs) \
+ && !(_FP_FRAC_HIGH_RAW_##fs(Y) & _FP_QNANBIT_##fs)) \
+ { \
+ R##_s = Y##_s; \
+ _FP_FRAC_COPY_##wc(R,Y); \
+ } \
+ else \
+ { \
+ R##_s = X##_s; \
+ _FP_FRAC_COPY_##wc(R,X); \
+ } \
+ R##_c = FP_CLS_NAN; \
+ } while (0)
+
+/* Not checked. */
+#define _FP_TININESS_AFTER_ROUNDING 0
+
+#define __LITTLE_ENDIAN 1234
+#define __BIG_ENDIAN 4321
+
+/* FT32 is little-endian (per the define below).  */
+# define __BYTE_ORDER __LITTLE_ENDIAN
+
+/* Define ALIASNAME as a strong alias for NAME. */
+# define strong_alias(name, aliasname) _strong_alias(name, aliasname)
+# define _strong_alias(name, aliasname) \
+ extern __typeof (name) aliasname __attribute__ ((alias (#name)));
+
--- /dev/null
+# libgcc build fragment for FT32.
+# LIB2ADD: extra assembly sources compiled into libgcc proper.
+LIB2ADD = $(srcdir)/config/ft32/epilog.S $(srcdir)/config/ft32/prolog.S
+
+# crti-hw.S needs the C preprocessor, so force assembler-with-cpp.
+crti-hw.o: $(srcdir)/config/ft32/crti-hw.S
+ $(crt_compile) -c -x assembler-with-cpp $<