+2017-02-06 Palmer Dabbelt <palmer@dabbelt.com>
+
+ * config.host: Add RISC-V tuples.
+ * config/riscv/atomic.c: New file.
+ * config/riscv/crti.S: Likewise.
+ * config/riscv/crtn.S: Likewise.
+ * config/riscv/div.S: Likewise.
+ * config/riscv/linux-unwind.h: Likewise.
+ * config/riscv/muldi3.S: Likewise.
+ * config/riscv/multi3.S: Likewise.
+ * config/riscv/save-restore.S: Likewise.
+ * config/riscv/sfp-machine.h: Likewise.
+ * config/riscv/t-elf: Likewise.
+ * config/riscv/t-elf32: Likewise.
+ * config/riscv/t-elf64: Likewise.
+ * config/riscv/t-softfp32: Likewise.
+ * config/riscv/t-softfp64: Likewise.
+
2017-01-24 Jakub Jelinek <jakub@redhat.com>
* soft-fp/op-common.h (_FP_MUL, _FP_FMA, _FP_DIV): Add
;;
rs6000*-*-*)
;;
+riscv*-*-*)
+ cpu_type=riscv
+ ;;
sparc64*-*-*)
cpu_type=sparc
;;
tmake_file="${tmake_file} rs6000/t-ppccomm rs6000/t-crtstuff t-crtstuff-pic t-fdpbit"
extra_parts="$extra_parts crtbegin.o crtend.o crtbeginS.o crtendS.o crtbeginT.o ecrti.o ecrtn.o ncrti.o ncrtn.o"
;;
+riscv*-*-linux*)
+ tmake_file="${tmake_file} riscv/t-softfp${host_address} t-softfp riscv/t-elf riscv/t-elf${host_address}"
+ extra_parts="$extra_parts crtbegin.o crtend.o crti.o crtn.o crtendS.o crtbeginT.o"
+ md_unwind_header=riscv/linux-unwind.h
+ ;;
+riscv*-*-*)
+ tmake_file="${tmake_file} riscv/t-softfp${host_address} t-softfp riscv/t-elf riscv/t-elf${host_address}"
+ extra_parts="$extra_parts crtbegin.o crtend.o crti.o crtn.o"
+ ;;
rs6000-ibm-aix4.[3456789]* | powerpc-ibm-aix4.[3456789]*)
md_unwind_header=rs6000/aix-unwind.h
tmake_file="t-fdpbit rs6000/t-ppc64-fp rs6000/t-slibgcc-aix rs6000/t-ibm-ldouble"
--- /dev/null
+/* Legacy sub-word atomics for RISC-V.
+
+ Copyright (C) 2016-2017 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#ifdef __riscv_atomic
+
+#include <stdbool.h>
+
+#define INVERT "not %[tmp1], %[tmp1]\n\t"
+#define DONT_INVERT ""
+
+#define GENERATE_FETCH_AND_OP(type, size, opname, insn, invert, cop) \
+ type __sync_fetch_and_ ## opname ## _ ## size (type *p, type v) \
+ { \
+ unsigned long aligned_addr = ((unsigned long) p) & ~3UL; \
+ int shift = (((unsigned long) p) & 3) * 8; \
+ unsigned mask = ((1U << ((sizeof v) * 8)) - 1) << shift; \
+ unsigned old, tmp1, tmp2; \
+ \
+ asm volatile ("1:\n\t" \
+ "lr.w.aq %[old], %[mem]\n\t" \
+ #insn " %[tmp1], %[old], %[value]\n\t" \
+ invert \
+ "and %[tmp1], %[tmp1], %[mask]\n\t" \
+ "and %[tmp2], %[old], %[not_mask]\n\t" \
+ "or %[tmp2], %[tmp2], %[tmp1]\n\t" \
+ "sc.w.rl %[tmp1], %[tmp2], %[mem]\n\t" \
+ "bnez %[tmp1], 1b" \
+ : [old] "=&r" (old), \
+ [mem] "+A" (*(volatile unsigned*) aligned_addr), \
+ [tmp1] "=&r" (tmp1), \
+ [tmp2] "=&r" (tmp2) \
+ : [value] "r" (((unsigned) v) << shift), \
+ [mask] "r" (mask), \
+ [not_mask] "r" (~mask)); \
+ \
+ return (type) (old >> shift); \
+ } \
+ \
+ type __sync_ ## opname ## _and_fetch_ ## size (type *p, type v) \
+ { \
+ type o = __sync_fetch_and_ ## opname ## _ ## size (p, v); \
+ return cop; \
+ }
+
+#define GENERATE_COMPARE_AND_SWAP(type, size) \
+ type __sync_val_compare_and_swap_ ## size (type *p, type o, type n) \
+ { \
+ unsigned long aligned_addr = ((unsigned long) p) & ~3UL; \
+ int shift = (((unsigned long) p) & 3) * 8; \
+ unsigned mask = ((1U << ((sizeof o) * 8)) - 1) << shift; \
+ unsigned old, tmp1; \
+ \
+ asm volatile ("1:\n\t" \
+ "lr.w.aq %[old], %[mem]\n\t" \
+ "and %[tmp1], %[old], %[mask]\n\t" \
+ "bne %[tmp1], %[o], 1f\n\t" \
+ "and %[tmp1], %[old], %[not_mask]\n\t" \
+ "or %[tmp1], %[tmp1], %[n]\n\t" \
+ "sc.w.rl %[tmp1], %[tmp1], %[mem]\n\t" \
+ "bnez %[tmp1], 1b\n\t" \
+ "1:" \
+ : [old] "=&r" (old), \
+ [mem] "+A" (*(volatile unsigned*) aligned_addr), \
+ [tmp1] "=&r" (tmp1) \
+ : [o] "r" ((((unsigned) o) << shift) & mask), \
+ [n] "r" ((((unsigned) n) << shift) & mask), \
+ [mask] "r" (mask), \
+ [not_mask] "r" (~mask)); \
+ \
+ return (type) (old >> shift); \
+ } \
+ bool __sync_bool_compare_and_swap_ ## size (type *p, type o, type n) \
+ { \
+ return __sync_val_compare_and_swap_ ## size (p, o, n) == o; \
+ }
+
+#define GENERATE_ALL(type, size) \
+ GENERATE_FETCH_AND_OP(type, size, add, add, DONT_INVERT, o + v) \
+ GENERATE_FETCH_AND_OP(type, size, sub, sub, DONT_INVERT, o - v) \
+ GENERATE_FETCH_AND_OP(type, size, and, and, DONT_INVERT, o & v) \
+ GENERATE_FETCH_AND_OP(type, size, xor, xor, DONT_INVERT, o ^ v) \
+ GENERATE_FETCH_AND_OP(type, size, or, or, DONT_INVERT, o | v) \
+ GENERATE_FETCH_AND_OP(type, size, nand, and, INVERT, ~(o & v)) \
+ GENERATE_COMPARE_AND_SWAP(type, size)
+
+GENERATE_ALL(unsigned char, 1)
+GENERATE_ALL(unsigned short, 2)
+
+#endif
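
For illustration, the byte-size fetch-and-add generated by GENERATE_ALL
behaves like the following plain-C model of the word-masking scheme (a
non-atomic sketch; the real routine performs the whole read-modify-write
inside the lr.w.aq/sc.w.rl retry loop, and the function name here is
illustrative):

    /* Non-atomic model of __sync_fetch_and_add_1: operate on the
       containing aligned word, splice the new byte in, and return the
       old byte.  */
    unsigned char
    model_fetch_and_add_1 (unsigned char *p, unsigned char v)
    {
      unsigned *word = (unsigned *) ((unsigned long) p & ~3UL);
      int shift = ((unsigned long) p & 3) * 8;
      unsigned mask = 0xffU << shift;

      unsigned old = *word;
      unsigned sum = (old + ((unsigned) v << shift)) & mask;
      *word = (old & ~mask) | sum;
      return (unsigned char) (old >> shift);
    }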
--- /dev/null
+/* crti.S is empty because .init_array/.fini_array are used exclusively. */
--- /dev/null
+/* crtn.S is empty because .init_array/.fini_array are used exclusively. */
--- /dev/null
+/* Integer division routines for RISC-V.
+
+ Copyright (C) 2016-2017 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+ .text
+ .align 2
+
+#if __riscv_xlen == 32
+/* Our RV64 64-bit routines are equivalent to our RV32 32-bit routines. */
+# define __udivdi3 __udivsi3
+# define __umoddi3 __umodsi3
+# define __divdi3 __divsi3
+# define __moddi3 __modsi3
+#else
+ .globl __udivsi3
+__udivsi3:
+ /* Compute __udivdi3(a0 << 32, a1 << 32); cast result to uint32_t. */
+ sll a0, a0, 32
+ sll a1, a1, 32
+ move t0, ra
+ jal __udivdi3
+ sext.w a0, a0
+ jr t0
+
+ .globl __umodsi3
+__umodsi3:
+ /* Compute __udivdi3((uint32_t)a0, (uint32_t)a1); the remainder ends up in a1. */
+ sll a0, a0, 32
+ sll a1, a1, 32
+ srl a0, a0, 32
+ srl a1, a1, 32
+ move t0, ra
+ jal __udivdi3
+ sext.w a0, a1
+ jr t0
+
+ .globl __modsi3
+ __modsi3 = __moddi3
+
+ .globl __divsi3
+__divsi3:
+ /* Check for special case of INT_MIN/-1. Otherwise, fall into __divdi3. */
+ li t0, -1
+ beq a1, t0, .L20
+#endif
+
+ .globl __divdi3
+__divdi3:
+ bltz a0, .L10
+ bltz a1, .L11
+ /* Both operands are now non-negative, so fall into __udivdi3. */
+
+ .globl __udivdi3
+__udivdi3:
+ mv a2, a1
+ mv a1, a0
+ li a0, -1
+ beqz a2, .L5
+ li a3, 1
+ bgeu a2, a1, .L2
+.L1:
+ blez a2, .L2
+ slli a2, a2, 1
+ slli a3, a3, 1
+ bgtu a1, a2, .L1
+.L2:
+ li a0, 0
+.L3:
+ bltu a1, a2, .L4
+ sub a1, a1, a2
+ or a0, a0, a3
+.L4:
+ srli a3, a3, 1
+ srli a2, a2, 1
+ bnez a3, .L3
+.L5:
+ ret
+
+ .globl __umoddi3
+__umoddi3:
+ /* Call __udivdi3(a0, a1), then return the remainder, which is in a1. */
+ move t0, ra
+ jal __udivdi3
+ move a0, a1
+ jr t0
+
+ /* Handle negative arguments to __divdi3. */
+.L10:
+ neg a0, a0
+ bgez a1, .L12 /* Compute __udivdi3(-a0, a1), then negate the result. */
+ neg a1, a1
+ j __udivdi3 /* Compute __udivdi3(-a0, -a1). */
+.L11: /* Compute __udivdi3(a0, -a1), then negate the result. */
+ neg a1, a1
+.L12:
+ move t0, ra
+ jal __udivdi3
+ neg a0, a0
+ jr t0
+
+ .globl __moddi3
+__moddi3:
+ move t0, ra
+ bltz a1, .L31
+ bltz a0, .L32
+.L30:
+ jal __udivdi3 /* The dividend is not negative. */
+ move a0, a1
+ jr t0
+.L31:
+ neg a1, a1
+ bgez a0, .L30
+.L32:
+ neg a0, a0
+ jal __udivdi3 /* The dividend is negative. */
+ neg a0, a1
+ jr t0
+
+#if __riscv_xlen == 64
+ /* Continuation of __divsi3: INT_MIN / -1 would overflow, so return INT_MIN unchanged. */
+.L20:
+ sll t0, t0, 31
+ bne a0, t0, __divdi3
+ ret
+#endif
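
The __udivdi3 loop above is a textbook shift-and-subtract divider: the
divisor is left-aligned under the dividend, then one quotient bit is
produced per step as the divisor is shifted back down.  A plain-C
sketch of the same algorithm (names are illustrative; num/den/quot/bit
mirror a1/a2/a0/a3):

    unsigned long
    model_udiv (unsigned long num, unsigned long den, unsigned long *rem)
    {
      unsigned long quot = 0, bit = 1;

      if (den == 0)
        {
          *rem = num;
          return -1UL;        /* all-ones on division by zero, as above */
        }

      /* Left-align the divisor under the dividend.  */
      while (den < num && !(den >> (8 * sizeof den - 1)))
        {
          den <<= 1;
          bit <<= 1;
        }

      /* One quotient bit per iteration.  */
      while (bit)
        {
          if (num >= den)
            {
              num -= den;
              quot |= bit;
            }
          den >>= 1;
          bit >>= 1;
        }

      *rem = num;             /* the asm leaves the remainder in a1 */
      return quot;
    }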
--- /dev/null
+/* Copyright (C) 2016-2017 Free Software Foundation, Inc.
+
+ This file is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 3, or (at your option) any
+ later version.
+
+ This file is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef inhibit_libc
+
+#include <signal.h>
+#include <stdint.h>
+#include <sys/ucontext.h>
+
+#define LI_A7_8B 0x08b00893
+#define ECALL 0x00000073
+
+#define MD_FALLBACK_FRAME_STATE_FOR riscv_fallback_frame_state
+
+static _Unwind_Reason_Code
+riscv_fallback_frame_state (struct _Unwind_Context *context,
+ _Unwind_FrameState * fs)
+{
+ /* The kernel creates an rt_sigframe on the stack immediately prior
+ to delivering a signal.
+
+ This structure must have the same shape as the Linux kernel
+ equivalent. */
+ struct rt_sigframe
+ {
+ siginfo_t info;
+ ucontext_t uc;
+ };
+
+ struct rt_sigframe *rt_;
+ _Unwind_Ptr new_cfa;
+ uint16_t *pc = context->ra;
+ struct sigcontext *sc;
+ int i;
+
+ /* A signal frame will have a return address pointing to
+ __default_sa_restorer. This code is hardwired as:
+
+ 0x08b00893 li a7,0x8b
+ 0x00000073 ecall
+
+ Note, the PC might only have 2-byte alignment.
+ */
+ if (pc[0] != (uint16_t)LI_A7_8B || pc[1] != (uint16_t)(LI_A7_8B >> 16)
+ || pc[2] != (uint16_t)ECALL || pc[3] != (uint16_t)(ECALL >> 16))
+ return _URC_END_OF_STACK;
+
+ rt_ = context->cfa;
+ sc = &rt_->uc.uc_mcontext;
+
+ new_cfa = (_Unwind_Ptr) sc;
+ fs->regs.cfa_how = CFA_REG_OFFSET;
+ fs->regs.cfa_reg = __LIBGCC_STACK_POINTER_REGNUM__;
+ fs->regs.cfa_offset = new_cfa - (_Unwind_Ptr) context->cfa;
+
+ for (i = 0; i < 32; i++)
+ {
+ fs->regs.reg[i].how = REG_SAVED_OFFSET;
+ fs->regs.reg[i].loc.offset = (_Unwind_Ptr) &sc->gregs[i] - new_cfa;
+ }
+
+ fs->signal_frame = 1;
+ fs->retaddr_column = __LIBGCC_DWARF_ALT_FRAME_RETURN_COLUMN__;
+ fs->regs.reg[fs->retaddr_column].how = REG_SAVED_VAL_OFFSET;
+ fs->regs.reg[fs->retaddr_column].loc.offset =
+ (_Unwind_Ptr) sc->gregs[0] - new_cfa;
+
+ return _URC_NO_REASON;
+}
+
+#endif
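
Because the C extension guarantees only 2-byte code alignment, the
return address may straddle a 4-byte boundary, which is why the check
above compares the trampoline 16 bits at a time.  A self-contained
illustration of the same split (assuming little-endian byte order, as
on all current RISC-V Linux targets; the function name is illustrative):

    #include <stdint.h>

    /* Match "li a7, 0x8b; ecall" (0x8b is __NR_rt_sigreturn) as four
       little-endian halfwords, exactly as the unwinder above does.  */
    static int
    is_sigreturn_trampoline (const uint16_t *pc)
    {
      const uint32_t li_a7_8b = 0x08b00893;
      const uint32_t ecall = 0x00000073;

      return pc[0] == (uint16_t) li_a7_8b
             && pc[1] == (uint16_t) (li_a7_8b >> 16)
             && pc[2] == (uint16_t) ecall
             && pc[3] == (uint16_t) (ecall >> 16);
    }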
--- /dev/null
+/* Integer multiplication routines for RISC-V.
+
+ Copyright (C) 2016-2017 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+ .text
+ .align 2
+
+#if __riscv_xlen == 32
+/* Our RV64 64-bit routine is equivalent to our RV32 32-bit routine. */
+# define __muldi3 __mulsi3
+#endif
+
+ .globl __muldi3
+__muldi3:
+ mv a2, a0
+ li a0, 0
+.L1:
+ andi a3, a1, 1
+ beqz a3, .L2
+ add a0, a0, a2
+.L2:
+ srli a1, a1, 1
+ slli a2, a2, 1
+ bnez a1, .L1
+ ret
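
The loop above is a straightforward shift-and-add multiplier, one add
per set bit of the multiplier.  The same algorithm in C (a sketch;
prod, a, and b mirror a0, a2, and a1):

    unsigned long
    model_mul (unsigned long a, unsigned long b)
    {
      unsigned long prod = 0;

      while (b)
        {
          if (b & 1)
            prod += a;        /* add the shifted multiplicand */
          a <<= 1;
          b >>= 1;
        }
      return prod;
    }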
--- /dev/null
+/* Integer multiplication routines for RISC-V.
+
+ Copyright (C) 2016-2017 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+ .text
+ .align 2
+
+#if __riscv_xlen == 32
+/* Our RV64 64-bit routines are equivalent to our RV32 32-bit routines. */
+# define __multi3 __muldi3
+#endif
+
+ .globl __multi3
+__multi3:
+
+#if __riscv_xlen == 32
+/* The __muldi3 calls in the body below need the word-size multiply,
+   which on RV32 is __mulsi3.  */
+# define __muldi3 __mulsi3
+#endif
+
+/* We rely on the fact that __muldi3 doesn't clobber the t-registers. */
+
+ mv t0, ra
+ mv t5, a0
+ mv a0, a1
+ mv t6, a3
+ mv a1, t5
+ mv a4, a2
+ li a5, 0
+ li t2, 0
+ li t4, 0
+.L1:
+ add a6, t2, a1
+ andi t3, a4, 1
+ slli a7, a5, 1
+ slti t1, a1, 0
+ srli a4, a4, 1
+ add a5, t4, a5
+ beqz t3, .L2
+ sltu t3, a6, t2
+ mv t2, a6
+ add t4, t3, a5
+.L2:
+ slli a1, a1, 1
+ or a5, t1, a7
+ bnez a4, .L1
+ beqz a0, .L3
+ mv a1, a2
+ call __muldi3
+ add t4, t4, a0
+.L3:
+ beqz t6, .L4
+ mv a1, t6
+ mv a0, t5
+ call __muldi3
+ add t4, t4, a0
+.L4:
+ mv a0, t2
+ mv a1, t4
+ jr t0
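
The structure of __multi3 is easier to see in C: the low halves are
multiplied with a bit-serial widening loop (the double-wide accumulator
mirrors the t2/t4 register pair), and the two cross products, which can
only affect the high word, are delegated to __muldi3.  A sketch, with
uw standing for one machine word and plain multiplies standing in for
the __muldi3 calls:

    typedef unsigned long uw;

    void
    model_multi3 (uw a_lo, uw a_hi, uw b_lo, uw b_hi, uw *r_lo, uw *r_hi)
    {
      uw lo = 0, hi = 0;
      uw m_lo = a_lo, m_hi = 0;     /* widening copy of a_lo */

      /* (hi:lo) += a_lo * b_lo, bit-serially.  */
      for (uw bits = b_lo; bits; bits >>= 1)
        {
          if (bits & 1)
            {
              uw old = lo;
              lo += m_lo;
              hi += m_hi + (lo < old);    /* carry out of the low word */
            }
          m_hi = (m_hi << 1) | (m_lo >> (8 * sizeof (uw) - 1));
          m_lo <<= 1;
        }

      /* The cross products wrap into the high word only.  */
      hi += a_hi * b_lo + a_lo * b_hi;

      *r_lo = lo;
      *r_hi = hi;
    }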
--- /dev/null
+/* Callee-saved register spill and fill routines for RISC-V.
+
+ Copyright (C) 2016-2017 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+ .text
+
+ .globl __riscv_save_12
+ .globl __riscv_save_11
+ .globl __riscv_save_10
+ .globl __riscv_save_9
+ .globl __riscv_save_8
+ .globl __riscv_save_7
+ .globl __riscv_save_6
+ .globl __riscv_save_5
+ .globl __riscv_save_4
+ .globl __riscv_save_3
+ .globl __riscv_save_2
+ .globl __riscv_save_1
+ .globl __riscv_save_0
+
+ .globl __riscv_restore_12
+ .globl __riscv_restore_11
+ .globl __riscv_restore_10
+ .globl __riscv_restore_9
+ .globl __riscv_restore_8
+ .globl __riscv_restore_7
+ .globl __riscv_restore_6
+ .globl __riscv_restore_5
+ .globl __riscv_restore_4
+ .globl __riscv_restore_3
+ .globl __riscv_restore_2
+ .globl __riscv_restore_1
+ .globl __riscv_restore_0
+
+#if __riscv_xlen == 64
+
+__riscv_save_12:
+ .cfi_startproc
+ # The __riscv_save_* routines use t0/x5 as their return address.
+ .cfi_return_column 5
+ addi sp, sp, -112
+ .cfi_def_cfa_offset 112
+ li t1, 0
+ sd s11, 8(sp)
+ .cfi_offset 27, -104
+ j .Ls10
+
+__riscv_save_11:
+__riscv_save_10:
+ .cfi_restore 27
+ addi sp, sp, -112
+ .cfi_def_cfa_offset 112
+ li t1, -16
+.Ls10:
+ sd s10, 16(sp)
+ .cfi_offset 26, -96
+ sd s9, 24(sp)
+ .cfi_offset 25, -88
+ j .Ls8
+
+__riscv_save_9:
+__riscv_save_8:
+ .cfi_restore 25
+ .cfi_restore 26
+ .cfi_restore 27
+ addi sp, sp, -112
+ .cfi_def_cfa_offset 112
+ li t1, -32
+.Ls8:
+ sd s8, 32(sp)
+ .cfi_offset 24, -80
+ sd s7, 40(sp)
+ .cfi_offset 23, -72
+ j .Ls6
+
+__riscv_save_7:
+__riscv_save_6:
+ .cfi_restore 23
+ .cfi_restore 24
+ .cfi_restore 25
+ .cfi_restore 26
+ .cfi_restore 27
+ addi sp, sp, -112
+ .cfi_def_cfa_offset 112
+ li t1, -48
+.Ls6:
+ sd s6, 48(sp)
+ .cfi_offset 22, -64
+ sd s5, 56(sp)
+ .cfi_offset 21, -56
+ j .Ls4
+
+__riscv_save_5:
+__riscv_save_4:
+ .cfi_restore 21
+ .cfi_restore 22
+ .cfi_restore 23
+ .cfi_restore 24
+ .cfi_restore 25
+ .cfi_restore 26
+ .cfi_restore 27
+ addi sp, sp, -112
+ .cfi_def_cfa_offset 112
+ li t1, -64
+.Ls4:
+ sd s4, 64(sp)
+ .cfi_offset 20, -48
+ sd s3, 72(sp)
+ .cfi_offset 19, -40
+ j .Ls2
+
+__riscv_save_3:
+__riscv_save_2:
+ .cfi_restore 19
+ .cfi_restore 20
+ .cfi_restore 21
+ .cfi_restore 22
+ .cfi_restore 23
+ .cfi_restore 24
+ .cfi_restore 25
+ .cfi_restore 26
+ .cfi_restore 27
+ addi sp, sp, -112
+ .cfi_def_cfa_offset 112
+ li t1, -80
+.Ls2:
+ sd s2, 80(sp)
+ .cfi_offset 18, -32
+ sd s1, 88(sp)
+ .cfi_offset 9, -24
+ sd s0, 96(sp)
+ .cfi_offset 8, -16
+ sd ra, 104(sp)
+ .cfi_offset 1, -8
+ # The CFA info is not correct for the next two instructions, since
+ # t1's value depends on how many registers were actually saved.
+ sub sp, sp, t1
+ jr t0
+ .cfi_endproc
+
+__riscv_save_1:
+__riscv_save_0:
+ .cfi_startproc
+ # The __riscv_save_* routines use t0/x5 as their return address.
+ .cfi_return_column 5
+ addi sp, sp, -16
+ .cfi_def_cfa_offset 16
+ sd s0, 0(sp)
+ .cfi_offset 8, -16
+ sd ra, 8(sp)
+ .cfi_offset 1, -8
+ jr t0
+ .cfi_endproc
+
+__riscv_restore_12:
+ .cfi_startproc
+ .cfi_def_cfa_offset 112
+ .cfi_offset 27, -104
+ .cfi_offset 26, -96
+ .cfi_offset 25, -88
+ .cfi_offset 24, -80
+ .cfi_offset 23, -72
+ .cfi_offset 22, -64
+ .cfi_offset 21, -56
+ .cfi_offset 20, -48
+ .cfi_offset 19, -40
+ .cfi_offset 18, -32
+ .cfi_offset 9, -24
+ .cfi_offset 8, -16
+ .cfi_offset 1, -8
+ ld s11, 8(sp)
+ .cfi_restore 27
+ addi sp, sp, 16
+
+__riscv_restore_11:
+__riscv_restore_10:
+ .cfi_restore 27
+ .cfi_def_cfa_offset 96
+ ld s10, 0(sp)
+ .cfi_restore 26
+ ld s9, 8(sp)
+ .cfi_restore 25
+ addi sp, sp, 16
+
+__riscv_restore_9:
+__riscv_restore_8:
+ .cfi_restore 25
+ .cfi_restore 26
+ .cfi_restore 27
+ .cfi_def_cfa_offset 80
+ ld s8, 0(sp)
+ .cfi_restore 24
+ ld s7, 8(sp)
+ .cfi_restore 23
+ addi sp, sp, 16
+
+__riscv_restore_7:
+__riscv_restore_6:
+ .cfi_restore 23
+ .cfi_restore 24
+ .cfi_restore 25
+ .cfi_restore 26
+ .cfi_restore 27
+ .cfi_def_cfa_offset 64
+ ld s6, 0(sp)
+ .cfi_restore 22
+ ld s5, 8(sp)
+ .cfi_restore 21
+ addi sp, sp, 16
+
+__riscv_restore_5:
+__riscv_restore_4:
+ .cfi_restore 21
+ .cfi_restore 22
+ .cfi_restore 23
+ .cfi_restore 24
+ .cfi_restore 25
+ .cfi_restore 26
+ .cfi_restore 27
+ .cfi_def_cfa_offset 48
+ ld s4, 0(sp)
+ .cfi_restore 20
+ ld s3, 8(sp)
+ .cfi_restore 19
+ addi sp, sp, 16
+
+__riscv_restore_3:
+__riscv_restore_2:
+ .cfi_restore 19
+ .cfi_restore 20
+ .cfi_restore 21
+ .cfi_restore 22
+ .cfi_restore 23
+ .cfi_restore 24
+ .cfi_restore 25
+ .cfi_restore 26
+ .cfi_restore 27
+ .cfi_def_cfa_offset 32
+ ld s2, 0(sp)
+ .cfi_restore 18
+ ld s1, 8(sp)
+ .cfi_restore 9
+ addi sp, sp, 16
+
+__riscv_restore_1:
+__riscv_restore_0:
+ .cfi_restore 9
+ .cfi_restore 18
+ .cfi_restore 19
+ .cfi_restore 20
+ .cfi_restore 21
+ .cfi_restore 22
+ .cfi_restore 23
+ .cfi_restore 24
+ .cfi_restore 25
+ .cfi_restore 26
+ .cfi_restore 27
+ .cfi_def_cfa_offset 16
+ ld s0, 0(sp)
+ .cfi_restore 8
+ ld ra, 8(sp)
+ .cfi_restore 1
+ addi sp, sp, 16
+ .cfi_def_cfa_offset 0
+ ret
+ .cfi_endproc
+
+#else
+
+__riscv_save_12:
+ .cfi_startproc
+ # The __riscv_save_* routines use t0/x5 as their return address.
+ .cfi_return_column 5
+ addi sp, sp, -64
+ .cfi_def_cfa_offset 64
+ li t1, 0
+ sw s11, 12(sp)
+ .cfi_offset 27, -52
+ j .Ls10
+
+__riscv_save_11:
+__riscv_save_10:
+__riscv_save_9:
+__riscv_save_8:
+ .cfi_restore 27
+ addi sp, sp, -64
+ .cfi_def_cfa_offset 64
+ li t1, -16
+.Ls10:
+ sw s10, 16(sp)
+ .cfi_offset 26, -48
+ sw s9, 20(sp)
+ .cfi_offset 25, -44
+ sw s8, 24(sp)
+ .cfi_offset 24, -40
+ sw s7, 28(sp)
+ .cfi_offset 23, -36
+ j .Ls6
+
+__riscv_save_7:
+__riscv_save_6:
+__riscv_save_5:
+__riscv_save_4:
+ .cfi_restore 23
+ .cfi_restore 24
+ .cfi_restore 25
+ .cfi_restore 26
+ .cfi_restore 27
+ addi sp, sp, -64
+ .cfi_def_cfa_offset 64
+ li t1, -32
+.Ls6:
+ sw s6, 32(sp)
+ .cfi_offset 22, -32
+ sw s5, 36(sp)
+ .cfi_offset 21, -28
+ sw s4, 40(sp)
+ .cfi_offset 20, -24
+ sw s3, 44(sp)
+ .cfi_offset 19, -20
+ sw s2, 48(sp)
+ .cfi_offset 18, -16
+ sw s1, 52(sp)
+ .cfi_offset 9, -12
+ sw s0, 56(sp)
+ .cfi_offset 8, -8
+ sw ra, 60(sp)
+ .cfi_offset 1, -4
+ # The CFA info is not correct for the next two instructions, since
+ # t1's value depends on how many registers were actually saved.
+ sub sp, sp, t1
+ jr t0
+ .cfi_endproc
+
+__riscv_save_3:
+__riscv_save_2:
+__riscv_save_1:
+__riscv_save_0:
+ .cfi_startproc
+ # The __riscv_save_* routines use t0/x5 as their return address.
+ .cfi_return_column 5
+ addi sp, sp, -16
+ .cfi_def_cfa_offset 16
+ sw s2, 0(sp)
+ .cfi_offset 18, -16
+ sw s1, 4(sp)
+ .cfi_offset 9, -12
+ sw s0, 8(sp)
+ .cfi_offset 8, -8
+ sw ra, 12(sp)
+ .cfi_offset 1, -4
+ jr t0
+ .cfi_endproc
+
+__riscv_restore_12:
+ .cfi_startproc
+ .cfi_def_cfa_offset 64
+ .cfi_offset 27, -52
+ .cfi_offset 26, -48
+ .cfi_offset 25, -44
+ .cfi_offset 24, -40
+ .cfi_offset 23, -36
+ .cfi_offset 22, -32
+ .cfi_offset 21, -28
+ .cfi_offset 20, -24
+ .cfi_offset 19, -20
+ .cfi_offset 18, -16
+ .cfi_offset 9, -12
+ .cfi_offset 8, -8
+ .cfi_offset 1, -4
+ lw s11, 12(sp)
+ .cfi_restore 27
+ addi sp, sp, 16
+
+__riscv_restore_11:
+__riscv_restore_10:
+__riscv_restore_9:
+__riscv_restore_8:
+ .cfi_restore 27
+ .cfi_def_cfa_offset 48
+ lw s10, 0(sp)
+ .cfi_restore 26
+ lw s9, 4(sp)
+ .cfi_restore 25
+ lw s8, 8(sp)
+ .cfi_restore 24
+ lw s7, 12(sp)
+ .cfi_restore 23
+ addi sp, sp, 16
+
+__riscv_restore_7:
+__riscv_restore_6:
+__riscv_restore_5:
+__riscv_restore_4:
+ .cfi_restore 23
+ .cfi_restore 24
+ .cfi_restore 25
+ .cfi_restore 26
+ .cfi_restore 27
+ .cfi_def_cfa_offset 32
+ lw s6, 0(sp)
+ .cfi_restore 22
+ lw s5, 4(sp)
+ .cfi_restore 21
+ lw s4, 8(sp)
+ .cfi_restore 20
+ lw s3, 12(sp)
+ .cfi_restore 19
+ addi sp, sp, 16
+
+__riscv_restore_3:
+__riscv_restore_2:
+__riscv_restore_1:
+__riscv_restore_0:
+ .cfi_restore 19
+ .cfi_restore 20
+ .cfi_restore 21
+ .cfi_restore 22
+ .cfi_restore 23
+ .cfi_restore 24
+ .cfi_restore 25
+ .cfi_restore 26
+ .cfi_restore 27
+ .cfi_def_cfa_offset 16
+ lw s2, 0(sp)
+ .cfi_restore 18
+ lw s1, 4(sp)
+ .cfi_restore 9
+ lw s0, 8(sp)
+ .cfi_restore 8
+ lw ra, 12(sp)
+ .cfi_restore 1
+ addi sp, sp, 16
+ .cfi_def_cfa_offset 0
+ ret
+ .cfi_endproc
+
+#endif
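
These routines back the -msave-restore option: instead of inlining the
prologue/epilogue spills, the compiler calls the matching save routine
(through t0, so that ra itself can be saved) and tail-calls the restore
routine.  A rough illustration (the exact mnemonics and register count
depend on the compiler version and register allocation):

    extern int g (int, int);

    /* With -msave-restore, a function like this has its spills
       outlined: the prologue becomes roughly
           call t0, __riscv_save_2     # spill ra, s0, s1, s2
       and the epilogue becomes
           tail __riscv_restore_2      # reload them and return  */
    int
    f (int x, int y)
    {
      int a = g (x, y);
      int b = g (y, x);
      return a + b;
    }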
--- /dev/null
+/* Software floating-point machine description for RISC-V.
+
+ Copyright (C) 2016-2017 Free Software Foundation, Inc.
+
+This file is part of GCC.
+
+GCC is free software; you can redistribute it and/or modify it under
+the terms of the GNU General Public License as published by the Free
+Software Foundation; either version 3, or (at your option) any later
+version.
+
+GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+WARRANTY; without even the implied warranty of MERCHANTABILITY or
+FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+for more details.
+
+Under Section 7 of GPL version 3, you are granted additional
+permissions described in the GCC Runtime Library Exception, version
+3.1, as published by the Free Software Foundation.
+
+You should have received a copy of the GNU General Public License and
+a copy of the GCC Runtime Library Exception along with this program;
+see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+<http://www.gnu.org/licenses/>. */
+
+#if __riscv_xlen == 32
+
+#define _FP_W_TYPE_SIZE 32
+#define _FP_W_TYPE unsigned long
+#define _FP_WS_TYPE signed long
+#define _FP_I_TYPE long
+
+#define _FP_MUL_MEAT_S(R,X,Y) \
+ _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_S,R,X,Y,umul_ppmm)
+#define _FP_MUL_MEAT_D(R,X,Y) \
+ _FP_MUL_MEAT_2_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
+#define _FP_MUL_MEAT_Q(R,X,Y) \
+ _FP_MUL_MEAT_4_wide(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
+
+#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(S,R,X,Y)
+#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_2_udiv(D,R,X,Y)
+#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_4_udiv(Q,R,X,Y)
+
+#define _FP_NANFRAC_S _FP_QNANBIT_S
+#define _FP_NANFRAC_D _FP_QNANBIT_D, 0
+#define _FP_NANFRAC_Q _FP_QNANBIT_Q, 0, 0, 0
+
+#else
+
+#define _FP_W_TYPE_SIZE 64
+#define _FP_W_TYPE unsigned long long
+#define _FP_WS_TYPE signed long long
+#define _FP_I_TYPE long long
+
+#define _FP_MUL_MEAT_S(R,X,Y) \
+ _FP_MUL_MEAT_1_imm(_FP_WFRACBITS_S,R,X,Y)
+#define _FP_MUL_MEAT_D(R,X,Y) \
+ _FP_MUL_MEAT_1_wide(_FP_WFRACBITS_D,R,X,Y,umul_ppmm)
+#define _FP_MUL_MEAT_Q(R,X,Y) \
+ _FP_MUL_MEAT_2_wide_3mul(_FP_WFRACBITS_Q,R,X,Y,umul_ppmm)
+
+#define _FP_DIV_MEAT_S(R,X,Y) _FP_DIV_MEAT_1_imm(S,R,X,Y,_FP_DIV_HELP_imm)
+#define _FP_DIV_MEAT_D(R,X,Y) _FP_DIV_MEAT_1_udiv_norm(D,R,X,Y)
+#define _FP_DIV_MEAT_Q(R,X,Y) _FP_DIV_MEAT_2_udiv(Q,R,X,Y)
+
+#define _FP_NANFRAC_S _FP_QNANBIT_S
+#define _FP_NANFRAC_D _FP_QNANBIT_D
+#define _FP_NANFRAC_Q _FP_QNANBIT_Q, 0
+
+#endif
+
+#if __riscv_xlen == 64
+typedef int TItype __attribute__ ((mode (TI)));
+typedef unsigned int UTItype __attribute__ ((mode (TI)));
+#define TI_BITS (__CHAR_BIT__ * (int)sizeof(TItype))
+#endif
+
+/* The type of the result of a floating point comparison. This must
+ match __libgcc_cmp_return__ in GCC for the target. */
+typedef int __gcc_CMPtype __attribute__ ((mode (__libgcc_cmp_return__)));
+#define CMPtype __gcc_CMPtype
+
+#define _FP_NANSIGN_S 0
+#define _FP_NANSIGN_D 0
+#define _FP_NANSIGN_Q 0
+
+#define _FP_KEEPNANFRACP 0
+#define _FP_QNANNEGATEDP 0
+
+#define _FP_CHOOSENAN(fs, wc, R, X, Y, OP) \
+ do { \
+ R##_s = _FP_NANSIGN_##fs; \
+ _FP_FRAC_SET_##wc(R,_FP_NANFRAC_##fs); \
+ R##_c = FP_CLS_NAN; \
+ } while (0)
+
+#define _FP_DECL_EX int _frm __attribute__ ((unused));
+#define FP_ROUNDMODE _frm
+
+#define FP_RND_NEAREST 0x0
+#define FP_RND_ZERO 0x1
+#define FP_RND_PINF 0x3
+#define FP_RND_MINF 0x2
+
+#define FP_EX_INVALID 0x10
+#define FP_EX_OVERFLOW 0x04
+#define FP_EX_UNDERFLOW 0x02
+#define FP_EX_DIVZERO 0x08
+#define FP_EX_INEXACT 0x01
+
+#define _FP_TININESS_AFTER_ROUNDING 1
+
+#ifdef __riscv_flen
+#define FP_INIT_ROUNDMODE \
+do { \
+ __asm__ volatile ("frrm %0" : "=r" (_frm)); \
+} while (0)
+
+#define FP_HANDLE_EXCEPTIONS \
+do { \
+ if (__builtin_expect (_fex, 0)) \
+ __asm__ volatile ("csrs fflags, %0" : : "rK" (_fex)); \
+} while (0)
+#else
+#define FP_INIT_ROUNDMODE _frm = FP_RND_NEAREST
+#endif
+
+#define __LITTLE_ENDIAN 1234
+#define __BIG_ENDIAN 4321
+
+#define __BYTE_ORDER __LITTLE_ENDIAN
+
+
+/* Define ALIASNAME as a strong alias for NAME. */
+# define strong_alias(name, aliasname) _strong_alias(name, aliasname)
+# define _strong_alias(name, aliasname) \
+ extern __typeof (name) aliasname __attribute__ ((alias (#name)));
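
The FP_RND_* values above deliberately match the hardware frm encoding
(0 = round to nearest, 1 = toward zero, 2 = down, 3 = up), which is why
FP_INIT_ROUNDMODE can read the CSR with frrm and use the result
directly.  A minimal standalone read, under the same assumption that
an FPU is present (__riscv_flen defined; the function name is
illustrative):

    static int
    read_rounding_mode (void)
    {
      int frm;
      __asm__ volatile ("frrm %0" : "=r" (frm));
      return frm;   /* 0 = RNE, 1 = RTZ, 2 = RDN, 3 = RUP */
    }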
--- /dev/null
+LIB2ADD += $(srcdir)/config/riscv/save-restore.S \
+ $(srcdir)/config/riscv/muldi3.S \
+ $(srcdir)/config/riscv/multi3.S \
+ $(srcdir)/config/riscv/div.S \
+ $(srcdir)/config/riscv/atomic.c
+
--- /dev/null
+LIB2FUNCS_EXCLUDE += _divsi3 _modsi3 _udivsi3 _umodsi3 _mulsi3 _muldi3
--- /dev/null
+LIB2FUNCS_EXCLUDE += _divsi3 _modsi3 _udivsi3 _umodsi3 _mulsi3 _muldi3
--- /dev/null
+ABI_SINGLE:=$(findstring __riscv_float_abi_single,$(shell $(gcc_compile_bare) -dM -E - </dev/null))
+ABI_DOUBLE:=$(findstring __riscv_float_abi_double,$(shell $(gcc_compile_bare) -dM -E - </dev/null))
+ABI_QUAD:=$(findstring __riscv_float_abi_quad,$(shell $(gcc_compile_bare) -dM -E - </dev/null))
+
+softfp_int_modes := si di
+softfp_exclude_libgcc2 := n
+
+ifndef ABI_QUAD
+ifdef ABI_DOUBLE
+
+softfp_float_modes := tf
+softfp_extensions := sftf dftf
+softfp_truncations := tfsf tfdf
+
+else
+
+softfp_float_modes := df tf
+softfp_extensions := sfdf sftf dftf
+softfp_truncations := dfsf tfsf tfdf
+
+ifndef ABI_SINGLE
+softfp_float_modes += sf
+endif
+
+endif
+endif
--- /dev/null
+include $(srcdir)/config/riscv/t-softfp32
+
+softfp_int_modes += ti