+2018-08-17  Jojo  <jijie_rong@c-sky.com>
+	    Huibin Wang  <huibin_wang@c-sky.com>
+	    Sandra Loosemore  <sandra@codesourcery.com>
+	    Chung-Lin Tang  <cltang@codesourcery.com>
+
+ C-SKY port: libgcc
+
+ * config.host: Add C-SKY support.
+ * config/csky/*: New.
+
2018-08-12  Chung-Ju Wu  <jasonwucj@gmail.com>

	* config/nds32/t-nds32-isr: Rearrange object dependency.
--- a/libgcc/config.host
+++ b/libgcc/config.host
crisv32-*-*)
cpu_type=cris
;;
+csky*-*-*)
+ cpu_type=csky
+ ;;
fido-*-*)
cpu_type=m68k
;;
cris-*-linux* | crisv32-*-linux*)
tmake_file="$tmake_file cris/t-cris t-softfp-sfdf t-softfp cris/t-linux"
;;
+csky-*-elf*)
+ tmake_file="csky/t-csky t-fdpbit"
+ extra_parts="$extra_parts crti.o crtn.o"
+ ;;
+csky-*-linux*)
+ tmake_file="$tmake_file csky/t-csky t-slibgcc-libgcc t-fdpbit csky/t-linux-csky"
+ extra_parts="$extra_parts crti.o crtn.o"
+ md_unwind_header=csky/linux-unwind.h
+ ;;
epiphany-*-elf* | epiphany-*-rtems*)
tmake_file="$tmake_file epiphany/t-epiphany t-fdpbit epiphany/t-custom-eqsf"
extra_parts="$extra_parts crti.o crtint.o crtrunc.o crtm1reg-r43.o crtm1reg-r63.o crtn.o"
--- /dev/null
+++ b/libgcc/config/csky/crti.S
+# Define _init and _fini entry points for C-SKY.
+# Copyright (C) 2018 Free Software Foundation, Inc.
+# Contributed by C-SKY Microsystems and Mentor Graphics.
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3, or (at your option) any
+# later version.
+#
+# This file is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# Under Section 7 of GPL version 3, you are granted additional
+# permissions described in the GCC Runtime Library Exception, version
+# 3.1, as published by the Free Software Foundation.
+#
+# You should have received a copy of the GNU General Public License and
+# a copy of the GCC Runtime Library Exception along with this program;
+# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+# <http://www.gnu.org/licenses/>.
+
+
+# This file just makes a stack frame for the contents of the .fini and
+# .init sections. Users may put any desired instructions in those
+# sections.
+
+ .file "crti.S"
+
+/* We use more complicated versions of this code with GLIBC. */
+#if defined(__gnu_linux__)
+
+#ifndef PREINIT_FUNCTION
+# define PREINIT_FUNCTION __gmon_start__
+#endif
+
+#ifndef PREINIT_FUNCTION_WEAK
+# define PREINIT_FUNCTION_WEAK 1
+#endif
+
+#if PREINIT_FUNCTION_WEAK
+ .global PREINIT_FUNCTION
+ .weak PREINIT_FUNCTION
+ .align 4
+ .type call_weak_fn, %function
+call_weak_fn:
+ // push lr
+ subi sp, 4
+ stw lr, (sp)
+#ifdef __PIC__
+ lrw a2, PREINIT_FUNCTION@GOT
+ addu a2, gb
+ ldw a2, (a2)
+#else
+ lrw a2, PREINIT_FUNCTION
+#endif
+ cmpnei a2, 0
+ bf 1f
+ jsr a2
+1:
+ // pop lr
+ ldw lr, (sp)
+ addi sp, 4
+ rts
+
+ .align 4
+#else
+ .hidden PREINIT_FUNCTION
+#endif /* PREINIT_FUNCTION_WEAK */
+
+ .section .init,"ax",@progbits
+ .align 4
+ .globl _init
+ .type _init, @function
+_init:
+ subi sp, 8
+ stw lr, (sp, 0)
+#ifdef __PIC__
+ // stw gb, (sp, 4)
+ bsr .Lgetpc
+.Lgetpc:
+ lrw gb, .Lgetpc@GOTPC
+ add gb, lr
+#endif
+#if PREINIT_FUNCTION_WEAK
+#ifdef __PIC__
+ lrw a2, call_weak_fn@GOTOFF
+ add a2, gb
+ jsr a2
+#else
+ jsri call_weak_fn
+#endif
+#else /* !PREINIT_FUNCTION_WEAK */
+#ifdef __PIC__
+ lrw a2, PREINIT_FUNCTION@PLT
+ addu a2, gb
+ ldw a2, (a2)
+ jsr a2
+#else
+ jsri PREINIT_FUNCTION
+#endif
+#endif /* PREINIT_FUNCTION_WEAK */
+
+ br 2f
+ .literals
+ .align 4
+2:
+ .section .fini,"ax",@progbits
+ .align 4
+ .globl _fini
+ .type _fini, @function
+_fini:
+ subi sp, 8
+ stw lr, (sp, 0)
+ br 2f
+ .literals
+ .align 4
+2:
+
+/* These are the non-GLIBC versions. */
+#else /* !defined(__gnu_linux__) */
+ .section ".init"
+ .global _init
+ .type _init,@function
+ .align 2
+_init:
+ subi sp, 16
+ st.w lr, (sp, 12)
+ mov r0, r0
+
+ .section ".fini"
+ .global _fini
+ .type _fini,@function
+ .align 2
+_fini:
+ subi sp, 16
+ st.w lr, (sp, 12)
+ mov r0, r0
+#endif /* defined(__gnu_linux__) */
--- /dev/null
+++ b/libgcc/config/csky/crtn.S
+# Terminate C-SKY .init and .fini sections.
+# Copyright (C) 2018 Free Software Foundation, Inc.
+# Contributed by C-SKY Microsystems and Mentor Graphics.
+#
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 3, or (at your option) any
+# later version.
+#
+# This file is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# General Public License for more details.
+#
+# Under Section 7 of GPL version 3, you are granted additional
+# permissions described in the GCC Runtime Library Exception, version
+# 3.1, as published by the Free Software Foundation.
+#
+# You should have received a copy of the GNU General Public License and
+# a copy of the GCC Runtime Library Exception along with this program;
+# see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+# <http://www.gnu.org/licenses/>.
+
+
+# This file just makes sure that the .fini and .init sections do in
+# fact return. Users may put any desired instructions in those sections.
+# This file is the last thing linked into any executable.
+
+ .file "crtn.S"
+
+# Is this the GLIBC version?
+#if defined(__gnu_linux__)
+ .section .init,"ax",@progbits
+ ldw lr, (sp, 0)
+ addi sp, 8
+ rts
+
+ .section .fini,"ax",@progbits
+ ldw lr, (sp, 0)
+ addi sp, 8
+ rts
+
+#else /* !defined(__gnu_linux__) */
+ .section ".init"
+ ldw lr, (sp, 12)
+ addi sp, 16
+ jmp lr
+
+ .section ".fini"
+ ldw lr, (sp, 12)
+ addi sp, 16
+ jmp lr
+
+# Th-th-th-that is all folks!
+#endif /* defined(__gnu_linux__) */
--- /dev/null
+++ b/libgcc/config/csky/lib1funcs.S
+/* libgcc routines for C-SKY.
+ Copyright (C) 2018 Free Software Foundation, Inc.
+ Contributed by C-SKY Microsystems and Mentor Graphics.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the
+ Free Software Foundation; either version 3, or (at your option) any
+ later version.
+
+ This file is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+
+/* Use the right prefix for global labels. */
+#define CONCAT1(a, b) CONCAT2(a, b)
+#define CONCAT2(a, b) a ## b
+#define SYM(x) CONCAT1 (__, x)
+
+#ifndef __CSKYBE__
+#define xl r0
+#define xh r1
+#define yl r2
+#define yh r3
+#else
+#define xh r0
+#define xl r1
+#define yh r2
+#define yl r3
+#endif
+
+
+#ifdef __ELF__
+#define TYPE(x) .type SYM (x),@function
+#define SIZE(x) .size SYM (x), . - SYM (x)
+#else
+#define TYPE(x)
+#define SIZE(x)
+#endif
+
+.macro FUNC_START name
+ .text
+ .align 2
+ .globl SYM (\name)
+ TYPE (\name)
+SYM (\name):
+.endm
+
+.macro FUNC_END name
+ SIZE (\name)
+.endm
+
+
+/* Emulate FF1 ("fast find 1") instruction on ck801.
+ Result goes in rx, clobbering ry. */
+#if defined(__CK801__)
+.macro FF1_M rx, ry
+ movi \rx, 32
+10:
+ cmphsi \ry, 1
+ bf 11f
+ subi \rx, \rx, 1
+ lsri \ry, \ry, 1
+ br 10b
+11:
+.endm
+#else
+.macro FF1_M rx, ry
+ ff1 \rx, \ry
+.endm
+#endif
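
For reference, a C model of the semantics both FF1_M variants provide, offered
as a sketch rather than as part of the patch: the result is the number of
leading zero bits, with 32 for a zero input.

    /* Model of FF1_M: count leading zeros; ff1_model (0) == 32.  */
    static unsigned int
    ff1_model (unsigned int x)
    {
      unsigned int n = 32;
      while (x != 0)
        {
          n--;
          x >>= 1;
        }
      return n;
    }
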
+
+/* Likewise emulate lslc instruction ("logical left shift to C") on CK801. */
+#if defined(__CK801__)
+.macro LSLC_M rx
+ cmpne \rx, \rx
+ addc \rx, \rx
+.endm
+#else
+.macro LSLC_M rx
+ lslc \rx
+.endm
+#endif
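
Similarly, a one-line C model of LSLC_M (again a sketch): the register
doubles, and the bit shifted out lands in the condition flag C, which the
division loops below consume via addc.

    /* Model of LSLC_M: returns the old bit 31 (the new C flag).  */
    static unsigned int
    lslc_model (unsigned int *rx)
    {
      unsigned int c = *rx >> 31;
      *rx <<= 1;
      return c;
    }
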
+
+/* Emulate the abs instruction. */
+#if defined(__CK802__)
+.macro ABS_M rx
+ btsti \rx, 31
+ bf 10f
+ not \rx
+ addi \rx, 1
+10:
+.endm
+#elif defined(__CK801__)
+.macro ABS_M rx
+ cmplti \rx, 1
+ bf 10f
+ not \rx
+ addi \rx, 1
+10:
+.endm
+#else
+.macro ABS_M rx
+ abs \rx
+.endm
+#endif
+
+/* Emulate the ld.bs ("load signed byte and extend") and ld.hs ("load
+   signed halfword and extend") instructions on ck801. */
+#if defined(__CK801__)
+.macro LDBS_M rx, ry
+ ld.b \rx, (\ry, 0x0)
+ sextb \rx, \rx
+.endm
+#else
+.macro LDBS_M rx, ry
+ ld.bs \rx, (\ry, 0x0)
+.endm
+#endif
+
+#if defined(__CK801__)
+.macro LDHS_M rx, ry
+ ld.h \rx, (\ry, 0x0)
+ sexth \rx, \rx
+.endm
+#else
+.macro LDHS_M rx, ry
+ ld.hs \rx, (\ry, 0x0)
+.endm
+#endif
+
+
+/* Signed and unsigned div/mod/rem functions. */
+
+#ifdef L_udivsi3
+FUNC_START udiv32
+FUNC_START udivsi3
+ cmpnei a1, 0 // look for 0 divisor
+ bt 9f
+ trap 3 // divide by 0
+9:
+ // control iterations, skip across high order 0 bits in dividend
+ cmpnei a0, 0
+ bt 8f
+ jmp lr // 0 dividend quick return
+8:
+ push l0
+ movi a2, 1 // a2 is quotient (1 for a sentinel)
+ mov a3, a0
+ FF1_M l0, a3 // figure distance to skip
+ lsl a2, l0 // move the sentinel along (with 0's behind)
+ lsl a0, l0 // and the low 32 bits of numerator
+
+ // FIXME: Is this correct?
+ mov a3, a1 // looking at divisor
+ FF1_M l0, a3 // I can move 32-l0 more bits to left.
+ addi l0, 1 // ok, one short of that...
+ mov a3, a0
+ lsr a3, l0 // bits that came from low order...
+ not l0 // l0 == "32-n" == LEFT distance
+ addi l0, 33 // this is (32-n)
+ lsl a2, l0 // fixes the high 32 (quotient)
+ lsl a0, l0
+ cmpnei a2, 0
+ bf 4f // the sentinel went away...
+
+ // run the remaining bits
+1:
+ LSLC_M a0 // 1 bit left shift of a3-a0
+ addc a3, a3
+ cmphs a3, a1 // upper 32 of dividend >= divisor?
+ bf 2f
+ subu a3, a1 // if yes, subtract divisor
+2:
+ addc a2, a2 // shift by 1 and count subtracts
+ bf 1b // if sentinel falls out of quotient, stop
+
+4:
+ mov a0, a2 // return quotient
+ mov a1, a3 // and piggyback the remainder
+ pop l0
+FUNC_END udiv32
+FUNC_END udivsi3
+#endif
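
The loop above is a classic restoring divider. The FF1/lsl prologue plants a
sentinel 1 bit in the quotient register so the loop needs no separate counter:
each addc shifts in one quotient bit, and the carry out of that addc (the
sentinel leaving bit 31) ends the loop. A minimal C model of that core, as a
sketch that omits the divisor-based iteration shortcut marked FIXME (the asm
returns the remainder in a1; here it comes back through *remp):

    static unsigned int
    udiv32_model (unsigned int x, unsigned int y, unsigned int *remp)
    {
      unsigned int quot = 1;      /* Sentinel bit; the loop ends when it
                                     falls out of bit 31.  */
      unsigned long long rem = 0; /* Kept wide, standing in for the carry
                                     the asm threads through addc.  */
      unsigned int bit, out;

      if (x == 0)
        {
          *remp = 0;
          return 0;
        }
      /* Normalize: dividend MSB to bit 31, sentinel up by the same
         distance (the FF1_M/lsl prologue).  */
      while (!(x & 0x80000000u))
        {
          x <<= 1;
          quot <<= 1;
        }
      for (;;)
        {
          /* One-bit left shift of the pair rem:x (LSLC_M/addc).  */
          rem = (rem << 1) | (x >> 31);
          x <<= 1;
          /* Trial subtraction gives the next quotient bit (cmphs/subu).  */
          bit = (rem >= y);
          if (bit)
            rem -= y;
          /* Shift the bit in; stop when the sentinel drops out (addc/bf).  */
          out = quot >> 31;
          quot = (quot << 1) | bit;
          if (out)
            break;
        }
      *remp = (unsigned int) rem;
      return quot;
    }
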
+
+#ifdef L_umodsi3
+FUNC_START urem32
+FUNC_START umodsi3
+ cmpnei a1, 0 // look for 0 divisor
+ bt 9f
+ trap 3 // divide by 0
+9:
+ // control iterations, skip across high order 0 bits in dividend
+ cmpnei a0, 0
+ bt 8f
+ jmp lr // 0 dividend quick return
+8:
+ mov a2, a0
+ FF1_M a3, a2 // figure distance to skip
+ movi a2, 1 // a2 is quotient (1 for a sentinel)
+ lsl a2, a3 // move the sentinel along (with 0's behind)
+ lsl a0, a3 // and the low 32 bits of numerator
+ movi a3, 0
+
+1:
+ LSLC_M a0 // 1 bit left shift of a3-a0
+ addc a3, a3
+ cmphs a3, a1 // upper 32 of dividend >= divisor?
+ bf 2f
+ subu a3, a1 // if yes, subtract divisor
+2:
+ addc a2, a2 // shift by 1 and count subtracts
+ bf 1b // if sentinel falls out of quotient, stop
+
+4:
+ mov a0, a3 // return the remainder
+ jmp lr
+FUNC_END urem32
+FUNC_END umodsi3
+#endif
+
+
+#ifdef L_divsi3
+FUNC_START div32
+FUNC_START divsi3
+ cmpnei a1, 0 // look for 0 divisor
+ bt 9f
+ trap 3 // divide by 0
+9:
+ // control iterations, skip across high order 0 bits in dividend
+ cmpnei a0, 0
+ bt 8f
+ jmp lr // 0 dividend quick return
+8:
+ push l0, l1
+ mov l1, a0
+ xor l1, a1 // calc sign of quotient
+ ABS_M a0
+ ABS_M a1
+ movi a2, 1 // a2 is quotient (1 for a sentinel)
+ mov a3, a0
+ FF1_M l0, a3 // figure distance to skip
+ lsl a2, l0 // move the sentinel along (with 0's behind)
+ lsl a0, l0 // and the low 32 bits of numerator
+
+ // FIXME: is this correct?
+ mov a3, a1 // looking at divisor
+ FF1_M l0, a3 // I can move 32-l0 more bits to left.
+ addi l0, 1 // ok, one short of that...
+ mov a3, a0
+ lsr a3, l0 // bits that came from low order...
+ not l0 // l0 == "32-n" == LEFT distance
+ addi l0, 33 // this is (32-n)
+ lsl a2, l0 // fixes the high 32 (quotient)
+ lsl a0, l0
+ cmpnei a2, 0
+ bf 4f // the sentinel went away...
+
+ // run the remaining bits
+1:
+ LSLC_M a0 // 1 bit left shift of a3-a0
+ addc a3, a3
+ cmphs a3, a1 // upper 32 of dividend >= divisor?
+ bf 2f
+ subu a3, a1 // if yes, subtract divisor
+2:
+ addc a2, a2 // shift by 1 and count subtracts
+ bf 1b // if sentinel falls out of quotient, stop
+
+4:
+ mov a0, a2 // return quotient
+ mov a1, a3 // and piggyback the remainder
+ LSLC_M l1 // after adjusting for sign
+ bf 3f
+ not a0
+ addi a0, 1
+ not a1
+ addi a1, 1
+3:
+ pop l0, l1
+FUNC_END div32
+FUNC_END divsi3
+#endif
+
+#ifdef L_modsi3
+FUNC_START rem32
+FUNC_START modsi3
+ push l0
+ cmpnei a1, 0 // look for 0 divisor
+ bt 9f
+ trap 3 // divide by 0
+9:
+ // control iterations, skip across high order 0 bits in dividend
+ cmpnei a0, 0
+ bt 8f
+ pop l0 // 0 dividend quick return
+8:
+ mov l0, a0
+ ABS_M a0
+ ABS_M a1
+ mov a2, a0
+ FF1_M a3, a2 // figure distance to skip
+ movi a2, 1 // a2 is quotient (1 for a sentinel)
+ lsl a2, a3 // move the sentinel along (with 0's behind)
+ lsl a0, a3 // and the low 32 bits of numerator
+ movi a3, 0
+
+ // run the remaining bits
+1:
+ LSLC_M a0 // 1 bit left shift of a3-a0
+ addc a3, a3
+ cmphs a3, a1 // upper 32 of dividend >= divisor?
+ bf 2f
+ subu a3, a1 // if yes, subtract divisor
+2:
+ addc a2, a2 // shift by 1 and count subtracts
+ bf 1b // if sentinel falls out of quotient, stop
+
+4:
+ mov a0, a3 // return the remainder
+ LSLC_M l0 // after adjusting for sign
+ bf 3f
+ not a0
+ addi a0, 1
+3:
+ pop l0
+FUNC_END rem32
+FUNC_END modsi3
+#endif
+
+/* Unordered comparisons for single and double float. */
+
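
All three code paths below compute the same predicate; as a C sketch
operating on the bit image of the float (an illustration, not part of the
patch):

    /* Nonzero iff either operand is a NaN, i.e. an all-ones exponent
       together with a nonzero mantissa.  */
    static int
    unordsf2_model (unsigned int a, unsigned int b)  /* bit images */
    {
      int a_nan = ((a << 1) >> 24) == 0xff && (a << 9) != 0;
      int b_nan = ((b << 1) >> 24) == 0xff && (b << 9) != 0;
      return a_nan || b_nan;
    }
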
+#ifdef L_unordsf2
+FUNC_START unordsf2
+#if defined(__CK801__)
+ subi sp, 4
+ st.w r4, (sp, 0x0)
+ lsli r2, r0, 1
+ lsli r3, r1, 1
+ asri r4, r2, 24
+ not r4
+ cmpnei r4, 0
+ bt 1f
+ lsli r4, r0, 9
+ cmpnei r4, 0
+ bt 3f
+1:
+ asri r4, r3, 24
+ not r4
+ cmpnei r4, 0
+ bt 2f
+ lsli r4, r1, 9
+ cmpnei r4, 0
+ bt 3f
+2:
+ ld.w r4, (sp, 0x0)
+ addi sp, 4
+ movi r0, 0
+ rts
+3:
+ ld.w r4, (sp, 0x0)
+ addi sp, 4
+ movi r0, 1
+ rts
+#elif defined(__CK802__)
+ lsli r2, r0, 1
+ lsli r3, r1, 1
+ asri r2, r2, 24
+ not r13, r2
+ cmpnei r13, 0
+ bt 1f
+ lsli r13, r0, 9
+ cmpnei r13, 0
+ bt 3f
+1:
+ asri r3, r3, 24
+ not r13, r3
+ cmpnei r13, 0
+ bt 2f
+ lsli r13, r1, 9
+ cmpnei r13, 0
+ bt 3f
+2:
+ movi r0, 0
+ rts
+3:
+ movi r0, 1
+ rts
+#else
+ lsli r2, r0, 1
+ lsli r3, r1, 1
+ asri r2, r2, 24
+ not r13, r2
+ bnez r13, 1f
+ lsli r13, r0, 9
+ bnez r13, 3f
+1:
+ asri r3, r3, 24
+ not r13, r3
+ bnez r13, 2f
+ lsli r13, r1, 9
+ bnez r13, 3f
+2:
+ movi r0, 0
+ rts
+3:
+ movi r0, 1
+ rts
+#endif
+FUNC_END unordsf2
+#endif
+
+#ifdef L_unorddf2
+FUNC_START unorddf2
+#if defined(__CK801__)
+ subi sp, 8
+ st.w r4, (sp, 0x0)
+ st.w r5, (sp, 0x4)
+ lsli r4, xh, 1
+ asri r4, r4, 21
+ not r4
+ cmpnei r4, 0
+ bt 1f
+ mov r4, xl
+ lsli r5, xh, 12
+ or r4, r5
+ cmpnei r4, 0
+ bt 3f
+1:
+ lsli r4, yh, 1
+ asri r4, r4, 21
+ not r4
+ cmpnei r4, 0
+ bt 2f
+ mov r4, yl
+ lsli r5, yh, 12
+ or r4, r5
+ cmpnei r4, 0
+ bt 3f
+2:
+ ld.w r4, (sp, 0x0)
+ ld.w r5, (sp, 0x4)
+ addi sp, 8
+ movi r0, 0
+ rts
+3:
+ ld.w r4, (sp, 0x0)
+ ld.w r5, (sp, 0x4)
+ addi sp, 8
+ movi r0, 1
+ rts
+#elif defined(__CK802__)
+ lsli r13, xh, 1
+ asri r13, r13, 21
+ not r13
+ cmpnei r13, 0
+ bt 1f
+ lsli xh, xh, 12
+ or r13, xl, xh
+ cmpnei r13, 0
+ bt 3f
+1:
+ lsli r13, yh, 1
+ asri r13, r13, 21
+ not r13
+ cmpnei r13, 0
+ bt 2f
+ lsli yh, yh, 12
+ or r13, yl, yh
+ cmpnei r13, 0
+ bt 3f
+2:
+ movi r0, 0
+ rts
+3:
+ movi r0, 1
+ rts
+#else
+ lsli r13, xh, 1
+ asri r13, r13, 21
+ not r13
+ bnez r13, 1f
+ lsli xh, xh, 12
+ or r13, xl, xh
+ bnez r13, 3f
+1:
+ lsli r13, yh, 1
+ asri r13, r13, 21
+ not r13
+ bnez r13, 2f
+ lsli yh, yh, 12
+ or r13, yl, yh
+ bnez r13, 3f
+2:
+ movi r0, 0
+ rts
+3:
+ movi r0, 1
+ rts
+#endif
+FUNC_END unorddf2
+#endif
+
+/* When optimizing for size on ck801 and ck802, GCC emits calls to the
+ following helper functions when expanding casesi, instead of emitting
+ the table lookup and jump inline. Note that in these functions the
+ jump is handled by tweaking the value of lr before rts. */
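
The dispatch tables these helpers read are emitted immediately after the call
instruction, so lr points at the table on entry. As a sketch, with lr modeled
as an explicit argument (an illustration of the lookup, not code from the
patch), the byte and word variants compute:

    /* _gnu_csky_case_sqi: entries hold (target - table) / 2.  */
    static void *
    case_sqi_model (signed char *table /* == lr */, int index)
    {
      return (char *) table + 2 * table[index];
    }

    /* _gnu_csky_case_si: lr is first rounded up to a word boundary;
       entries hold byte offsets from the aligned table.  Assumes
       32-bit pointers, as on this target.  */
    static void *
    case_si_model (int *table /* == lr */, int index)
    {
      table = (int *) (((unsigned int) table + 2) & ~3u);
      return (char *) table + table[index];
    }
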
+#ifdef L_csky_case_sqi
+FUNC_START _gnu_csky_case_sqi
+ subi sp, 4
+ st.w a1, (sp, 0x0)
+ mov a1, lr
+ add a1, a1, a0
+ LDBS_M a1, a1
+ lsli a1, a1, 1
+ add lr, lr, a1
+ ld.w a1, (sp, 0x0)
+ addi sp, 4
+ rts
+FUNC_END _gnu_csky_case_sqi
+#endif
+
+#ifdef L_csky_case_uqi
+FUNC_START _gnu_csky_case_uqi
+ subi sp, 4
+ st.w a1, (sp, 0x0)
+ mov a1, lr
+ add a1, a1, a0
+ ld.b a1, (a1, 0x0)
+ lsli a1, a1, 1
+ add lr, lr, a1
+ ld.w a1, (sp, 0x0)
+ addi sp, 4
+ rts
+FUNC_END _gnu_csky_case_uqi
+#endif
+
+#ifdef L_csky_case_shi
+FUNC_START _gnu_csky_case_shi
+ subi sp, 8
+ st.w a0, (sp, 0x4)
+ st.w a1, (sp, 0x0)
+ mov a1, lr
+ lsli a0, a0, 1
+ add a1, a1, a0
+ LDHS_M a1, a1
+ lsli a1, a1, 1
+ add lr, lr, a1
+ ld.w a0, (sp, 0x4)
+ ld.w a1, (sp, 0x0)
+ addi sp, 8
+ rts
+FUNC_END _gnu_csky_case_shi
+#endif
+
+#ifdef L_csky_case_uhi
+FUNC_START _gnu_csky_case_uhi
+ subi sp, 8
+ st.w a0, (sp, 0x4)
+ st.w a1, (sp, 0x0)
+ mov a1, lr
+ lsli a0, a0, 1
+ add a1, a1, a0
+ ld.h a1, (a1, 0x0)
+ lsli a1, a1, 1
+ add lr, lr, a1
+ ld.w a0, (sp, 0x4)
+ ld.w a1, (sp, 0x0)
+ addi sp, 8
+ rts
+FUNC_END _gnu_csky_case_uhi
+#endif
+
+#ifdef L_csky_case_si
+FUNC_START _gnu_csky_case_si
+ subi sp, 8
+ st.w a0, (sp, 0x4)
+ st.w a1, (sp, 0x0)
+ mov a1, lr
+ addi a1, a1, 2 // Align to word.
+ bclri a1, a1, 1
+ mov lr, a1
+ lsli a0, a0, 2
+ add a1, a1, a0
+ ld.w a0, (a1, 0x0)
+ add lr, lr, a0
+ ld.w a0, (sp, 0x4)
+ ld.w a1, (sp, 0x0)
+ addi sp, 8
+ rts
+FUNC_END _gnu_csky_case_si
+#endif
+
+/* GCC expects that {__eq,__ne,__gt,__ge,__le,__lt}{df2,sf2}
+   will behave as __cmpdf2 and __cmpsf2, respectively.  So, we stub
+   the implementations to jump on to __cmpdf2 and __cmpsf2.
+
+   All of these short-circuit the return path so that __cmp{s,d}f2
+   will return directly to the caller. */
+
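
The contract relied on here, sketched in C: __cmpdf2 and __cmpsf2 return a
negative, zero, or positive value for less-than, equal, and greater-than, so
any of the six entry points can share that code, NaN corner cases aside
(these stubs make no attempt to distinguish unordered operands).

    extern int __cmpdf2 (double, double);  /* negative / zero / positive */

    /* With the stubs below, GCC's expansion of (a < b) into
       "__ltdf2 (a, b) < 0" still yields the right answer, because
       __ltdf2 lands in __cmpdf2.  */
    static int
    lt_model (double a, double b)
    {
      return __cmpdf2 (a, b) < 0;
    }
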
+.macro COMPARE_DF_JUMP name
+ .import SYM (cmpdf2)
+FUNC_START \name
+ jmpi SYM (cmpdf2)
+FUNC_END \name
+.endm
+
+#ifdef L_eqdf2
+COMPARE_DF_JUMP eqdf2
+#endif /* L_eqdf2 */
+
+#ifdef L_nedf2
+COMPARE_DF_JUMP nedf2
+#endif /* L_nedf2 */
+
+#ifdef L_gtdf2
+COMPARE_DF_JUMP gtdf2
+#endif /* L_gtdf2 */
+
+#ifdef L_gedf2
+COMPARE_DF_JUMP gedf2
+#endif /* L_gedf2 */
+
+#ifdef L_ltdf2
+COMPARE_DF_JUMP ltdf2
+#endif /* L_ltdf2 */
+
+#ifdef L_ledf2
+COMPARE_DF_JUMP ledf2
+#endif /* L_ledf2 */
+
+/* Single-precision floating point stubs. */
+
+.macro COMPARE_SF_JUMP name
+ .import SYM (cmpsf2)
+FUNC_START \name
+ jmpi SYM (cmpsf2)
+FUNC_END \name
+.endm
+
+#ifdef L_eqsf2
+COMPARE_SF_JUMP eqsf2
+#endif /* L_eqsf2 */
+
+#ifdef L_nesf2
+COMPARE_SF_JUMP nesf2
+#endif /* L_nesf2 */
+
+#ifdef L_gtsf2
+COMPARE_SF_JUMP gtsf2
+#endif /* L_gtsf2 */
+
+#ifdef L_gesf2
+COMPARE_SF_JUMP gesf2
+#endif /* L_gesf2 */
+
+#ifdef L_ltsf2
+COMPARE_SF_JUMP ltsf2
+#endif /* L_ltsf2 */
+
+#ifdef L_lesf2
+COMPARE_SF_JUMP lesf2
+#endif /* L_lesf2 */
--- /dev/null
+++ b/libgcc/config/csky/linux-atomic.c
+/* Linux-specific atomic operations for C-SKY.
+ Copyright (C) 2018 Free Software Foundation, Inc.
+ Contributed by C-SKY Microsystems and Mentor Graphics.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify it under
+ the terms of the GNU General Public License as published by the Free
+ Software Foundation; either version 3, or (at your option) any later
+ version.
+
+ GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+ WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+/* Kernel helper for compare-and-exchange. */
+static inline int
+__kernel_cmpxchg (int oldval, int newval, volatile int *ptr)
+{
+ register int _a0 asm ("a0") = oldval;
+ register int _a1 asm ("a1") = newval;
+ register volatile int *_a2 asm ("a2") = ptr;
+ __asm__ __volatile__ ("trap 2\n"
+   : "+r" (_a0)
+   : "r" (_a1), "r" (_a2)
+   : "a3", "memory");
+ return _a0;
+}
+
+
+/* Kernel helper for memory barrier. */
+static inline void
+__kernel_dmb (void)
+{
+ asm ("sync" : : : "memory");
+}
+
+/* Note: we implement byte, short and int versions of atomic operations using
+ the above kernel helpers, but there is no support for "long long" (64-bit)
+ operations as yet. */
+
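
For instance, a caller like the following (a hypothetical example) compiles
to a call to the word-sized __sync_fetch_and_add_4 defined below whenever the
compiler does not expand the operation inline:

    int
    fetch_add_example (int *counter)
    {
      return __sync_fetch_and_add (counter, 1);
    }
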
+#define HIDDEN __attribute__ ((visibility ("hidden")))
+
+#ifdef __CSKYLE__
+#define INVERT_MASK_1 0
+#define INVERT_MASK_2 0
+#else
+#define INVERT_MASK_1 24
+#define INVERT_MASK_2 16
+#endif
+
+#define MASK_1 0xffu
+#define MASK_2 0xffffu
+
+#define FETCH_AND_OP_WORD(OP, PFX_OP, INF_OP) \
+ int HIDDEN \
+ __sync_fetch_and_##OP##_4 (int *ptr, int val) \
+ { \
+ int failure, tmp; \
+ \
+ do \
+ { \
+ tmp = *ptr; \
+ failure = __kernel_cmpxchg (tmp, PFX_OP (tmp INF_OP val), ptr); \
+ } \
+ while (failure != 0); \
+ \
+ return tmp; \
+ }
+
+FETCH_AND_OP_WORD (add, , +)
+FETCH_AND_OP_WORD (sub, , -)
+FETCH_AND_OP_WORD (or, , |)
+FETCH_AND_OP_WORD (and, , &)
+FETCH_AND_OP_WORD (xor, , ^)
+FETCH_AND_OP_WORD (nand, ~, &)
+
+#define NAME_oldval(OP, WIDTH) __sync_fetch_and_##OP##_##WIDTH
+#define NAME_newval(OP, WIDTH) __sync_##OP##_and_fetch_##WIDTH
+
+/* Implement both __sync_<op>_and_fetch and __sync_fetch_and_<op> for
+ subword-sized quantities. */
+
+#define SUBWORD_SYNC_OP(OP, PFX_OP, INF_OP, TYPE, WIDTH, RETURN) \
+ TYPE HIDDEN \
+ NAME##_##RETURN (OP, WIDTH) (TYPE *ptr, TYPE val) \
+ { \
+ int *wordptr = (int *) ((unsigned int) ptr & ~3); \
+ unsigned int mask, shift, oldval, newval; \
+ int failure; \
+ \
+ shift = (((unsigned int) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH; \
+ mask = MASK_##WIDTH << shift; \
+ \
+ do \
+ { \
+ oldval = *wordptr; \
+ newval = ((PFX_OP (((oldval & mask) >> shift) \
+ INF_OP (unsigned int) val)) << shift) & mask; \
+ newval |= oldval & ~mask; \
+ failure = __kernel_cmpxchg (oldval, newval, wordptr); \
+ } \
+ while (failure != 0); \
+ \
+ return (RETURN & mask) >> shift; \
+ }
+
+SUBWORD_SYNC_OP (add, , +, unsigned short, 2, oldval)
+SUBWORD_SYNC_OP (sub, , -, unsigned short, 2, oldval)
+SUBWORD_SYNC_OP (or, , |, unsigned short, 2, oldval)
+SUBWORD_SYNC_OP (and, , &, unsigned short, 2, oldval)
+SUBWORD_SYNC_OP (xor, , ^, unsigned short, 2, oldval)
+SUBWORD_SYNC_OP (nand, ~, &, unsigned short, 2, oldval)
+
+SUBWORD_SYNC_OP (add, , +, unsigned char, 1, oldval)
+SUBWORD_SYNC_OP (sub, , -, unsigned char, 1, oldval)
+SUBWORD_SYNC_OP (or, , |, unsigned char, 1, oldval)
+SUBWORD_SYNC_OP (and, , &, unsigned char, 1, oldval)
+SUBWORD_SYNC_OP (xor, , ^, unsigned char, 1, oldval)
+SUBWORD_SYNC_OP (nand, ~, &, unsigned char, 1, oldval)
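
A worked sketch of the shift/mask arithmetic above, for a little-endian short
(INVERT_MASK_2 == 0): a short at address 0x1002 lives at bit 16 of the word
at 0x1000, so mask == 0xffff0000 and the updated halfword is spliced in as
below (a hypothetical helper mirroring SUBWORD_SYNC_OP's splice step):

    static unsigned int
    splice_short (unsigned int word, unsigned short val, unsigned int addr)
    {
      unsigned int shift = (addr & 3) << 3;  /* 16 for 0x1002.  */
      unsigned int mask = 0xffffu << shift;
      return (word & ~mask) | (((unsigned int) val << shift) & mask);
    }
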
+
+#define OP_AND_FETCH_WORD(OP, PFX_OP, INF_OP) \
+ int HIDDEN \
+ __sync_##OP##_and_fetch_4 (int *ptr, int val) \
+ { \
+ int tmp, failure; \
+ \
+ do \
+ { \
+ tmp = *ptr; \
+ failure = __kernel_cmpxchg (tmp, PFX_OP tmp INF_OP val, ptr); \
+ } \
+ while (failure != 0); \
+ \
+ return PFX_OP tmp INF_OP val; \
+ }
+
+OP_AND_FETCH_WORD (add, , +)
+OP_AND_FETCH_WORD (sub, , -)
+OP_AND_FETCH_WORD (or, , |)
+OP_AND_FETCH_WORD (and, , &)
+OP_AND_FETCH_WORD (xor, , ^)
+OP_AND_FETCH_WORD (nand, ~, &)
+
+SUBWORD_SYNC_OP (add, , +, unsigned short, 2, newval)
+SUBWORD_SYNC_OP (sub, , -, unsigned short, 2, newval)
+SUBWORD_SYNC_OP (or, , |, unsigned short, 2, newval)
+SUBWORD_SYNC_OP (and, , &, unsigned short, 2, newval)
+SUBWORD_SYNC_OP (xor, , ^, unsigned short, 2, newval)
+SUBWORD_SYNC_OP (nand, ~, &, unsigned short, 2, newval)
+
+SUBWORD_SYNC_OP (add, , +, unsigned char, 1, newval)
+SUBWORD_SYNC_OP (sub, , -, unsigned char, 1, newval)
+SUBWORD_SYNC_OP (or, , |, unsigned char, 1, newval)
+SUBWORD_SYNC_OP (and, , &, unsigned char, 1, newval)
+SUBWORD_SYNC_OP (xor, , ^, unsigned char, 1, newval)
+SUBWORD_SYNC_OP (nand, ~, &, unsigned char, 1, newval)
+
+int HIDDEN
+__sync_val_compare_and_swap_4 (int *ptr, int oldval, int newval)
+{
+ int actual_oldval, fail;
+
+ while (1)
+ {
+ actual_oldval = *ptr;
+
+ if (oldval != actual_oldval)
+ return actual_oldval;
+
+ fail = __kernel_cmpxchg (actual_oldval, newval, ptr);
+
+ if (!fail)
+ return oldval;
+ }
+}
+
+#define SUBWORD_VAL_CAS(TYPE, WIDTH) \
+ TYPE HIDDEN \
+ __sync_val_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval, \
+ TYPE newval) \
+ { \
+ int *wordptr = (int *)((unsigned int) ptr & ~3), fail; \
+ unsigned int mask, shift, actual_oldval, actual_newval; \
+ \
+ shift = (((unsigned int) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH; \
+ mask = MASK_##WIDTH << shift; \
+ \
+ while (1) \
+ { \
+ actual_oldval = *wordptr; \
+ \
+ if (((actual_oldval & mask) >> shift) != (unsigned int) oldval) \
+ return (actual_oldval & mask) >> shift; \
+ \
+ actual_newval = (actual_oldval & ~mask) \
+ | (((unsigned int) newval << shift) & mask); \
+ \
+ fail = __kernel_cmpxchg (actual_oldval, actual_newval, \
+ wordptr); \
+ \
+ if (!fail) \
+ return oldval; \
+ } \
+ }
+
+SUBWORD_VAL_CAS (unsigned short, 2)
+SUBWORD_VAL_CAS (unsigned char, 1)
+
+typedef unsigned char bool;
+
+bool HIDDEN
+__sync_bool_compare_and_swap_4 (int *ptr, int oldval, int newval)
+{
+ int failure = __kernel_cmpxchg (oldval, newval, ptr);
+ return (failure == 0);
+}
+
+#define SUBWORD_BOOL_CAS(TYPE, WIDTH) \
+ bool HIDDEN \
+ __sync_bool_compare_and_swap_##WIDTH (TYPE *ptr, TYPE oldval, \
+ TYPE newval) \
+ { \
+ TYPE actual_oldval \
+ = __sync_val_compare_and_swap_##WIDTH (ptr, oldval, newval); \
+ return (oldval == actual_oldval); \
+ }
+
+SUBWORD_BOOL_CAS (unsigned short, 2)
+SUBWORD_BOOL_CAS (unsigned char, 1)
+
+void HIDDEN
+__sync_synchronize (void)
+{
+ __kernel_dmb ();
+}
+
+int HIDDEN
+__sync_lock_test_and_set_4 (int *ptr, int val)
+{
+ int failure, oldval;
+
+ do
+ {
+ oldval = *ptr;
+ failure = __kernel_cmpxchg (oldval, val, ptr);
+ }
+ while (failure != 0);
+
+ return oldval;
+}
+
+#define SUBWORD_TEST_AND_SET(TYPE, WIDTH) \
+ TYPE HIDDEN \
+ __sync_lock_test_and_set_##WIDTH (TYPE *ptr, TYPE val) \
+ { \
+ int failure; \
+ unsigned int oldval, newval, shift, mask; \
+ int *wordptr = (int *) ((unsigned int) ptr & ~3); \
+ \
+ shift = (((unsigned int) ptr & 3) << 3) ^ INVERT_MASK_##WIDTH; \
+ mask = MASK_##WIDTH << shift; \
+ \
+ do \
+ { \
+ oldval = *wordptr; \
+ newval = ((oldval & ~mask) \
+ | (((unsigned int) val << shift) & mask)); \
+ failure = __kernel_cmpxchg (oldval, newval, wordptr); \
+ } \
+ while (failure != 0); \
+ \
+ return (oldval & mask) >> shift; \
+ }
+
+SUBWORD_TEST_AND_SET (unsigned short, 2)
+SUBWORD_TEST_AND_SET (unsigned char, 1)
+
+#define SYNC_LOCK_RELEASE(TYPE, WIDTH) \
+ void HIDDEN \
+ __sync_lock_release_##WIDTH (TYPE *ptr) \
+ { \
+ /* All writes before this point must be seen before we release \
+ the lock itself. */ \
+ __kernel_dmb (); \
+ *ptr = 0; \
+ }
+
+SYNC_LOCK_RELEASE (int, 4)
+SYNC_LOCK_RELEASE (short, 2)
+SYNC_LOCK_RELEASE (char, 1)
--- /dev/null
+++ b/libgcc/config/csky/linux-unwind.h
+/* DWARF2 EH unwinding support for C-SKY Linux.
+ Copyright (C) 2018 Free Software Foundation, Inc.
+ Contributed by C-SKY Microsystems and Mentor Graphics.
+
+ This file is part of GCC.
+
+ GCC is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 3, or (at your option)
+ any later version.
+
+ GCC is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ Under Section 7 of GPL version 3, you are granted additional
+ permissions described in the GCC Runtime Library Exception, version
+ 3.1, as published by the Free Software Foundation.
+
+ You should have received a copy of the GNU General Public License and
+ a copy of the GCC Runtime Library Exception along with this program;
+ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
+ <http://www.gnu.org/licenses/>. */
+
+#ifndef inhibit_libc
+
+/* Do code reading to identify a signal frame, and set the frame
+ state data appropriately. See unwind-dw2.c for the structs. */
+
+#include <signal.h>
+#include <asm/unistd.h>
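
As a sketch of the matching logic used below: the unwinder recognizes the
kernel's signal-return trampolines by comparing four halfwords at the return
address (an illustration; the real code tests them inline).

    /* NR is the syscall-number halfword: 119 for sigreturn frames,
       173 for rt_sigreturn frames.  */
    static int
    is_sigreturn_trampoline (const unsigned short *pc, unsigned short nr)
    {
      return (pc[0] == 0xea07      /* movi r7, ... (first halfword) */
              && pc[1] == nr       /* ... syscall number */
              && pc[2] == 0xc000   /* trap 0 (first halfword) */
              && pc[3] == 0x2020); /* trap 0 (second halfword) */
    }
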
+
+/* The third parameter to the signal handler points to something with
+ this structure defined in asm/ucontext.h, but the name clashes with
+ struct ucontext from sys/ucontext.h so this private copy is used. */
+typedef struct _sig_ucontext {
+ unsigned long uc_flags;
+ struct _sig_ucontext *uc_link;
+ stack_t uc_stack;
+ struct sigcontext uc_mcontext;
+ sigset_t uc_sigmask;
+} _sig_ucontext_t;
+
+#define MD_FALLBACK_FRAME_STATE_FOR csky_fallback_frame_state
+
+static _Unwind_Reason_Code
+csky_fallback_frame_state (struct _Unwind_Context *context,
+ _Unwind_FrameState *fs)
+{
+ u_int16_t *pc = (u_int16_t *) context->ra;
+ struct sigcontext *sc;
+ _Unwind_Ptr new_cfa;
+ int i;
+
+ /* movi r7, __NR_sigreturn; trap 0 */
+ if ((*(pc+0) == 0xea07) && (*(pc+1) == 119)
+ && (*(pc+2) == 0xc000) && (*(pc+3) == 0x2020))
+ {
+ struct sigframe
+ {
+ int sig;
+ int code;
+ struct sigcontext *psc;
+ unsigned long extramask[2]; /* _NSIG_WORDS */
+ struct sigcontext sc;
+ } *_rt = context->cfa;
+ sc = _rt->psc; /* Equivalently, &_rt->sc.  */
+ }
+ /* movi r7, __NR_rt_sigreturn; trap 0 */
+ else if ((*(pc+0) == 0xea07) && (*(pc+1) == 173)
+ && (*(pc+2) == 0xc000) && (*(pc+3) == 0x2020))
+ {
+ struct rt_sigframe
+ {
+ int sig;
+ struct siginfo *pinfo;
+ void *puc;
+ siginfo_t info;
+ struct _sig_ucontext uc;
+ } *_rt = context->cfa;
+ sc = &(_rt->uc.uc_mcontext);
+ }
+ else
+ return _URC_END_OF_STACK;
+
+ new_cfa = (_Unwind_Ptr) sc->sc_usp;
+ fs->regs.cfa_how = CFA_REG_OFFSET;
+ fs->regs.cfa_reg = STACK_POINTER_REGNUM;
+ fs->regs.cfa_offset = new_cfa - (_Unwind_Ptr) context->cfa;
+
+ fs->regs.reg[0].how = REG_SAVED_OFFSET;
+ fs->regs.reg[0].loc.offset = (_Unwind_Ptr)&(sc->sc_a0) - new_cfa;
+
+ fs->regs.reg[1].how = REG_SAVED_OFFSET;
+ fs->regs.reg[1].loc.offset = (_Unwind_Ptr)&(sc->sc_a1) - new_cfa;
+
+ fs->regs.reg[2].how = REG_SAVED_OFFSET;
+ fs->regs.reg[2].loc.offset = (_Unwind_Ptr)&(sc->sc_a2) - new_cfa;
+
+ fs->regs.reg[3].how = REG_SAVED_OFFSET;
+ fs->regs.reg[3].loc.offset = (_Unwind_Ptr)&(sc->sc_a3) - new_cfa;
+
+ for (i = 4; i < 14; i++)
+ {
+ fs->regs.reg[i].how = REG_SAVED_OFFSET;
+ fs->regs.reg[i].loc.offset = ((_Unwind_Ptr)&(sc->sc_regs[i - 4])
+ - new_cfa);
+ }
+
+ for (i = 16; i < 32; i++)
+ {
+ fs->regs.reg[i].how = REG_SAVED_OFFSET;
+ fs->regs.reg[i].loc.offset = ((_Unwind_Ptr)&(sc->sc_exregs[i - 16])
+ - new_cfa);
+ }
+
+ /* FIXME : hi lo ? */
+ fs->regs.reg[15].how = REG_SAVED_OFFSET;
+ fs->regs.reg[15].loc.offset = (_Unwind_Ptr)&(sc->sc_r15) - new_cfa;
+
+ fs->regs.reg[56].how = REG_SAVED_OFFSET;
+ fs->regs.reg[56].loc.offset = (_Unwind_Ptr)&(sc->sc_pc) - new_cfa;
+ fs->retaddr_column = 56;
+ fs->signal_frame = 1;
+
+ return _URC_NO_REASON;
+}
+
+
+#endif /* !inhibit_libc */
--- /dev/null
+++ b/libgcc/config/csky/t-csky
+# Makefile fragment for all C-SKY targets.
+# Copyright (C) 2018 Free Software Foundation, Inc.
+# Contributed by C-SKY Microsystems and Mentor Graphics.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+LIB1ASMSRC = csky/lib1funcs.S
+LIB1ASMFUNCS = _divsi3 _udivsi3 _modsi3 _umodsi3 _unorddf2 _unordsf2 \
+ _csky_case_sqi _csky_case_uqi _csky_case_shi _csky_case_uhi _csky_case_si
+
+LIB2FUNCS_EXCLUDE += _unord_df
+LIB2FUNCS_EXCLUDE += _unord_sf
+
+TARGET_LIBGCC2_CFLAGS = -O3 -DNO_FLOATLIB_FIXUNSDFSI
--- /dev/null
+++ b/libgcc/config/csky/t-linux-csky
+# Makefile fragment for C-SKY targets running Linux.
+# Copyright (C) 2018 Free Software Foundation, Inc.
+# Contributed by C-SKY Microsystems and Mentor Graphics.
+#
+# This file is part of GCC.
+#
+# GCC is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 3, or (at your option)
+# any later version.
+#
+# GCC is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with GCC; see the file COPYING3. If not see
+# <http://www.gnu.org/licenses/>.
+
+LIB2ADD_ST += $(srcdir)/config/csky/linux-atomic.c