This simplifies the integration of the CPUs' software, avoids complex switches in the code,
and is a first step to make CPUs fully pluggable.
The CPU name is no longer present in the crt0 files (for example crt0-vexriscv-ctr.o
becomes crt0-ctr.o), so users building firmware externally will have to update their
Makefiles to remove the $(CPU) from crt0-$(CPU)-ctr.o.
--- /dev/null
+.section .text, "ax", @progbits
+.global boot_helper
+// Jump to the boot address handed over by the BIOS.
+// By LiteX convention the target address is passed in x13 (a3) — never returns.
+boot_helper:
+ jr x13
--- /dev/null
+.global main
+.global isr
+.global _start
+
+// Reset vector: padded with nops so the trap handler lands at a fixed offset.
+_start:
+ j crt_init
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+// Machine trap handler (installed in mtvec by crt_init).
+// Saves the RV64 caller-saved integer registers (ra, t0-t2, a0-a7, t3-t6),
+// calls the C isr() routine, restores them and returns with mret.
+// Registers are first stored BELOW sp, then sp is moved down; the negative
+// store offsets and positive load offsets mirror each other (x1 at -1*8
+// before the addi is the same slot as 15*8 after it).
+// NOTE(review): storing below sp before adjusting it is only safe because
+// M-mode traps cannot nest here — confirm isr() cannot itself trap.
+trap_entry:
+ sd x1, - 1*8(sp)
+ sd x5, - 2*8(sp)
+ sd x6, - 3*8(sp)
+ sd x7, - 4*8(sp)
+ sd x10, - 5*8(sp)
+ sd x11, - 6*8(sp)
+ sd x12, - 7*8(sp)
+ sd x13, - 8*8(sp)
+ sd x14, - 9*8(sp)
+ sd x15, -10*8(sp)
+ sd x16, -11*8(sp)
+ sd x17, -12*8(sp)
+ sd x28, -13*8(sp)
+ sd x29, -14*8(sp)
+ sd x30, -15*8(sp)
+ sd x31, -16*8(sp)
+ addi sp,sp,-16*8
+ call isr
+ ld x1 , 15*8(sp)
+ ld x5, 14*8(sp)
+ ld x6, 13*8(sp)
+ ld x7, 12*8(sp)
+ ld x10, 11*8(sp)
+ ld x11, 10*8(sp)
+ ld x12, 9*8(sp)
+ ld x13, 8*8(sp)
+ ld x14, 7*8(sp)
+ ld x15, 6*8(sp)
+ ld x16, 5*8(sp)
+ ld x17, 4*8(sp)
+ ld x28, 3*8(sp)
+ ld x29, 2*8(sp)
+ ld x30, 1*8(sp)
+ ld x31, 0*8(sp)
+ addi sp,sp,16*8
+ mret
+ // (redundant .text directive, kept as-is)
+ .text
+
+
+// C runtime initialization: set up stack and trap vector, clear .bss,
+// enable machine external interrupts, then run main() forever.
+crt_init:
+ la sp, _fstack + 8 // stack top from linker script (+8 — TODO confirm why)
+ la a0, trap_entry
+ csrw mtvec, a0 // install machine trap handler
+
+bss_init:
+ la a0, _fbss
+ la a1, _ebss
+bss_loop:
+ beq a0,a1,bss_done
+ sd zero,0(a0) // zero .bss eight bytes at a time
+ add a0,a0,8
+ j bss_loop
+bss_done:
+
+// call plic_init // initialize external interrupt controller
+ li a0, 0x800 // external interrupt sources only (using LiteX timer);
+ // NOTE: must still enable mstatus.MIE!
+ // BUGFIX: the li above was commented out, so mie was written with the
+ // stale _ebss address left in a0 by the bss loop. 0x800 = mie.MEIE.
+ csrw mie,a0
+
+ call main
+inf_loop:
+ j inf_loop
--- /dev/null
+#ifndef CSR_DEFS__H
+#define CSR_DEFS__H
+
+/* mstatus.MIE: machine-mode global interrupt-enable bit (bit 3). */
+#define CSR_MSTATUS_MIE 0x8
+
+/* Data-cache info CSR — presumably a vendor/custom CSR number; TODO confirm. */
+#define CSR_DCACHE_INFO 0xCC0
+
+#endif /* CSR_DEFS__H */
--- /dev/null
+#ifndef __IRQ_H
+#define __IRQ_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <system.h>
+#include <generated/csr.h>
+#include <generated/soc.h>
+
+// The RocketChip uses a Platform-Level Interrupt Controller (PLIC) which
+// is programmed and queried via a set of MMIO registers.
+// TODO: How about Blackparrot? Should be probably included in linux version
+// NOTE(review): these addresses assume the standard PLIC mapping at
+// 0x0c000000 — confirm against the generated SoC memory map.
+
+#define PLIC_BASE 0x0c000000L // Base address and per-pin priority array
+#define PLIC_PENDING 0x0c001000L // Bit field matching currently pending pins
+#define PLIC_ENABLED 0x0c002000L // Bit field corresponding to the current mask
+#define PLIC_THRSHLD 0x0c200000L // Per-pin priority must be >= this to trigger
+#define PLIC_CLAIM 0x0c200004L // Claim & completion register address
+
+// Return 1 if machine-mode global interrupts (mstatus.MIE) are enabled.
+static inline unsigned int irq_getie(void)
+{
+ return (csrr(mstatus) & CSR_MSTATUS_MIE) != 0; /* FIXME */
+}
+
+// Enable (ie != 0) or disable (ie == 0) machine-mode global interrupts.
+static inline void irq_setie(unsigned int ie)
+{
+ if(ie) csrs(mstatus,CSR_MSTATUS_MIE); else csrc(mstatus,CSR_MSTATUS_MIE); /* FIXME */
+}
+
+// Per-source mask readback — not implemented yet (PLIC_ENABLED unused).
+static inline unsigned int irq_getmask(void)
+{
+ return 0; /* FIXME */
+}
+
+// Per-source mask write — not implemented yet.
+static inline void irq_setmask(unsigned int mask)
+{
+ /* FIXME */
+}
+
+// Read pending PLIC sources; >> 1 skips source 0, which is reserved.
+static inline unsigned int irq_pending(void)
+{
+ return csr_readl(PLIC_PENDING) >> 1; /* FIXME */
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __IRQ_H */
--- /dev/null
+#ifndef __SYSTEM_H
+#define __SYSTEM_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Cache maintenance hooks implemented by the platform support code. */
+void flush_cpu_icache(void);
+void flush_cpu_dcache(void);
+void flush_l2_cache(void);
+
+/* Spin for approximately `ms` milliseconds. */
+void busy_wait(unsigned int ms);
+
+#include <csr-defs.h>
+
+/* Read a CSR by name; `reg` is pasted into the instruction text. */
+#define csrr(reg) ({ unsigned long __tmp; \
+ asm volatile ("csrr %0, " #reg : "=r"(__tmp)); \
+ __tmp; })
+
+/* Write a CSR; small compile-time constants use the 5-bit immediate form. */
+#define csrw(reg, val) ({ \
+ if (__builtin_constant_p(val) && (unsigned long)(val) < 32) \
+ asm volatile ("csrw " #reg ", %0" :: "i"(val)); \
+ else \
+ asm volatile ("csrw " #reg ", %0" :: "r"(val)); })
+
+/* Atomically set CSR bits (csrrs with x0 destination discards the old value). */
+#define csrs(reg, bit) ({ \
+ if (__builtin_constant_p(bit) && (unsigned long)(bit) < 32) \
+ asm volatile ("csrrs x0, " #reg ", %0" :: "i"(bit)); \
+ else \
+ asm volatile ("csrrs x0, " #reg ", %0" :: "r"(bit)); })
+
+/* Atomically clear CSR bits. */
+#define csrc(reg, bit) ({ \
+ if (__builtin_constant_p(bit) && (unsigned long)(bit) < 32) \
+ asm volatile ("csrrc x0, " #reg ", %0" :: "i"(bit)); \
+ else \
+ asm volatile ("csrrc x0, " #reg ", %0" :: "r"(bit)); })
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __SYSTEM_H */
--- /dev/null
+.section .text, "ax", @progbits
+.global boot_helper
+/* Jump to the boot address handed over by the BIOS.
+   Presumably r4 holds the target address by LiteX lm32 convention — TODO confirm. */
+boot_helper:
+ call r4
--- /dev/null
+/*
+ * LatticeMico32 C startup code.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/* Exception handlers - Must be 32 bytes long. */
+.section .text, "ax", @progbits
+.global _start
+/* Reset: clear r0, disable interrupts (IE), point the exception base
+   address (EBA) at this vector table, then branch to _crt0. */
+_start:
+_reset_handler:
+ xor r0, r0, r0
+ wcsr IE, r0
+ mvhi r1, hi(_reset_handler)
+ ori r1, r1, lo(_reset_handler)
+ wcsr EBA, r1
+ bi _crt0
+ nop
+ nop
+
+/* Unhandled exceptions below spin in place (each slot nop-padded to 32 bytes). */
+_breakpoint_handler:
+ bi _breakpoint_handler
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+_instruction_bus_error_handler:
+ bi _instruction_bus_error_handler
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+_watchpoint_hander:
+ bi _watchpoint_hander
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+_data_bus_error_handler:
+ bi _data_bus_error_handler
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+_divide_by_zero_handler:
+ bi _divide_by_zero_handler
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+/* IRQ: stash ra at the current sp slot, save remaining caller-saved
+   registers via .save_all, run the C isr(), then restore and eret. */
+_interrupt_handler:
+ sw (sp+0), ra
+ calli .save_all
+ calli isr
+ bi .restore_all_and_eret
+ nop
+ nop
+ nop
+ nop
+
+_syscall_handler:
+ bi _syscall_handler
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+/* C runtime setup: stack, optional XIP data copy, BSS clear, then main(). */
+_crt0:
+ /* Setup stack and global pointer */
+ mvhi sp, hi(_fstack)
+ ori sp, sp, lo(_fstack)
+
+#ifdef EXECUTE_IN_PLACE
+ /* Load DATA */
+ mvhi r1, hi(_erodata)
+ ori r1, r1, lo(_erodata)
+ mvhi r2, hi(_fdata)
+ ori r2, r2, lo(_fdata)
+ mvhi r3, hi(_edata)
+ ori r3, r3, lo(_edata)
+.moveDATA:
+ be r2, r3, .doBSS
+ lw r4, (r1+0)
+ sw (r2+0), r4
+ /* _edata is aligned to 16 bytes. Use word-xfers. */
+ addi r1, r1, 4
+ addi r2, r2, 4
+ bi .moveDATA
+#endif
+
+.doBSS:
+ /* Clear BSS */
+ mvhi r1, hi(_fbss)
+ ori r1, r1, lo(_fbss)
+ mvhi r3, hi(_ebss)
+ ori r3, r3, lo(_ebss)
+.clearBSS:
+ be r1, r3, .callMain
+ sw (r1+0), r0
+ addi r1, r1, 4
+ bi .clearBSS
+
+.callMain:
+ bi main
+
+/* Save caller-saved state for the interrupt handler. Called with ra
+   already stored at the pre-adjustment (sp+0); that slot becomes (sp+56)
+   after the frame is opened and is copied into the frame at (sp+44). */
+.save_all:
+ addi sp, sp, -56
+ sw (sp+4), r1
+ sw (sp+8), r2
+ sw (sp+12), r3
+ sw (sp+16), r4
+ sw (sp+20), r5
+ sw (sp+24), r6
+ sw (sp+28), r7
+ sw (sp+32), r8
+ sw (sp+36), r9
+ sw (sp+40), r10
+ sw (sp+48), ea
+ sw (sp+52), ba
+ /* ra needs to be moved from initial stack location */
+ lw r1, (sp+56)
+ sw (sp+44), r1
+ ret
+
+/* Restore everything saved by .save_all and return from exception. */
+.restore_all_and_eret:
+ lw r1, (sp+4)
+ lw r2, (sp+8)
+ lw r3, (sp+12)
+ lw r4, (sp+16)
+ lw r5, (sp+20)
+ lw r6, (sp+24)
+ lw r7, (sp+28)
+ lw r8, (sp+32)
+ lw r9, (sp+36)
+ lw r10, (sp+40)
+ lw ra, (sp+44)
+ lw ea, (sp+48)
+ lw ba, (sp+52)
+ addi sp, sp, 56
+ eret
--- /dev/null
+#ifndef __IRQ_H
+#define __IRQ_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <system.h>
+#include <generated/csr.h>
+#include <generated/soc.h>
+
+/* LM32 interrupt control via the IE (enable), IM (mask) and IP (pending)
+   control/status registers, accessed with rcsr/wcsr. */
+
+/* Read the global interrupt-enable state (IE CSR). */
+static inline unsigned int irq_getie(void)
+{
+	unsigned int ie;
+	__asm__ __volatile__("rcsr %0, IE" : "=r" (ie));
+	return ie;
+}
+
+/* Write the global interrupt-enable state (IE CSR). */
+static inline void irq_setie(unsigned int ie)
+{
+	__asm__ __volatile__("wcsr IE, %0" : : "r" (ie));
+}
+
+/* Read the per-source interrupt mask (IM CSR). */
+static inline unsigned int irq_getmask(void)
+{
+	unsigned int mask;
+	__asm__ __volatile__("rcsr %0, IM" : "=r" (mask));
+	return mask;
+}
+
+/* Write the per-source interrupt mask (IM CSR). */
+static inline void irq_setmask(unsigned int mask)
+{
+	__asm__ __volatile__("wcsr IM, %0" : : "r" (mask));
+}
+
+/* Read the pending-interrupt bitfield (IP CSR). */
+static inline unsigned int irq_pending(void)
+{
+	unsigned int pending;
+	__asm__ __volatile__("rcsr %0, IP" : "=r" (pending));
+	return pending;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __IRQ_H */
--- /dev/null
+#ifndef __SYSTEM_H
+#define __SYSTEM_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Cache maintenance hooks implemented by the platform support code. */
+void flush_cpu_icache(void);
+void flush_cpu_dcache(void);
+void flush_l2_cache(void);
+
+/* Spin for approximately `ms` milliseconds. */
+void busy_wait(unsigned int ms);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __SYSTEM_H */
--- /dev/null
+.section .text, "ax", @progbits
+.global boot_helper
+/* Boot-helper stub: jumping to a BIOS-provided boot address is not
+   implemented for this CPU yet (see FIXME). */
+boot_helper:
+ nop # FIXME
--- /dev/null
+/* Copyright 2013-2014 IBM Corp.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ * implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Top of the startup stack; the stack grows down from here. */
+#define STACK_TOP 0xffff4000
+
+/* If the core comes up in the wrong endianness, `tdi 0,0,0x48` decodes as a
+   byte-reversed branch over the .long trampoline, which flips MSR[LE] via
+   hrfid; in the right endianness it is a no-op trap and we branch to 191f. */
+#define FIXUP_ENDIAN \
+ tdi 0,0,0x48; /* Reverse endian of b . + 8 */ \
+ b 191f; /* Skip trampoline if endian is good */ \
+ .long 0xa600607d; /* mfmsr r11 */ \
+ .long 0x01006b69; /* xori r11,r11,1 */ \
+ .long 0x05009f42; /* bcl 20,31,$+4 */ \
+ .long 0xa602487d; /* mflr r10 */ \
+ .long 0x14004a39; /* addi r10,r10,20 */ \
+ .long 0xa64b5a7d; /* mthsrr0 r10 */ \
+ .long 0xa64b7b7d; /* mthsrr1 r11 */ \
+ .long 0x2402004c; /* hrfid */ \
+191:
+
+
+/* Load an immediate 64-bit value into a register */
+#define LOAD_IMM64(r, e) \
+ lis r,(e)@highest; \
+ ori r,r,(e)@higher; \
+ rldicr r,r, 32, 31; \
+ oris r,r, (e)@h; \
+ ori r,r, (e)@l;
+
+ .section ".head","ax"
+
+ /* Entry point, placed at offset 0 of the .head section. */
+ . = 0
+.global _start
+_start:
+ FIXUP_ENDIAN
+
+ /* setup stack */
+ LOAD_IMM64(%r1, STACK_TOP - 0x100)
+ /* load main's address into CTR and call it */
+ LOAD_IMM64(%r12, main)
+ mtctr %r12
+ bctrl
+ /* if main ever returns, restart from address 0 */
+ ba 0
+
+ /* XXX: litedram init should not take exceptions, maybe we could get
+ * rid of these to save space, along with a core tweak to suppress
+ * exceptions in case they happen (just terminate ?)
+ */
+
+/* Place an infinite-loop stub at the architected exception offset `nr`
+   by assigning the location counter. */
+#define EXCEPTION(nr) \
+ .= nr; \
+ b .
+
+ /* More exception stubs */
+ EXCEPTION(0x100)
+ EXCEPTION(0x200)
+ EXCEPTION(0x300)
+ EXCEPTION(0x380)
+ EXCEPTION(0x400)
+ EXCEPTION(0x480)
+ EXCEPTION(0x500)
+ EXCEPTION(0x600)
+ EXCEPTION(0x700)
+ EXCEPTION(0x800)
+ EXCEPTION(0x900)
+ EXCEPTION(0x980)
+ EXCEPTION(0xa00)
+ EXCEPTION(0xb00)
+ EXCEPTION(0xc00)
+ EXCEPTION(0xd00)
+ EXCEPTION(0xe00)
+ EXCEPTION(0xe20)
+ EXCEPTION(0xe40)
+ EXCEPTION(0xe60)
+ EXCEPTION(0xe80)
+ EXCEPTION(0xf00)
+ EXCEPTION(0xf20)
+ EXCEPTION(0xf40)
+ EXCEPTION(0xf60)
+ EXCEPTION(0xf80)
+#if 0
+ EXCEPTION(0x1000)
+ EXCEPTION(0x1100)
+ EXCEPTION(0x1200)
+ EXCEPTION(0x1300)
+ EXCEPTION(0x1400)
+ EXCEPTION(0x1500)
+ EXCEPTION(0x1600)
+#endif
+
+ .text
+
+
--- /dev/null
+#ifndef __IRQ_H
+#define __IRQ_H
+
+/* Empty stub: interrupt support is not implemented for this CPU yet. */
+
+#endif /* __IRQ_H */
--- /dev/null
+#ifndef __SYSTEM_H
+#define __SYSTEM_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Cache maintenance hooks implemented by the platform support code. */
+void flush_cpu_icache(void);
+void flush_cpu_dcache(void);
+void flush_l2_cache(void);
+
+/* Spin for approximately `ms` milliseconds. */
+void busy_wait(unsigned int ms);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __SYSTEM_H */
--- /dev/null
+ .section .text, "ax", @progbits
+ .global boot_helper
+/* Jump to the boot address handed over by the BIOS.
+   By LiteX convention the target address is passed in x13 (a3) — never returns. */
+boot_helper:
+ jr x13
--- /dev/null
+/* mie.MEIE: machine external-interrupt enable bit. */
+#define MIE_MEIE 0x800
+
+ .global _start
+_start:
+ j reset_vector
+
+/* Reset: set up stack and trap vector, clear .bss, enable external
+   interrupts, then run main() forever. */
+reset_vector:
+ la sp, _fstack
+ la t0, trap_vector
+ csrw mtvec, t0
+
+ // initialize .bss
+ la t0, _fbss
+ la t1, _ebss
+1: beq t0, t1, 2f
+ sw zero, 0(t0)
+ addi t0, t0, 4
+ j 1b
+2:
+ // enable external interrupts
+ li t0, MIE_MEIE
+ csrs mie, t0
+
+ call main
+1: j 1b
+
+/* Machine trap handler: saves the RV32 caller-saved integer registers
+   (ra, t0-t6, a0-a7), calls the C isr(), restores and returns via mret. */
+trap_vector:
+ addi sp, sp, -16*4
+ sw ra, 0*4(sp)
+ sw t0, 1*4(sp)
+ sw t1, 2*4(sp)
+ sw t2, 3*4(sp)
+ sw a0, 4*4(sp)
+ sw a1, 5*4(sp)
+ sw a2, 6*4(sp)
+ sw a3, 7*4(sp)
+ sw a4, 8*4(sp)
+ sw a5, 9*4(sp)
+ sw a6, 10*4(sp)
+ sw a7, 11*4(sp)
+ sw t3, 12*4(sp)
+ sw t4, 13*4(sp)
+ sw t5, 14*4(sp)
+ sw t6, 15*4(sp)
+ call isr
+ lw ra, 0*4(sp)
+ lw t0, 1*4(sp)
+ lw t1, 2*4(sp)
+ lw t2, 3*4(sp)
+ lw a0, 4*4(sp)
+ lw a1, 5*4(sp)
+ lw a2, 6*4(sp)
+ lw a3, 7*4(sp)
+ lw a4, 8*4(sp)
+ lw a5, 9*4(sp)
+ lw a6, 10*4(sp)
+ lw a7, 11*4(sp)
+ lw t3, 12*4(sp)
+ lw t4, 13*4(sp)
+ lw t5, 14*4(sp)
+ lw t6, 15*4(sp)
+ addi sp, sp, 16*4
+ mret
--- /dev/null
+#ifndef CSR_DEFS__H
+#define CSR_DEFS__H
+
+/* mstatus.MIE: machine-mode global interrupt-enable bit (bit 3). */
+#define CSR_MSTATUS_MIE 0x8
+
+/* Custom CSR numbers for the per-source IRQ mask/pending registers —
+   presumably Minerva-specific; TODO confirm against the core. */
+#define CSR_IRQ_MASK 0x330
+#define CSR_IRQ_PENDING 0x360
+
+/* Data-cache info CSR — presumably a vendor/custom CSR number; TODO confirm. */
+#define CSR_DCACHE_INFO 0xCC0
+
+#endif /* CSR_DEFS__H */
--- /dev/null
+#ifndef __IRQ_H
+#define __IRQ_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <system.h>
+#include <generated/csr.h>
+#include <generated/soc.h>
+
+/* Return 1 if machine-mode global interrupts (mstatus.MIE) are enabled. */
+static inline unsigned int irq_getie(void)
+{
+	return (csrr(mstatus) & CSR_MSTATUS_MIE) != 0;
+}
+
+/* Enable (ie != 0) or disable (ie == 0) machine-mode global interrupts. */
+static inline void irq_setie(unsigned int ie)
+{
+	if(ie) csrs(mstatus,CSR_MSTATUS_MIE); else csrc(mstatus,CSR_MSTATUS_MIE);
+}
+
+/* Read the per-source IRQ mask from the custom CSR (numeric CSR operand). */
+static inline unsigned int irq_getmask(void)
+{
+	unsigned int mask;
+	asm volatile ("csrr %0, %1" : "=r"(mask) : "i"(CSR_IRQ_MASK));
+	return mask;
+}
+
+/* Write the per-source IRQ mask to the custom CSR. */
+static inline void irq_setmask(unsigned int mask)
+{
+	asm volatile ("csrw %0, %1" :: "i"(CSR_IRQ_MASK), "r"(mask));
+}
+
+/* Read the pending-IRQ bitfield from the custom CSR. */
+static inline unsigned int irq_pending(void)
+{
+	unsigned int pending;
+	asm volatile ("csrr %0, %1" : "=r"(pending) : "i"(CSR_IRQ_PENDING));
+	return pending;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __IRQ_H */
--- /dev/null
+#ifndef __SYSTEM_H
+#define __SYSTEM_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Cache maintenance hooks implemented by the platform support code. */
+void flush_cpu_icache(void);
+void flush_cpu_dcache(void);
+void flush_l2_cache(void);
+
+/* Spin for approximately `ms` milliseconds. */
+void busy_wait(unsigned int ms);
+
+#include <csr-defs.h>
+
+/* Read a CSR by name; `reg` is pasted into the instruction text. */
+#define csrr(reg) ({ unsigned long __tmp; \
+ asm volatile ("csrr %0, " #reg : "=r"(__tmp)); \
+ __tmp; })
+
+/* Write a CSR; small compile-time constants use the 5-bit immediate form. */
+#define csrw(reg, val) ({ \
+ if (__builtin_constant_p(val) && (unsigned long)(val) < 32) \
+ asm volatile ("csrw " #reg ", %0" :: "i"(val)); \
+ else \
+ asm volatile ("csrw " #reg ", %0" :: "r"(val)); })
+
+/* Atomically set CSR bits (csrrs with x0 destination discards the old value). */
+#define csrs(reg, bit) ({ \
+ if (__builtin_constant_p(bit) && (unsigned long)(bit) < 32) \
+ asm volatile ("csrrs x0, " #reg ", %0" :: "i"(bit)); \
+ else \
+ asm volatile ("csrrs x0, " #reg ", %0" :: "r"(bit)); })
+
+/* Atomically clear CSR bits. */
+#define csrc(reg, bit) ({ \
+ if (__builtin_constant_p(bit) && (unsigned long)(bit) < 32) \
+ asm volatile ("csrrc x0, " #reg ", %0" :: "i"(bit)); \
+ else \
+ asm volatile ("csrrc x0, " #reg ", %0" :: "r"(bit)); })
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __SYSTEM_H */
--- /dev/null
+.section .text, "ax", @progbits
+.global boot_helper
+/* Jump to the boot address handed over by the BIOS; the l.nop fills the
+   branch delay slot. Presumably r6 holds the target address — TODO confirm. */
+boot_helper:
+ l.jr r6
+ l.nop
--- /dev/null
+/*
+ * (C) Copyright 2012, Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <spr-defs.h>
+
+/*
+ * OR1K Architecture has a 128 byte "red zone" after the stack that can not be
+ * touched by exception handlers. GCC uses this red zone for locals and
+ * temps without needing to change the stack pointer.
+ */
+#define OR1K_RED_ZONE_SIZE 128
+
+/*
+ * We need 4 bytes (32 bits) * 32 registers space on the stack to save all the
+ * registers.
+ */
+#define EXCEPTION_STACK_SIZE ((4*32) + OR1K_RED_ZONE_SIZE)
+
+/* Common exception stub placed at each architected vector: opens a frame,
+ * saves the link register r9, and delegates to _exception_handler (which
+ * saves the remaining registers), then unwinds and returns with l.rfe. */
+#define HANDLE_EXCEPTION ; \
+ l.addi r1, r1, -EXCEPTION_STACK_SIZE ; \
+ l.sw 0x1c(r1), r9 ; \
+ l.jal _exception_handler ; \
+ l.nop ; \
+ l.lwz r9, 0x1c(r1) ; \
+ l.addi r1, r1, EXCEPTION_STACK_SIZE ; \
+ l.rfe ; \
+ l.nop
+
+
+.section .text, "ax", @progbits
+.global _start
+/* Reset: zero every GPR, enter supervisor mode, point the exception vector
+   base (EVBAR) at this table, enable caches, then branch to _crt0. */
+_start:
+_reset_handler:
+ l.movhi r0, 0
+ l.movhi r1, 0
+ l.movhi r2, 0
+ l.movhi r3, 0
+ l.movhi r4, 0
+ l.movhi r5, 0
+ l.movhi r6, 0
+ l.movhi r7, 0
+ l.movhi r8, 0
+ l.movhi r9, 0
+ l.movhi r10, 0
+ l.movhi r11, 0
+ l.movhi r12, 0
+ l.movhi r13, 0
+ l.movhi r14, 0
+ l.movhi r15, 0
+ l.movhi r16, 0
+ l.movhi r17, 0
+ l.movhi r18, 0
+ l.movhi r19, 0
+ l.movhi r20, 0
+ l.movhi r21, 0
+ l.movhi r22, 0
+ l.movhi r23, 0
+ l.movhi r24, 0
+ l.movhi r25, 0
+ l.movhi r26, 0
+ l.movhi r27, 0
+ l.movhi r28, 0
+ l.movhi r29, 0
+ l.movhi r30, 0
+ l.movhi r31, 0
+
+ l.ori r21, r0, SPR_SR_SM
+ l.mtspr r0, r21, SPR_SR
+ l.movhi r21, hi(_reset_handler)
+ l.ori r21, r21, lo(_reset_handler)
+ l.mtspr r0, r21, SPR_EVBAR
+ /* enable caches */
+ l.jal _cache_init
+ l.nop
+ l.j _crt0
+ l.nop
+
+ /* bus error */
+ .org 0x200
+ HANDLE_EXCEPTION
+
+ /* data page fault */
+ .org 0x300
+ HANDLE_EXCEPTION
+
+ /* instruction page fault */
+ .org 0x400
+ HANDLE_EXCEPTION
+
+ /* tick timer */
+ .org 0x500
+ HANDLE_EXCEPTION
+
+ /* alignment */
+ .org 0x600
+ HANDLE_EXCEPTION
+
+ /* illegal instruction */
+ .org 0x700
+ HANDLE_EXCEPTION
+
+ /* external interrupt */
+ .org 0x800
+ HANDLE_EXCEPTION
+
+ /* D-TLB miss */
+ .org 0x900
+ HANDLE_EXCEPTION
+
+ /* I-TLB miss */
+ .org 0xa00
+ HANDLE_EXCEPTION
+
+ /* range */
+ .org 0xb00
+ HANDLE_EXCEPTION
+
+ /* system call */
+ .org 0xc00
+ HANDLE_EXCEPTION
+
+ /* floating point */
+ .org 0xd00
+ HANDLE_EXCEPTION
+
+ /* trap */
+ .org 0xe00
+ HANDLE_EXCEPTION
+
+ /* reserved */
+ .org 0xf00
+ HANDLE_EXCEPTION
+
+ .org 0x1000
+/* C runtime setup: stack pointer, BSS clear, then main(). */
+_crt0:
+ /* Setup stack and global pointer */
+ l.movhi r1, hi(_fstack)
+ l.ori r1, r1, lo(_fstack)
+
+ /* Clear BSS */
+ l.movhi r21, hi(_fbss)
+ l.ori r21, r21, lo(_fbss)
+ l.movhi r3, hi(_ebss)
+ l.ori r3, r3, lo(_ebss)
+.clearBSS:
+ l.sfeq r21, r3
+ l.bf .callMain
+ l.nop
+ l.sw 0(r21), r0
+ l.addi r21, r21, 4
+ l.j .clearBSS
+ l.nop
+
+.callMain:
+ l.j main
+ l.nop
+
+/* Common exception body. The HANDLE_EXCEPTION stub has already opened the
+   frame and saved r9 at 0x1c; this saves every other GPR (except r1, the
+   stack pointer), gathers the exception SPRs, and calls the C
+   exception_handler(vector, regs, epc, eear, esr). */
+_exception_handler:
+ l.sw 0x00(r1), r2
+ l.sw 0x04(r1), r3
+ l.sw 0x08(r1), r4
+ l.sw 0x0c(r1), r5
+ l.sw 0x10(r1), r6
+ l.sw 0x14(r1), r7
+ l.sw 0x18(r1), r8
+ l.sw 0x20(r1), r10
+ l.sw 0x24(r1), r11
+ l.sw 0x28(r1), r12
+ l.sw 0x2c(r1), r13
+ l.sw 0x30(r1), r14
+ l.sw 0x34(r1), r15
+ l.sw 0x38(r1), r16
+ l.sw 0x3c(r1), r17
+ l.sw 0x40(r1), r18
+ l.sw 0x44(r1), r19
+ l.sw 0x48(r1), r20
+ l.sw 0x4c(r1), r21
+ l.sw 0x50(r1), r22
+ l.sw 0x54(r1), r23
+ l.sw 0x58(r1), r24
+ l.sw 0x5c(r1), r25
+ l.sw 0x60(r1), r26
+ l.sw 0x64(r1), r27
+ l.sw 0x68(r1), r28
+ l.sw 0x6c(r1), r29
+ l.sw 0x70(r1), r30
+ l.sw 0x74(r1), r31
+
+ /* Save return address */
+ l.or r14, r0, r9
+ /* Calculate exception vector from handler address */
+ l.andi r3, r9, 0xf00
+ l.srli r3, r3, 8
+ /* Pass saved register state */
+ l.or r4, r0, r1
+ /* Extract exception PC */
+ l.mfspr r5, r0, SPR_EPCR_BASE
+ /* Extract exception effective address */
+ l.mfspr r6, r0, SPR_EEAR_BASE
+ /* Extract exception SR */
+ l.mfspr r7, r0, SPR_ESR_BASE
+ /* Call exception handler with the link address as argument */
+ l.jal exception_handler
+ l.nop
+
+ /* Load return address */
+ l.or r9, r0, r14
+ /* Restore state */
+ l.lwz r2, 0x00(r1)
+ l.lwz r3, 0x04(r1)
+ l.lwz r4, 0x08(r1)
+ l.lwz r5, 0x0c(r1)
+ l.lwz r6, 0x10(r1)
+ l.lwz r7, 0x14(r1)
+ l.lwz r8, 0x18(r1)
+ l.lwz r10, 0x20(r1)
+ l.lwz r11, 0x24(r1)
+ l.lwz r12, 0x28(r1)
+ l.lwz r13, 0x2c(r1)
+ l.lwz r14, 0x30(r1)
+ l.lwz r15, 0x34(r1)
+ l.lwz r16, 0x38(r1)
+ l.lwz r17, 0x3c(r1)
+ l.lwz r18, 0x40(r1)
+ l.lwz r19, 0x44(r1)
+ l.lwz r20, 0x48(r1)
+ l.lwz r21, 0x4c(r1)
+ l.lwz r22, 0x50(r1)
+ l.lwz r23, 0x54(r1)
+ l.lwz r24, 0x58(r1)
+ l.lwz r25, 0x5c(r1)
+ l.lwz r26, 0x60(r1)
+ l.lwz r27, 0x64(r1)
+ l.lwz r28, 0x68(r1)
+ l.lwz r29, 0x6c(r1)
+ l.lwz r30, 0x70(r1)
+ l.lwz r31, 0x74(r1)
+ l.jr r9
+ l.nop
+
+.global _cache_init
+_cache_init:
+ /*
+ This function is to be used ONLY during reset, before main() is called.
+ TODO: Perhaps break into individual enable instruction/data cache
+ sections functions, and provide disable functions, also, all
+ callable from C
+ */
+
+ /* Instruction cache enable */
+ /* Check if IC present and skip enabling otherwise */
+#if 1
+.L6:
+ l.mfspr r3,r0,SPR_UPR
+ l.andi r7,r3,SPR_UPR_ICP
+ l.sfeq r7,r0
+ l.bf .L8
+ l.nop
+
+ /* Disable IC */
+ l.mfspr r6,r0,SPR_SR
+ l.addi r5,r0,-1
+ l.xori r5,r5,SPR_SR_ICE
+ l.and r5,r6,r5
+ l.mtspr r0,r5,SPR_SR
+
+ /* Establish cache block size
+ If BS=0, 16;
+ If BS=1, 32;
+ r14 contain block size
+ */
+ l.mfspr r3,r0,SPR_ICCFGR
+ l.andi r7,r3,SPR_ICCFGR_CBS
+ l.srli r8,r7,7
+ l.ori r4,r0,16
+ l.sll r14,r4,r8
+
+ /* Establish number of cache sets
+ r10 contains number of cache sets
+ r8 contains log(# of cache sets)
+ */
+ l.andi r7,r3,SPR_ICCFGR_NCS
+ l.srli r8,r7,3
+ l.ori r4,r0,1
+ l.sll r10,r4,r8
+
+ /* Invalidate IC: walk the whole cache (sets * block size) writing the
+ block-invalidate register; the delay slot advances the address. */
+ l.addi r6,r0,0
+ l.sll r5,r14,r8
+
+.L7: l.mtspr r0,r6,SPR_ICBIR
+ l.sfne r6,r5
+ l.bf .L7
+ l.add r6,r6,r14
+
+ /* Enable IC */
+ l.mfspr r6,r0,SPR_SR
+ l.ori r6,r6,SPR_SR_ICE
+ l.mtspr r0,r6,SPR_SR
+ /* nops flush the pipeline while the IC enable takes effect */
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ l.nop
+ /* Data cache enable */
+ /* Check if DC present and skip enabling otherwise */
+#endif
+.L8:
+#if 1
+ l.mfspr r3,r0,SPR_UPR
+ l.andi r7,r3,SPR_UPR_DCP
+ l.sfeq r7,r0
+ l.bf .L10
+ l.nop
+ /* Disable DC */
+ l.mfspr r6,r0,SPR_SR
+ l.addi r5,r0,-1
+ l.xori r5,r5,SPR_SR_DCE
+ l.and r5,r6,r5
+ l.mtspr r0,r5,SPR_SR
+ /* Establish cache block size
+ If BS=0, 16;
+ If BS=1, 32;
+ r14 contain block size
+ */
+ l.mfspr r3,r0,SPR_DCCFGR
+ l.andi r7,r3,SPR_DCCFGR_CBS
+ l.srli r8,r7,7
+ l.ori r4,r0,16
+ l.sll r14,r4,r8
+ /* Establish number of cache sets
+ r10 contains number of cache sets
+ r8 contains log(# of cache sets)
+ */
+ l.andi r7,r3,SPR_DCCFGR_NCS
+ l.srli r8,r7,3
+ l.ori r4,r0,1
+ l.sll r10,r4,r8
+ /* Invalidate DC */
+ l.addi r6,r0,0
+ l.sll r5,r14,r8
+
+.L9:
+ l.mtspr r0,r6,SPR_DCBIR
+ l.sfne r6,r5
+ l.bf .L9
+ l.add r6,r6,r14
+ /* Enable DC */
+ l.mfspr r6,r0,SPR_SR
+ l.ori r6,r6,SPR_SR_DCE
+ l.mtspr r0,r6,SPR_SR
+#endif
+.L10:
+ /* Return */
+ l.jr r9
+ l.nop
--- /dev/null
+#ifndef __IRQ_H
+#define __IRQ_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <system.h>
+#include <generated/csr.h>
+#include <generated/soc.h>
+
+/* OR1K interrupt control via the supervision register (SR) IEE bit and the
+   programmable interrupt controller (PIC) mask/status SPRs. */
+
+/* Return 1 if external interrupts (SR.IEE) are enabled. */
+static inline unsigned int irq_getie(void)
+{
+	return !!(mfspr(SPR_SR) & SPR_SR_IEE);
+}
+
+/* Enable (ie & 1) or disable external interrupts via SR.IEE. */
+static inline void irq_setie(unsigned int ie)
+{
+	if (ie & 0x1)
+		mtspr(SPR_SR, mfspr(SPR_SR) | SPR_SR_IEE);
+	else
+		mtspr(SPR_SR, mfspr(SPR_SR) & ~SPR_SR_IEE);
+}
+
+/* Read the PIC per-source mask register. */
+static inline unsigned int irq_getmask(void)
+{
+	return mfspr(SPR_PICMR);
+}
+
+/* Write the PIC per-source mask register. */
+static inline void irq_setmask(unsigned int mask)
+{
+	mtspr(SPR_PICMR, mask);
+}
+
+/* Read the PIC pending-interrupt status register. */
+static inline unsigned int irq_pending(void)
+{
+	return mfspr(SPR_PICSR);
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __IRQ_H */
--- /dev/null
+#ifndef __SYSTEM_H
+#define __SYSTEM_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* Cache maintenance hooks implemented by the platform support code. */
+void flush_cpu_icache(void);
+void flush_cpu_dcache(void);
+void flush_l2_cache(void);
+
+/* Spin for approximately `ms` milliseconds. */
+void busy_wait(unsigned int ms);
+
+#include <spr-defs.h>
+/* Read an OR1K special-purpose register by address. */
+static inline unsigned long mfspr(unsigned long add)
+{
+	unsigned long ret;
+
+	__asm__ __volatile__ ("l.mfspr %0,%1,0" : "=r" (ret) : "r" (add));
+
+	return ret;
+}
+
+/* Write an OR1K special-purpose register by address. */
+static inline void mtspr(unsigned long add, unsigned long val)
+{
+	__asm__ __volatile__ ("l.mtspr %0,%1,0" : : "r" (add), "r" (val));
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __SYSTEM_H */
--- /dev/null
+.section .text, "ax", @progbits
+.global boot_helper
+// Jump to the boot address handed over by the BIOS.
+// By LiteX convention the target address is passed in x13 (a3) — never returns.
+boot_helper:
+ jr x13
--- /dev/null
+/*
+ * Copyright 2018, Serge Bazanski <serge@bazanski.pl>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted.
+ */
+
+#include "extraops.S"
+
+/*
+ * Interrupt vector.
+ */
+.global _start
+_start:
+
+.org 0x00000000 # Reset
+ j _crt0
+
+/* PicoRV32's IRQ entry is hard-wired to this offset; this shim saves
+   minimal state and indirects through q2 to the relocatable handler. */
+.org 0x00000010 # IRQ
+_irq_vector:
+ addi sp, sp, -16
+ sw t0, 4(sp)
+ sw ra, 8(sp)
+ /* By convention, q2 holds true IRQ vector, but remains caller-save.
+ We rely on the assumption that compiler-generated code will never touch
+ the QREGs. q3 is truly scratch/caller-save. */
+ picorv32_getq_insn(t0, q2)
+ sw t0, 12(sp)
+
+ jalr t0 // Call the true IRQ vector.
+
+ lw t0, 12(sp)
+ picorv32_setq_insn(q2, t0) // Restore the true IRQ vector.
+ lw ra, 8(sp)
+ lw t0, 4(sp)
+ addi sp, sp, 16
+ picorv32_retirq_insn() // return from interrupt
+
+
+
+/*
+ * IRQ handler, branched to from the vector. Saves the full register file
+ * into irq_regs, runs the C isr() on a dedicated interrupt stack, then
+ * restores everything and returns to the vector shim.
+ */
+_irq:
+ /* save x1/x2 to q2/q3 (q2's saved vector value was already spilled by
+ the vector shim, so it is free here) */
+ picorv32_setq_insn(q2, x1)
+ picorv32_setq_insn(q3, x2)
+
+ /* use x1 to index into irq_regs */
+ lui x1, %hi(irq_regs)
+ addi x1, x1, %lo(irq_regs)
+
+ /* use x2 as scratch space for saving registers */
+
+ /* q0 (== return PC), q2 (== old x1), q3 (== old x2) into slots 0..2 */
+ picorv32_getq_insn(x2, q0)
+ sw x2, 0*4(x1)
+ picorv32_getq_insn(x2, q2)
+ sw x2, 1*4(x1)
+ picorv32_getq_insn(x2, q3)
+ sw x2, 2*4(x1)
+
+ /* save x3 - x31 */
+ sw x3, 3*4(x1)
+ sw x4, 4*4(x1)
+ sw x5, 5*4(x1)
+ sw x6, 6*4(x1)
+ sw x7, 7*4(x1)
+ sw x8, 8*4(x1)
+ sw x9, 9*4(x1)
+ sw x10, 10*4(x1)
+ sw x11, 11*4(x1)
+ sw x12, 12*4(x1)
+ sw x13, 13*4(x1)
+ sw x14, 14*4(x1)
+ sw x15, 15*4(x1)
+ sw x16, 16*4(x1)
+ sw x17, 17*4(x1)
+ sw x18, 18*4(x1)
+ sw x19, 19*4(x1)
+ sw x20, 20*4(x1)
+ sw x21, 21*4(x1)
+ sw x22, 22*4(x1)
+ sw x23, 23*4(x1)
+ sw x24, 24*4(x1)
+ sw x25, 25*4(x1)
+ sw x26, 26*4(x1)
+ sw x27, 27*4(x1)
+ sw x28, 28*4(x1)
+ sw x29, 29*4(x1)
+ sw x30, 30*4(x1)
+ sw x31, 31*4(x1)
+
+ /* update _irq_pending to the currently pending interrupts */
+ picorv32_getq_insn(t0, q1)
+ la t1, (_irq_pending)
+ sw t0, 0(t1)
+
+ /* prepare C handler stack */
+ lui sp, %hi(_irq_stack)
+ addi sp, sp, %lo(_irq_stack)
+
+ /* call C handler */
+ jal ra, isr
+
+ /* use x1 to index into irq_regs */
+ lui x1, %hi(irq_regs)
+ addi x1, x1, %lo(irq_regs)
+
+ /* restore q0 - q2 (slot1 = old x1 -> q1, slot2 = old x2 -> q2) */
+ lw x2, 0*4(x1)
+ picorv32_setq_insn(q0, x2)
+ lw x2, 1*4(x1)
+ picorv32_setq_insn(q1, x2)
+ lw x2, 2*4(x1)
+ picorv32_setq_insn(q2, x2)
+
+ /* restore x3 - x31 */
+ lw x3, 3*4(x1)
+ lw x4, 4*4(x1)
+ lw x5, 5*4(x1)
+ lw x6, 6*4(x1)
+ lw x7, 7*4(x1)
+ lw x8, 8*4(x1)
+ lw x9, 9*4(x1)
+ lw x10, 10*4(x1)
+ lw x11, 11*4(x1)
+ lw x12, 12*4(x1)
+ lw x13, 13*4(x1)
+ lw x14, 14*4(x1)
+ lw x15, 15*4(x1)
+ lw x16, 16*4(x1)
+ lw x17, 17*4(x1)
+ lw x18, 18*4(x1)
+ lw x19, 19*4(x1)
+ lw x20, 20*4(x1)
+ lw x21, 21*4(x1)
+ lw x22, 22*4(x1)
+ lw x23, 23*4(x1)
+ lw x24, 24*4(x1)
+ lw x25, 25*4(x1)
+ lw x26, 26*4(x1)
+ lw x27, 27*4(x1)
+ lw x28, 28*4(x1)
+ lw x29, 29*4(x1)
+ lw x30, 30*4(x1)
+ lw x31, 31*4(x1)
+
+ /* restore x1 - x2 from q registers */
+ picorv32_getq_insn(x1, q1)
+ picorv32_getq_insn(x2, q2)
+ ret
+
+
+/*
+ * Reset handler, branched to from the vector.
+ */
+_crt0:
+ /* zero-initialize all registers */
+ addi x1, zero, 0
+ addi x2, zero, 0
+ addi x3, zero, 0
+ addi x4, zero, 0
+ addi x5, zero, 0
+ addi x6, zero, 0
+ addi x7, zero, 0
+ addi x8, zero, 0
+ addi x9, zero, 0
+ addi x10, zero, 0
+ addi x11, zero, 0
+ addi x12, zero, 0
+ addi x13, zero, 0
+ addi x14, zero, 0
+ addi x15, zero, 0
+ addi x16, zero, 0
+ addi x17, zero, 0
+ addi x18, zero, 0
+ addi x19, zero, 0
+ addi x20, zero, 0
+ addi x21, zero, 0
+ addi x22, zero, 0
+ addi x23, zero, 0
+ addi x24, zero, 0
+ addi x25, zero, 0
+ addi x26, zero, 0
+ addi x27, zero, 0
+ addi x28, zero, 0
+ addi x29, zero, 0
+ addi x30, zero, 0
+ addi x31, zero, 0
+
+ /* mask all interrupts (1 bit = masked) */
+ li t0, 0xffffffff
+ picorv32_maskirq_insn(zero, t0)
+ /* reflect that in _irq_mask */
+ la t1, _irq_mask
+ sw t0, 0(t1)
+
+#ifdef EXECUTE_IN_PLACE
+ /* Load DATA */
+ la t0, _erodata
+ la t1, _fdata
+ la t2, _edata
+3:
+ lw t3, 0(t0)
+ sw t3, 0(t1)
+ /* _edata is aligned to 16 bytes. Use word-xfers. */
+ /* NOTE(review): the body runs before the bound check, so an empty
+ data section would still copy one word — confirm _fdata < _edata. */
+ addi t0, t0, 4
+ addi t1, t1, 4
+ bltu t1, t2, 3b
+#endif
+
+ /* Clear BSS (same post-checked loop shape as the DATA copy above) */
+ la t0, _fbss
+ la t1, _ebss
+2:
+ sw zero, 0(t0)
+ addi t0, t0, 4
+ bltu t0, t1, 2b
+
+ /* set main stack */
+ la sp, _fstack
+
+ /* Set up address to IRQ handler since vector is hardcoded.
+ By convention, q2 keeps the pointer to the true IRQ handler,
+ to emulate relocatable interrupts. */
+ la t0, _irq
+ picorv32_setq_insn(q2, t0)
+
+ /* jump to main */
+ jal ra, main
+
+1:
+ /* loop forever */
+ j 1b
+
+
+
+/*
+ * Enable interrupts by copying the software mask to the hardware mask
+ */
+.global _irq_enable
+_irq_enable:
+ /* Set _irq_enabled to true */
+ la t0, _irq_enabled
+ addi t1, zero, 1
+ sw t1, 0(t0)
+ /* Set the HW IRQ mask to _irq_mask */
+ la t0, _irq_mask
+ lw t0, 0(t0)
+ picorv32_maskirq_insn(zero, t0)
+ ret
+
+/*
+ * Disable interrupts by masking all interrupts (the mask should already be
+ * up to date)
+ */
+.global _irq_disable
+_irq_disable:
+ /* Mask all IRQs */
+ li t0, 0xffffffff
+ picorv32_maskirq_insn(zero, t0)
+ /* Set _irq_enabled to false */
+ la t0, _irq_enabled
+ sw zero, (t0)
+ ret
+
+/*
+ * Set interrupt mask (a0).
+ * This updates the software mask (for readback and interrupt enable/disable)
+ * and the hardware mask.
+ * 1 means interrupt is masked (disabled).
+ */
+.global _irq_setmask
+_irq_setmask:
+ /* Update _irq_mask */
+ la t0, _irq_mask
+ sw a0, (t0)
+ /* Are interrupts enabled? */
+ la t0, _irq_enabled
+ lw t0, 0(t0)
+ beq t0, zero, 1f
+ /* If so, update the HW IRQ mask */
+ picorv32_maskirq_insn(zero, a0)
+1:
+ ret
+
+
+
+
+.section .bss
+irq_regs:
+ /* saved interrupt registers, x0 - x31 (32 words) */
+ .fill 32,4
+
+ /* interrupt stack (256 words, grows down towards irq_regs) */
+ .fill 256,4
+_irq_stack:
+
+/*
+ * Bitfield of pending interrupts, updated on ISR entry.
+ */
+.global _irq_pending
+_irq_pending:
+ .word 0
+
+/*
+ * Software copy of enabled interrupts. Do not write directly, use
+ * _irq_set_mask instead.
+ */
+.global _irq_mask
+_irq_mask:
+ .word 0
+
+/*
+ * Software state of global interrupts being enabled or disabled. Do not write
+ * directly, use _irq_disable / _irq_enable instead.
+ */
+.global _irq_enabled
+_irq_enabled:
+ .word 0
--- /dev/null
+// This is free and unencumbered software released into the public domain.
+//
+// Anyone is free to copy, modify, publish, use, compile, sell, or
+// distribute this software, either in source code form or as a compiled
+// binary, for any purpose, commercial or non-commercial, and by any
+// means.
+
+/* Encoder numbers for PicoRV32's four Q (IRQ shadow) registers. */
+#define regnum_q0 0
+#define regnum_q1 1
+#define regnum_q2 2
+#define regnum_q3 3
+
+/* Encoder numbers for the architectural x registers. */
+#define regnum_x0 0
+#define regnum_x1 1
+#define regnum_x2 2
+#define regnum_x3 3
+#define regnum_x4 4
+#define regnum_x5 5
+#define regnum_x6 6
+#define regnum_x7 7
+#define regnum_x8 8
+#define regnum_x9 9
+#define regnum_x10 10
+#define regnum_x11 11
+#define regnum_x12 12
+#define regnum_x13 13
+#define regnum_x14 14
+#define regnum_x15 15
+#define regnum_x16 16
+#define regnum_x17 17
+#define regnum_x18 18
+#define regnum_x19 19
+#define regnum_x20 20
+#define regnum_x21 21
+#define regnum_x22 22
+#define regnum_x23 23
+#define regnum_x24 24
+#define regnum_x25 25
+#define regnum_x26 26
+#define regnum_x27 27
+#define regnum_x28 28
+#define regnum_x29 29
+#define regnum_x30 30
+#define regnum_x31 31
+
+/* ABI aliases for the same registers. */
+#define regnum_zero 0
+#define regnum_ra 1
+#define regnum_sp 2
+#define regnum_gp 3
+#define regnum_tp 4
+#define regnum_t0 5
+#define regnum_t1 6
+#define regnum_t2 7
+#define regnum_s0 8
+#define regnum_s1 9
+#define regnum_a0 10
+#define regnum_a1 11
+#define regnum_a2 12
+#define regnum_a3 13
+#define regnum_a4 14
+#define regnum_a5 15
+#define regnum_a6 16
+#define regnum_a7 17
+#define regnum_s2 18
+#define regnum_s3 19
+#define regnum_s4 20
+#define regnum_s5 21
+#define regnum_s6 22
+#define regnum_s7 23
+#define regnum_s8 24
+#define regnum_s9 25
+#define regnum_s10 26
+#define regnum_s11 27
+#define regnum_t3 28
+#define regnum_t4 29
+#define regnum_t5 30
+#define regnum_t6 31
+
+// x8 is s0 and also fp
+#define regnum_fp 8
+
+/* Hand-encode an R-type instruction word (custom opcode space 0b0001011). */
+#define r_type_insn(_f7, _rs2, _rs1, _f3, _rd, _opc) \
+.word (((_f7) << 25) | ((_rs2) << 20) | ((_rs1) << 15) | ((_f3) << 12) | ((_rd) << 7) | ((_opc) << 0))
+
+/* Read Q register _qs into x register _rd. */
+#define picorv32_getq_insn(_rd, _qs) \
+r_type_insn(0b0000000, 0, regnum_ ## _qs, 0b100, regnum_ ## _rd, 0b0001011)
+
+/* Write x register _rs into Q register _qd. */
+#define picorv32_setq_insn(_qd, _rs) \
+r_type_insn(0b0000001, 0, regnum_ ## _rs, 0b010, regnum_ ## _qd, 0b0001011)
+
+/* Return from interrupt (restores PC from q0). */
+#define picorv32_retirq_insn() \
+r_type_insn(0b0000010, 0, 0, 0b000, 0, 0b0001011)
+
+/* Write IRQ mask from _rs; previous mask returned in _rd. */
+#define picorv32_maskirq_insn(_rd, _rs) \
+r_type_insn(0b0000011, 0, regnum_ ## _rs, 0b110, regnum_ ## _rd, 0b0001011)
+
+/* Wait for an interrupt; pending bitfield returned in _rd. */
+#define picorv32_waitirq_insn(_rd) \
+r_type_insn(0b0000100, 0, 0, 0b100, regnum_ ## _rd, 0b0001011)
+
+/* Configure the PicoRV32 timer from _rs; previous value in _rd. */
+#define picorv32_timer_insn(_rd, _rs) \
+r_type_insn(0b0000101, 0, regnum_ ## _rs, 0b110, regnum_ ## _rd, 0b0001011)
--- /dev/null
+#ifndef __IRQ_H
+#define __IRQ_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <system.h>
+#include <generated/csr.h>
+#include <generated/soc.h>
+
+// PicoRV32 has a very limited interrupt support, implemented via custom
+// instructions. It also doesn't have a global interrupt enable/disable, so
+// we have to emulate it via saving and restoring a mask and using 0/~1 as a
+// hardware mask.
+// Due to all this somewhat low-level mess, all of the glue is implemented in
+// the RiscV crt0, and this header is kept as a thin wrapper. Since interrupts
+// are managed by this layer, do not call interrupt instructions directly, as the
+// state will go out of sync with the hardware.
+
+// Read only.
+extern unsigned int _irq_pending;
+// Read only.
+extern unsigned int _irq_mask;
+// Read only.
+extern unsigned int _irq_enabled;
+extern void _irq_enable(void);
+extern void _irq_disable(void);
+extern void _irq_setmask(unsigned int);
+
+static inline unsigned int irq_getie(void)
+{
+ return _irq_enabled != 0;
+}
+
+static inline void irq_setie(unsigned int ie)
+{
+ if (ie & 0x1)
+ _irq_enable();
+ else
+ _irq_disable();
+}
+
+static inline unsigned int irq_getmask(void)
+{
+ // PicoRV32 interrupt mask bits are high-disabled. This is the inverse of how
+ // LiteX sees things.
+ return ~_irq_mask;
+}
+
+static inline void irq_setmask(unsigned int mask)
+{
+ // PicoRV32 interrupt mask bits are high-disabled. This is the inverse of how
+ // LiteX sees things.
+ _irq_setmask(~mask);
+}
+
+static inline unsigned int irq_pending(void)
+{
+ return _irq_pending;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __IRQ_H */
--- /dev/null
+#ifndef __SYSTEM_H
+#define __SYSTEM_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void flush_cpu_icache(void);
+void flush_cpu_dcache(void);
+void flush_l2_cache(void);
+
+void busy_wait(unsigned int ms);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __SYSTEM_H */
--- /dev/null
+.section .text, "ax", @progbits
+.global boot_helper
+boot_helper:
+ jr x13
--- /dev/null
+.global main
+.global isr
+.global _start
+
+_start:
+ j crt_init
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+trap_entry:
+ sd x1, - 1*8(sp)
+ sd x5, - 2*8(sp)
+ sd x6, - 3*8(sp)
+ sd x7, - 4*8(sp)
+ sd x10, - 5*8(sp)
+ sd x11, - 6*8(sp)
+ sd x12, - 7*8(sp)
+ sd x13, - 8*8(sp)
+ sd x14, - 9*8(sp)
+ sd x15, -10*8(sp)
+ sd x16, -11*8(sp)
+ sd x17, -12*8(sp)
+ sd x28, -13*8(sp)
+ sd x29, -14*8(sp)
+ sd x30, -15*8(sp)
+ sd x31, -16*8(sp)
+ addi sp,sp,-16*8
+ call isr
+ ld x1 , 15*8(sp)
+ ld x5, 14*8(sp)
+ ld x6, 13*8(sp)
+ ld x7, 12*8(sp)
+ ld x10, 11*8(sp)
+ ld x11, 10*8(sp)
+ ld x12, 9*8(sp)
+ ld x13, 8*8(sp)
+ ld x14, 7*8(sp)
+ ld x15, 6*8(sp)
+ ld x16, 5*8(sp)
+ ld x17, 4*8(sp)
+ ld x28, 3*8(sp)
+ ld x29, 2*8(sp)
+ ld x30, 1*8(sp)
+ ld x31, 0*8(sp)
+ addi sp,sp,16*8
+ mret
+ .text
+
+
+crt_init:
+ la sp, _fstack + 8
+ la a0, trap_entry
+ csrw mtvec, a0
+
+bss_init:
+ la a0, _fbss
+ la a1, _ebss
+bss_loop:
+ beq a0,a1,bss_done
+ sd zero,0(a0)
+ add a0,a0,8
+ j bss_loop
+bss_done:
+
+ call plic_init // initialize external interrupt controller
+ li a0, 0x800 // external interrupt sources only (using LiteX timer);
+ // NOTE: must still enable mstatus.MIE!
+ csrw mie,a0
+
+ call main
+inf_loop:
+ j inf_loop
--- /dev/null
+#ifndef CSR_DEFS__H
+#define CSR_DEFS__H
+
+#define CSR_MSTATUS_MIE 0x8
+
+#define CSR_DCACHE_INFO 0xCC0
+
+#endif /* CSR_DEFS__H */
--- /dev/null
+#ifndef __IRQ_H
+#define __IRQ_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <system.h>
+#include <generated/csr.h>
+#include <generated/soc.h>
+
+// The RocketChip uses a Platform-Level Interrupt Controller (PLIC) which
+// is programmed and queried via a set of MMIO registers.
+
+#define PLIC_BASE 0x0c000000L // Base address and per-pin priority array
+#define PLIC_PENDING 0x0c001000L // Bit field matching currently pending pins
+#define PLIC_ENABLED 0x0c002000L // Bit field corresponding to the current mask
+#define PLIC_THRSHLD 0x0c200000L // Per-pin priority must be >= this to trigger
+#define PLIC_CLAIM 0x0c200004L // Claim & completion register address
+
+static inline unsigned int irq_getie(void)
+{
+ return (csrr(mstatus) & CSR_MSTATUS_MIE) != 0;
+}
+
+static inline void irq_setie(unsigned int ie)
+{
+ if(ie) csrs(mstatus,CSR_MSTATUS_MIE); else csrc(mstatus,CSR_MSTATUS_MIE);
+}
+
+static inline unsigned int irq_getmask(void)
+{
+ return *((unsigned int *)PLIC_ENABLED) >> 1;
+}
+
+static inline void irq_setmask(unsigned int mask)
+{
+ *((unsigned int *)PLIC_ENABLED) = mask << 1;
+}
+
+static inline unsigned int irq_pending(void)
+{
+ return *((unsigned int *)PLIC_PENDING) >> 1;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __IRQ_H */
--- /dev/null
+#ifndef __SYSTEM_H
+#define __SYSTEM_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void flush_cpu_icache(void);
+void flush_cpu_dcache(void);
+void flush_l2_cache(void);
+
+void busy_wait(unsigned int ms);
+
+#include <csr-defs.h>
+
+#define csrr(reg) ({ unsigned long __tmp; \
+ asm volatile ("csrr %0, " #reg : "=r"(__tmp)); \
+ __tmp; })
+
+#define csrw(reg, val) ({ \
+ if (__builtin_constant_p(val) && (unsigned long)(val) < 32) \
+ asm volatile ("csrw " #reg ", %0" :: "i"(val)); \
+ else \
+ asm volatile ("csrw " #reg ", %0" :: "r"(val)); })
+
+#define csrs(reg, bit) ({ \
+ if (__builtin_constant_p(bit) && (unsigned long)(bit) < 32) \
+ asm volatile ("csrrs x0, " #reg ", %0" :: "i"(bit)); \
+ else \
+ asm volatile ("csrrs x0, " #reg ", %0" :: "r"(bit)); })
+
+#define csrc(reg, bit) ({ \
+ if (__builtin_constant_p(bit) && (unsigned long)(bit) < 32) \
+ asm volatile ("csrrc x0, " #reg ", %0" :: "i"(bit)); \
+ else \
+ asm volatile ("csrrc x0, " #reg ", %0" :: "r"(bit)); })
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __SYSTEM_H */
--- /dev/null
+ .section .text, "ax", @progbits
+ .global boot_helper
+boot_helper:
+ jr x13
--- /dev/null
+#define MIE_MEIE 0x800
+
+ .global _start
+_start:
+ j reset_vector
+
+reset_vector:
+ la sp, _fstack
+ la t0, trap_vector
+ csrw mtvec, t0
+
+ // initialize .bss
+ la t0, _fbss
+ la t1, _ebss
+1: beq t0, t1, 2f
+ sw zero, 0(t0)
+ addi t0, t0, 4
+ j 1b
+2:
+ // enable external interrupts
+ li t0, MIE_MEIE
+ csrs mie, t0
+
+ call main
+1: j 1b
+
+trap_vector:
+ addi sp, sp, -16*4
+ sw ra, 0*4(sp)
+ sw t0, 1*4(sp)
+ sw t1, 2*4(sp)
+ sw t2, 3*4(sp)
+ sw a0, 4*4(sp)
+ sw a1, 5*4(sp)
+ sw a2, 6*4(sp)
+ sw a3, 7*4(sp)
+ sw a4, 8*4(sp)
+ sw a5, 9*4(sp)
+ sw a6, 10*4(sp)
+ sw a7, 11*4(sp)
+ sw t3, 12*4(sp)
+ sw t4, 13*4(sp)
+ sw t5, 14*4(sp)
+ sw t6, 15*4(sp)
+ call isr
+ lw ra, 0*4(sp)
+ lw t0, 1*4(sp)
+ lw t1, 2*4(sp)
+ lw t2, 3*4(sp)
+ lw a0, 4*4(sp)
+ lw a1, 5*4(sp)
+ lw a2, 6*4(sp)
+ lw a3, 7*4(sp)
+ lw a4, 8*4(sp)
+ lw a5, 9*4(sp)
+ lw a6, 10*4(sp)
+ lw a7, 11*4(sp)
+ lw t3, 12*4(sp)
+ lw t4, 13*4(sp)
+ lw t5, 14*4(sp)
+ lw t6, 15*4(sp)
+ addi sp, sp, 16*4
+ mret
--- /dev/null
+#ifndef __IRQ_H
+#define __IRQ_H
+
+#endif /* __IRQ_H */
--- /dev/null
+#ifndef __SYSTEM_H
+#define __SYSTEM_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void flush_cpu_icache(void);
+void flush_cpu_dcache(void);
+void flush_l2_cache(void);
+
+void busy_wait(unsigned int ms);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __SYSTEM_H */
--- /dev/null
+.section .text, "ax", @progbits
+.global boot_helper
+boot_helper:
+ jr x13
--- /dev/null
+.global main
+.global isr
+.global _start
+
+_start:
+ j crt_init
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+ nop
+
+.global trap_entry
+trap_entry:
+ sw x1, - 1*4(sp)
+ sw x5, - 2*4(sp)
+ sw x6, - 3*4(sp)
+ sw x7, - 4*4(sp)
+ sw x10, - 5*4(sp)
+ sw x11, - 6*4(sp)
+ sw x12, - 7*4(sp)
+ sw x13, - 8*4(sp)
+ sw x14, - 9*4(sp)
+ sw x15, -10*4(sp)
+ sw x16, -11*4(sp)
+ sw x17, -12*4(sp)
+ sw x28, -13*4(sp)
+ sw x29, -14*4(sp)
+ sw x30, -15*4(sp)
+ sw x31, -16*4(sp)
+ addi sp,sp,-16*4
+ call isr
+ lw x1 , 15*4(sp)
+ lw x5, 14*4(sp)
+ lw x6, 13*4(sp)
+ lw x7, 12*4(sp)
+ lw x10, 11*4(sp)
+ lw x11, 10*4(sp)
+ lw x12, 9*4(sp)
+ lw x13, 8*4(sp)
+ lw x14, 7*4(sp)
+ lw x15, 6*4(sp)
+ lw x16, 5*4(sp)
+ lw x17, 4*4(sp)
+ lw x28, 3*4(sp)
+ lw x29, 2*4(sp)
+ lw x30, 1*4(sp)
+ lw x31, 0*4(sp)
+ addi sp,sp,16*4
+ mret
+ .text
+
+
+crt_init:
+ la sp, _fstack + 4
+ la a0, trap_entry
+ csrw mtvec, a0
+
+bss_init:
+ la a0, _fbss
+ la a1, _ebss
+bss_loop:
+ beq a0,a1,bss_done
+ sw zero,0(a0)
+ add a0,a0,4
+ j bss_loop
+bss_done:
+
+ li a0, 0x880 //880 enable timer + external interrupt sources (until mstatus.MIE is set, they will never trigger an interrupt)
+ csrw mie,a0
+
+ call main
+infinite_loop:
+	j infinite_loop
--- /dev/null
+#ifndef CSR_DEFS__H
+#define CSR_DEFS__H
+
+#define CSR_MSTATUS_MIE 0x8
+
+#define CSR_IRQ_MASK 0xBC0
+#define CSR_IRQ_PENDING 0xFC0
+
+#define CSR_DCACHE_INFO 0xCC0
+
+#endif /* CSR_DEFS__H */
--- /dev/null
+#ifndef __IRQ_H
+#define __IRQ_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <system.h>
+#include <generated/csr.h>
+#include <generated/soc.h>
+
+static inline unsigned int irq_getie(void)
+{
+ return (csrr(mstatus) & CSR_MSTATUS_MIE) != 0;
+}
+
+static inline void irq_setie(unsigned int ie)
+{
+ if(ie) csrs(mstatus,CSR_MSTATUS_MIE); else csrc(mstatus,CSR_MSTATUS_MIE);
+}
+
+static inline unsigned int irq_getmask(void)
+{
+ unsigned int mask;
+ asm volatile ("csrr %0, %1" : "=r"(mask) : "i"(CSR_IRQ_MASK));
+ return mask;
+}
+
+static inline void irq_setmask(unsigned int mask)
+{
+ asm volatile ("csrw %0, %1" :: "i"(CSR_IRQ_MASK), "r"(mask));
+}
+
+static inline unsigned int irq_pending(void)
+{
+ unsigned int pending;
+ asm volatile ("csrr %0, %1" : "=r"(pending) : "i"(CSR_IRQ_PENDING));
+ return pending;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __IRQ_H */
--- /dev/null
+#ifndef __SYSTEM_H
+#define __SYSTEM_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void flush_cpu_icache(void);
+void flush_cpu_dcache(void);
+void flush_l2_cache(void);
+
+void busy_wait(unsigned int ms);
+
+#include <csr-defs.h>
+
+#define csrr(reg) ({ unsigned long __tmp; \
+ asm volatile ("csrr %0, " #reg : "=r"(__tmp)); \
+ __tmp; })
+
+#define csrw(reg, val) ({ \
+ if (__builtin_constant_p(val) && (unsigned long)(val) < 32) \
+ asm volatile ("csrw " #reg ", %0" :: "i"(val)); \
+ else \
+ asm volatile ("csrw " #reg ", %0" :: "r"(val)); })
+
+#define csrs(reg, bit) ({ \
+ if (__builtin_constant_p(bit) && (unsigned long)(bit) < 32) \
+ asm volatile ("csrrs x0, " #reg ", %0" :: "i"(bit)); \
+ else \
+ asm volatile ("csrrs x0, " #reg ", %0" :: "r"(bit)); })
+
+#define csrc(reg, bit) ({ \
+ if (__builtin_constant_p(bit) && (unsigned long)(bit) < 32) \
+ asm volatile ("csrrc x0, " #reg ", %0" :: "i"(bit)); \
+ else \
+ asm volatile ("csrrc x0, " #reg ", %0" :: "r"(bit)); })
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __SYSTEM_H */
import os
import json
+import inspect
from shutil import which
from sysconfig import get_platform
("CPU", cpu.name),
("CPUFLAGS", flags),
("CPUENDIANNESS", cpu.endianness),
- ("CLANG", str(int(clang)))
+ ("CLANG", str(int(clang))),
+ ("CPU_DIRECTORY", os.path.dirname(inspect.getfile(cpu.__class__))),
]
include $(SOC_DIRECTORY)/software/common.mak
ifeq ($(CPU),blackparrot)
-BP_LIBS = -L$(BP_EXTERNAL_DIR)/lib/gcc/riscv64-unknown-elf/8.3.0
+BP_LIBS = -L$(BP_EXTERNAL_DIR)/lib/gcc/riscv64-unknown-elf/8.3.0
BP_FLAGS = -lgcc
endif
# Permit TFTP_SERVER_PORT override from shell environment / command line
CFLAGS += -DTFTP_SERVER_PORT=$(TFTP_SERVER_PORT)
endif
-OBJECTS=isr.o sdram.o sdcard.o main.o boot-helper-$(CPU).o boot.o
+OBJECTS=isr.o sdram.o sdcard.o main.o boot-helper.o boot.o
all: bios.bin
$(PYTHON) -m litex.soc.software.memusage bios.elf $(CURDIR)/../include/generated/regions.ld $(TRIPLE)
bios.elf: $(BIOS_DIRECTORY)/linker.ld $(OBJECTS)
-%.elf: ../libbase/crt0-$(CPU)-ctr.o ../libnet/libnet.a ../libbase/libbase-nofloat.a ../libcompiler_rt/libcompiler_rt.a
+%.elf: ../libbase/crt0-ctr.o ../libnet/libnet.a ../libbase/libbase-nofloat.a ../libcompiler_rt/libcompiler_rt.a
$(LD) $(LDFLAGS) -T $(BIOS_DIRECTORY)/linker.ld -N -o $@ \
- ../libbase/crt0-$(CPU)-ctr.o \
+ ../libbase/crt0-ctr.o \
$(OBJECTS) \
-L../libnet \
-L../libbase \
$(BP_LIBS) \
-lnet -lbase-nofloat -lcompiler_rt \
$(BP_FLAGS)
-
+
ifneq ($(OS),Windows_NT)
chmod -x $@
endif
%.o: $(BIOS_DIRECTORY)/%.S
$(assemble)
+boot-helper.o: $(CPU_DIRECTORY)/boot-helper.S
+ cp $(CPU_DIRECTORY)/boot-helper.S $(BIOS_DIRECTORY)/boot-helper.S
+ $(assemble)
+
clean:
$(RM) $(OBJECTS) bios.elf bios.bin .*~ *~
+++ /dev/null
-.section .text, "ax", @progbits
-.global boot_helper
-boot_helper:
- jr x13
+++ /dev/null
-.section .text, "ax", @progbits
-.global boot_helper
-boot_helper:
- call r4
+++ /dev/null
-.section .text, "ax", @progbits
-.global boot_helper
-boot_helper:
- nop # FIXME
+++ /dev/null
- .section .text, "ax", @progbits
- .global boot_helper
-boot_helper:
- jr x13
+++ /dev/null
-.section .text, "ax", @progbits
-.global boot_helper
-boot_helper:
- l.jr r6
- l.nop
+++ /dev/null
-.section .text, "ax", @progbits
-.global boot_helper
-boot_helper:
- jr x13
+++ /dev/null
-.section .text, "ax", @progbits
-.global boot_helper
-boot_helper:
- jr x13
+++ /dev/null
- .section .text, "ax", @progbits
- .global boot_helper
-boot_helper:
- jr x13
+++ /dev/null
-.section .text, "ax", @progbits
-.global boot_helper
-boot_helper:
- jr x13
__attribute__((unused)) static void cdelay(int i)
{
+ /* FIXME: move nop definitions to CPUs */
while(i > 0) {
#if defined (__lm32__)
__asm__ volatile("nop");
# Toolchain options
#
-INCLUDES = -I$(SOC_DIRECTORY)/software/include/base -I$(SOC_DIRECTORY)/software/include -I$(SOC_DIRECTORY)/common -I$(BUILDINC_DIRECTORY)
+INCLUDES = -I$(SOC_DIRECTORY)/software/include/base -I$(SOC_DIRECTORY)/software/include -I$(SOC_DIRECTORY)/common -I$(BUILDINC_DIRECTORY) -I$(CPU_DIRECTORY)
COMMONFLAGS = $(DEPFLAGS) -Os $(CPUFLAGS) -g3 -fomit-frame-pointer -Wall -fno-builtin -nostdinc $(INCLUDES)
CFLAGS = $(COMMONFLAGS) -fexceptions -Wstrict-prototypes -Wold-style-definition -Wmissing-prototypes
CXXFLAGS = $(COMMONFLAGS) -std=c++11 -I$(SOC_DIRECTORY)/software/include/basec++ -fexceptions -fno-rtti -ffreestanding
+++ /dev/null
-#ifndef CSR_DEFS__H
-#define CSR_DEFS__H
-
-#define CSR_MSTATUS_MIE 0x8
-
-#if defined (__vexriscv__)
-#define CSR_IRQ_MASK 0xBC0
-#define CSR_IRQ_PENDING 0xFC0
-#endif
-
-#if defined (__minerva__)
-#define CSR_IRQ_MASK 0x330
-#define CSR_IRQ_PENDING 0x360
-#endif
-
-#define CSR_DCACHE_INFO 0xCC0
-
-#endif /* CSR_DEFS__H */
+++ /dev/null
-#ifndef __IRQ_H
-#define __IRQ_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <system.h>
-#include <generated/csr.h>
-#include <generated/soc.h>
-
-#ifdef CONFIG_CPU_HAS_INTERRUPT
-
-#ifdef __picorv32__
-// PicoRV32 has a very limited interrupt support, implemented via custom
-// instructions. It also doesn't have a global interrupt enable/disable, so
-// we have to emulate it via saving and restoring a mask and using 0/~1 as a
-// hardware mask.
-// Due to all this somewhat low-level mess, all of the glue is implemented in
-// the RiscV crt0, and this header is kept as a thin wrapper. Since interrupts
-// managed by this layer, do not call interrupt instructions directly, as the
-// state will go out of sync with the hardware.
-
-// Read only.
-extern unsigned int _irq_pending;
-// Read only.
-extern unsigned int _irq_mask;
-// Read only.
-extern unsigned int _irq_enabled;
-extern void _irq_enable(void);
-extern void _irq_disable(void);
-extern void _irq_setmask(unsigned int);
-#endif
-
-#ifdef __rocket__
-// The RocketChip uses a Platform-Level Interrupt Controller (PLIC) which
-// is programmed and queried via a set of MMIO registers.
-
-#define PLIC_BASE 0x0c000000L // Base address and per-pin priority array
-#define PLIC_PENDING 0x0c001000L // Bit field matching currently pending pins
-#define PLIC_ENABLED 0x0c002000L // Bit field corresponding to the current mask
-#define PLIC_THRSHLD 0x0c200000L // Per-pin priority must be >= this to trigger
-#define PLIC_CLAIM 0x0c200004L // Claim & completion register address
-#endif /* __rocket__ */
-
-
-#ifdef __blackparrot__
-// The RocketChip uses a Platform-Level Interrupt Controller (PLIC) which
-// is programmed and queried via a set of MMIO registers.
-// TODO: How about Blackparrot? Should be probably included in linux version
-
-#define PLIC_BASE 0x0c000000L // Base address and per-pin priority array
-#define PLIC_PENDING 0x0c001000L // Bit field matching currently pending pins
-#define PLIC_ENABLED 0x0c002000L // Bit field corresponding to the current mask
-#define PLIC_THRSHLD 0x0c200000L // Per-pin priority must be >= this to trigger
-#define PLIC_CLAIM 0x0c200004L // Claim & completion register address
-#endif /* __blackparrot__ */
-
-
-
-static inline unsigned int irq_getie(void)
-{
-#if defined (__lm32__)
- unsigned int ie;
- __asm__ __volatile__("rcsr %0, IE" : "=r" (ie));
- return ie;
-#elif defined (__or1k__)
- return !!(mfspr(SPR_SR) & SPR_SR_IEE);
-#elif defined (__picorv32__)
- return _irq_enabled != 0;
-#elif defined (__vexriscv__)
- return (csrr(mstatus) & CSR_MSTATUS_MIE) != 0;
-#elif defined (__minerva__)
- return (csrr(mstatus) & CSR_MSTATUS_MIE) != 0;
-#elif defined (__rocket__)
- return (csrr(mstatus) & CSR_MSTATUS_MIE) != 0;
-#elif defined (__blackparrot__)
- return (csrr(mstatus) & CSR_MSTATUS_MIE) != 0; /* FIXME */
-#else
-#error Unsupported architecture
-#endif
-}
-
-static inline void irq_setie(unsigned int ie)
-{
-#if defined (__lm32__)
- __asm__ __volatile__("wcsr IE, %0" : : "r" (ie));
-#elif defined (__or1k__)
- if (ie & 0x1)
- mtspr(SPR_SR, mfspr(SPR_SR) | SPR_SR_IEE);
- else
- mtspr(SPR_SR, mfspr(SPR_SR) & ~SPR_SR_IEE);
-#elif defined (__picorv32__)
- if (ie & 0x1)
- _irq_enable();
- else
- _irq_disable();
-#elif defined (__vexriscv__)
- if(ie) csrs(mstatus,CSR_MSTATUS_MIE); else csrc(mstatus,CSR_MSTATUS_MIE);
-#elif defined (__minerva__)
- if(ie) csrs(mstatus,CSR_MSTATUS_MIE); else csrc(mstatus,CSR_MSTATUS_MIE);
-#elif defined (__rocket__)
- if(ie) csrs(mstatus,CSR_MSTATUS_MIE); else csrc(mstatus,CSR_MSTATUS_MIE);
-#elif defined (__blackparrot__)
- if(ie) csrs(mstatus,CSR_MSTATUS_MIE); else csrc(mstatus,CSR_MSTATUS_MIE); /* FIXME */
-#else
-#error Unsupported architecture
-#endif
-}
-
-static inline unsigned int irq_getmask(void)
-{
-#if defined (__lm32__)
- unsigned int mask;
- __asm__ __volatile__("rcsr %0, IM" : "=r" (mask));
- return mask;
-#elif defined (__or1k__)
- return mfspr(SPR_PICMR);
-#elif defined (__picorv32__)
- // PicoRV32 interrupt mask bits are high-disabled. This is the inverse of how
- // LiteX sees things.
- return ~_irq_mask;
-#elif defined (__vexriscv__)
- unsigned int mask;
- asm volatile ("csrr %0, %1" : "=r"(mask) : "i"(CSR_IRQ_MASK));
- return mask;
-#elif defined (__minerva__)
- unsigned int mask;
- asm volatile ("csrr %0, %1" : "=r"(mask) : "i"(CSR_IRQ_MASK));
- return mask;
-#elif defined (__rocket__)
- return *((unsigned int *)PLIC_ENABLED) >> 1;
-#elif defined (__blackparrot__)
- return 0; /* FIXME */
-#else
-#error Unsupported architecture
-#endif
-}
-
-static inline void irq_setmask(unsigned int mask)
-{
-#if defined (__lm32__)
- __asm__ __volatile__("wcsr IM, %0" : : "r" (mask));
-#elif defined (__or1k__)
- mtspr(SPR_PICMR, mask);
-#elif defined (__picorv32__)
- // PicoRV32 interrupt mask bits are high-disabled. This is the inverse of how
- // LiteX sees things.
- _irq_setmask(~mask);
-#elif defined (__vexriscv__)
- asm volatile ("csrw %0, %1" :: "i"(CSR_IRQ_MASK), "r"(mask));
-#elif defined (__minerva__)
- asm volatile ("csrw %0, %1" :: "i"(CSR_IRQ_MASK), "r"(mask));
-#elif defined (__rocket__)
- *((unsigned int *)PLIC_ENABLED) = mask << 1;
-#elif defined (__blackparrot__)
- /* FIXME */
-#else
-#error Unsupported architecture
-#endif
-}
-
-static inline unsigned int irq_pending(void)
-{
-#if defined (__lm32__)
- unsigned int pending;
- __asm__ __volatile__("rcsr %0, IP" : "=r" (pending));
- return pending;
-#elif defined (__or1k__)
- return mfspr(SPR_PICSR);
-#elif defined (__picorv32__)
- return _irq_pending;
-#elif defined (__vexriscv__)
- unsigned int pending;
- asm volatile ("csrr %0, %1" : "=r"(pending) : "i"(CSR_IRQ_PENDING));
- return pending;
-#elif defined (__minerva__)
- unsigned int pending;
- asm volatile ("csrr %0, %1" : "=r"(pending) : "i"(CSR_IRQ_PENDING));
- return pending;
-#elif defined (__rocket__)
- return *((unsigned int *)PLIC_PENDING) >> 1;
-#elif defined (__blackparrot__)
- return csr_readl(PLIC_PENDING) >> 1; /* FIXME */
-#else
-#error Unsupported architecture
-#endif
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
-
-#endif /* __IRQ_H */
+++ /dev/null
-// This is free and unencumbered software released into the public domain.
-//
-// Anyone is free to copy, modify, publish, use, compile, sell, or
-// distribute this software, either in source code form or as a compiled
-// binary, for any purpose, commercial or non-commercial, and by any
-// means.
-
-#define regnum_q0 0
-#define regnum_q1 1
-#define regnum_q2 2
-#define regnum_q3 3
-
-#define regnum_x0 0
-#define regnum_x1 1
-#define regnum_x2 2
-#define regnum_x3 3
-#define regnum_x4 4
-#define regnum_x5 5
-#define regnum_x6 6
-#define regnum_x7 7
-#define regnum_x8 8
-#define regnum_x9 9
-#define regnum_x10 10
-#define regnum_x11 11
-#define regnum_x12 12
-#define regnum_x13 13
-#define regnum_x14 14
-#define regnum_x15 15
-#define regnum_x16 16
-#define regnum_x17 17
-#define regnum_x18 18
-#define regnum_x19 19
-#define regnum_x20 20
-#define regnum_x21 21
-#define regnum_x22 22
-#define regnum_x23 23
-#define regnum_x24 24
-#define regnum_x25 25
-#define regnum_x26 26
-#define regnum_x27 27
-#define regnum_x28 28
-#define regnum_x29 29
-#define regnum_x30 30
-#define regnum_x31 31
-
-#define regnum_zero 0
-#define regnum_ra 1
-#define regnum_sp 2
-#define regnum_gp 3
-#define regnum_tp 4
-#define regnum_t0 5
-#define regnum_t1 6
-#define regnum_t2 7
-#define regnum_s0 8
-#define regnum_s1 9
-#define regnum_a0 10
-#define regnum_a1 11
-#define regnum_a2 12
-#define regnum_a3 13
-#define regnum_a4 14
-#define regnum_a5 15
-#define regnum_a6 16
-#define regnum_a7 17
-#define regnum_s2 18
-#define regnum_s3 19
-#define regnum_s4 20
-#define regnum_s5 21
-#define regnum_s6 22
-#define regnum_s7 23
-#define regnum_s8 24
-#define regnum_s9 25
-#define regnum_s10 26
-#define regnum_s11 27
-#define regnum_t3 28
-#define regnum_t4 29
-#define regnum_t5 30
-#define regnum_t6 31
-
-// x8 is s0 and also fp
-#define regnum_fp 8
-
-#define r_type_insn(_f7, _rs2, _rs1, _f3, _rd, _opc) \
-.word (((_f7) << 25) | ((_rs2) << 20) | ((_rs1) << 15) | ((_f3) << 12) | ((_rd) << 7) | ((_opc) << 0))
-
-#define picorv32_getq_insn(_rd, _qs) \
-r_type_insn(0b0000000, 0, regnum_ ## _qs, 0b100, regnum_ ## _rd, 0b0001011)
-
-#define picorv32_setq_insn(_qd, _rs) \
-r_type_insn(0b0000001, 0, regnum_ ## _rs, 0b010, regnum_ ## _qd, 0b0001011)
-
-#define picorv32_retirq_insn() \
-r_type_insn(0b0000010, 0, 0, 0b000, 0, 0b0001011)
-
-#define picorv32_maskirq_insn(_rd, _rs) \
-r_type_insn(0b0000011, 0, regnum_ ## _rs, 0b110, regnum_ ## _rd, 0b0001011)
-
-#define picorv32_waitirq_insn(_rd) \
-r_type_insn(0b0000100, 0, 0, 0b100, regnum_ ## _rd, 0b0001011)
-
-#define picorv32_timer_insn(_rd, _rs) \
-r_type_insn(0b0000101, 0, regnum_ ## _rs, 0b110, regnum_ ## _rd, 0b0001011)
+++ /dev/null
-#ifndef __SYSTEM_H
-#define __SYSTEM_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-void flush_cpu_icache(void);
-void flush_cpu_dcache(void);
-void flush_l2_cache(void);
-
-void busy_wait(unsigned int ms);
-
-#ifdef __or1k__
-#include <spr-defs.h>
-static inline unsigned long mfspr(unsigned long add)
-{
- unsigned long ret;
-
- __asm__ __volatile__ ("l.mfspr %0,%1,0" : "=r" (ret) : "r" (add));
-
- return ret;
-}
-
-static inline void mtspr(unsigned long add, unsigned long val)
-{
- __asm__ __volatile__ ("l.mtspr %0,%1,0" : : "r" (add), "r" (val));
-}
-#endif
-
-#if defined(__vexriscv__) || defined(__minerva__) || defined(__rocket__) || defined(__blackparrot__)
-#include <csr-defs.h>
-#define csrr(reg) ({ unsigned long __tmp; \
- asm volatile ("csrr %0, " #reg : "=r"(__tmp)); \
- __tmp; })
-
-#define csrw(reg, val) ({ \
- if (__builtin_constant_p(val) && (unsigned long)(val) < 32) \
- asm volatile ("csrw " #reg ", %0" :: "i"(val)); \
- else \
- asm volatile ("csrw " #reg ", %0" :: "r"(val)); })
-
-#define csrs(reg, bit) ({ \
- if (__builtin_constant_p(bit) && (unsigned long)(bit) < 32) \
- asm volatile ("csrrs x0, " #reg ", %0" :: "i"(bit)); \
- else \
- asm volatile ("csrrs x0, " #reg ", %0" :: "r"(bit)); })
-
-#define csrc(reg, bit) ({ \
- if (__builtin_constant_p(bit) && (unsigned long)(bit) < 32) \
- asm volatile ("csrrc x0, " #reg ", %0" :: "i"(bit)); \
- else \
- asm volatile ("csrrc x0, " #reg ", %0" :: "r"(bit)); })
-#endif
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif /* __SYSTEM_H */
OBJECTS=exception.o libc.o errno.o crc16.o crc32.o console.o \
system.o id.o uart.o time.o qsort.o strtod.o spiflash.o spisdcard.o strcasecmp.o mdio.o
-all: crt0-$(CPU)-ctr.o crt0-$(CPU)-xip.o libbase.a libbase-nofloat.a
+all: crt0-ctr.o crt0-xip.o libbase.a libbase-nofloat.a
libbase.a: $(OBJECTS) vsnprintf.o
$(AR) crs libbase.a $(OBJECTS) vsnprintf.o
%.o: $(LIBBASE_DIRECTORY)/%.S
$(assemble)
-crt0-$(CPU)-ctr.o: $(LIBBASE_DIRECTORY)/crt0-$(CPU).S
+crt0-ctr.o: $(CPU_DIRECTORY)/crt0.S
+	cp $(CPU_DIRECTORY)/crt0.S $(LIBBASE_DIRECTORY)/crt0-ctr.S
$(assemble)
-crt0-$(CPU)-xip.o: $(LIBBASE_DIRECTORY)/crt0-$(CPU).S
+crt0-xip.o: $(CPU_DIRECTORY)/crt0.S
+ cp $(CPU_DIRECTORY)/crt0.S $(LIBBASE_DIRECTORY)/crt0-xip.S
$(CC) -c -DEXECUTE_IN_PLACE $(CFLAGS) -o $@ $<
.PHONY: all clean
+++ /dev/null
-.global main
-.global isr
-.global _start
-
-_start:
- j crt_init
- nop
- nop
- nop
- nop
- nop
- nop
- nop
-
-trap_entry:
- sd x1, - 1*8(sp)
- sd x5, - 2*8(sp)
- sd x6, - 3*8(sp)
- sd x7, - 4*8(sp)
- sd x10, - 5*8(sp)
- sd x11, - 6*8(sp)
- sd x12, - 7*8(sp)
- sd x13, - 8*8(sp)
- sd x14, - 9*8(sp)
- sd x15, -10*8(sp)
- sd x16, -11*8(sp)
- sd x17, -12*8(sp)
- sd x28, -13*8(sp)
- sd x29, -14*8(sp)
- sd x30, -15*8(sp)
- sd x31, -16*8(sp)
- addi sp,sp,-16*8
- call isr
- ld x1 , 15*8(sp)
- ld x5, 14*8(sp)
- ld x6, 13*8(sp)
- ld x7, 12*8(sp)
- ld x10, 11*8(sp)
- ld x11, 10*8(sp)
- ld x12, 9*8(sp)
- ld x13, 8*8(sp)
- ld x14, 7*8(sp)
- ld x15, 6*8(sp)
- ld x16, 5*8(sp)
- ld x17, 4*8(sp)
- ld x28, 3*8(sp)
- ld x29, 2*8(sp)
- ld x30, 1*8(sp)
- ld x31, 0*8(sp)
- addi sp,sp,16*8
- mret
- .text
-
-
-crt_init:
- la sp, _fstack + 8
- la a0, trap_entry
- csrw mtvec, a0
-
-bss_init:
- la a0, _fbss
- la a1, _ebss
-bss_loop:
- beq a0,a1,bss_done
- sd zero,0(a0)
- add a0,a0,8
- j bss_loop
-bss_done:
-
-// call plic_init // initialize external interrupt controller
-# li a0, 0x800 // external interrupt sources only (using LiteX timer);
- // NOTE: must still enable mstatus.MIE!
- csrw mie,a0
-
- call main
-inf_loop:
- j inf_loop
+++ /dev/null
-/*
- * LatticeMico32 C startup code.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-/* Exception handlers - Must be 32 bytes long. */
-.section .text, "ax", @progbits
-.global _start
-_start:
-_reset_handler:
- xor r0, r0, r0
- wcsr IE, r0
- mvhi r1, hi(_reset_handler)
- ori r1, r1, lo(_reset_handler)
- wcsr EBA, r1
- bi _crt0
- nop
- nop
-
-_breakpoint_handler:
- bi _breakpoint_handler
- nop
- nop
- nop
- nop
- nop
- nop
- nop
-
-_instruction_bus_error_handler:
- bi _instruction_bus_error_handler
- nop
- nop
- nop
- nop
- nop
- nop
- nop
-
-_watchpoint_hander:
- bi _watchpoint_hander
- nop
- nop
- nop
- nop
- nop
- nop
- nop
-
-_data_bus_error_handler:
- bi _data_bus_error_handler
- nop
- nop
- nop
- nop
- nop
- nop
- nop
-
-_divide_by_zero_handler:
- bi _divide_by_zero_handler
- nop
- nop
- nop
- nop
- nop
- nop
- nop
-
-_interrupt_handler:
- sw (sp+0), ra
- calli .save_all
- calli isr
- bi .restore_all_and_eret
- nop
- nop
- nop
- nop
-
-_syscall_handler:
- bi _syscall_handler
- nop
- nop
- nop
- nop
- nop
- nop
- nop
-
-_crt0:
- /* Setup stack and global pointer */
- mvhi sp, hi(_fstack)
- ori sp, sp, lo(_fstack)
-
-#ifdef EXECUTE_IN_PLACE
- /* Load DATA */
- mvhi r1, hi(_erodata)
- ori r1, r1, lo(_erodata)
- mvhi r2, hi(_fdata)
- ori r2, r2, lo(_fdata)
- mvhi r3, hi(_edata)
- ori r3, r3, lo(_edata)
-.moveDATA:
- be r2, r3, .doBSS
- lw r4, (r1+0)
- sw (r2+0), r4
- /* _edata is aligned to 16 bytes. Use word-xfers. */
- addi r1, r1, 4
- addi r2, r2, 4
- bi .moveDATA
-#endif
-
-.doBSS:
- /* Clear BSS */
- mvhi r1, hi(_fbss)
- ori r1, r1, lo(_fbss)
- mvhi r3, hi(_ebss)
- ori r3, r3, lo(_ebss)
-.clearBSS:
- be r1, r3, .callMain
- sw (r1+0), r0
- addi r1, r1, 4
- bi .clearBSS
-
-.callMain:
- bi main
-
-.save_all:
- addi sp, sp, -56
- sw (sp+4), r1
- sw (sp+8), r2
- sw (sp+12), r3
- sw (sp+16), r4
- sw (sp+20), r5
- sw (sp+24), r6
- sw (sp+28), r7
- sw (sp+32), r8
- sw (sp+36), r9
- sw (sp+40), r10
- sw (sp+48), ea
- sw (sp+52), ba
- /* ra needs to be moved from initial stack location */
- lw r1, (sp+56)
- sw (sp+44), r1
- ret
-
-.restore_all_and_eret:
- lw r1, (sp+4)
- lw r2, (sp+8)
- lw r3, (sp+12)
- lw r4, (sp+16)
- lw r5, (sp+20)
- lw r6, (sp+24)
- lw r7, (sp+28)
- lw r8, (sp+32)
- lw r9, (sp+36)
- lw r10, (sp+40)
- lw ra, (sp+44)
- lw ea, (sp+48)
- lw ba, (sp+52)
- addi sp, sp, 56
- eret
+++ /dev/null
-/* Copyright 2013-2014 IBM Corp.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- * implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#define STACK_TOP 0xffff4000
-
-#define FIXUP_ENDIAN \
- tdi 0,0,0x48; /* Reverse endian of b . + 8 */ \
- b 191f; /* Skip trampoline if endian is good */ \
- .long 0xa600607d; /* mfmsr r11 */ \
- .long 0x01006b69; /* xori r11,r11,1 */ \
- .long 0x05009f42; /* bcl 20,31,$+4 */ \
- .long 0xa602487d; /* mflr r10 */ \
- .long 0x14004a39; /* addi r10,r10,20 */ \
- .long 0xa64b5a7d; /* mthsrr0 r10 */ \
- .long 0xa64b7b7d; /* mthsrr1 r11 */ \
- .long 0x2402004c; /* hrfid */ \
-191:
-
-
-/* Load an immediate 64-bit value into a register */
-#define LOAD_IMM64(r, e) \
- lis r,(e)@highest; \
- ori r,r,(e)@higher; \
- rldicr r,r, 32, 31; \
- oris r,r, (e)@h; \
- ori r,r, (e)@l;
-
- .section ".head","ax"
-
- . = 0
-.global _start
-_start:
- FIXUP_ENDIAN
-
- /* setup stack */
- LOAD_IMM64(%r1, STACK_TOP - 0x100)
- LOAD_IMM64(%r12, main)
- mtctr %r12,
- bctrl
- ba 0
-
- /* XXX: litedram init should not take exceptions, maybe we could get
- * rid of these to save space, along with a core tweak to suppress
- * exceptions in case they happen (just terminate ?)
- */
-
-#define EXCEPTION(nr) \
- .= nr; \
- b .
-
- /* More exception stubs */
- EXCEPTION(0x100)
- EXCEPTION(0x200)
- EXCEPTION(0x300)
- EXCEPTION(0x380)
- EXCEPTION(0x400)
- EXCEPTION(0x480)
- EXCEPTION(0x500)
- EXCEPTION(0x600)
- EXCEPTION(0x700)
- EXCEPTION(0x800)
- EXCEPTION(0x900)
- EXCEPTION(0x980)
- EXCEPTION(0xa00)
- EXCEPTION(0xb00)
- EXCEPTION(0xc00)
- EXCEPTION(0xd00)
- EXCEPTION(0xe00)
- EXCEPTION(0xe20)
- EXCEPTION(0xe40)
- EXCEPTION(0xe60)
- EXCEPTION(0xe80)
- EXCEPTION(0xf00)
- EXCEPTION(0xf20)
- EXCEPTION(0xf40)
- EXCEPTION(0xf60)
- EXCEPTION(0xf80)
-#if 0
- EXCEPTION(0x1000)
- EXCEPTION(0x1100)
- EXCEPTION(0x1200)
- EXCEPTION(0x1300)
- EXCEPTION(0x1400)
- EXCEPTION(0x1500)
- EXCEPTION(0x1600)
-#endif
-
- .text
-
+++ /dev/null
-#define MIE_MEIE 0x800
-
- .global _start
-_start:
- j reset_vector
-
-reset_vector:
- la sp, _fstack
- la t0, trap_vector
- csrw mtvec, t0
-
- // initialize .bss
- la t0, _fbss
- la t1, _ebss
-1: beq t0, t1, 2f
- sw zero, 0(t0)
- addi t0, t0, 4
- j 1b
-2:
- // enable external interrupts
- li t0, MIE_MEIE
- csrs mie, t0
-
- call main
-1: j 1b
-
-trap_vector:
- addi sp, sp, -16*4
- sw ra, 0*4(sp)
- sw t0, 1*4(sp)
- sw t1, 2*4(sp)
- sw t2, 3*4(sp)
- sw a0, 4*4(sp)
- sw a1, 5*4(sp)
- sw a2, 6*4(sp)
- sw a3, 7*4(sp)
- sw a4, 8*4(sp)
- sw a5, 9*4(sp)
- sw a6, 10*4(sp)
- sw a7, 11*4(sp)
- sw t3, 12*4(sp)
- sw t4, 13*4(sp)
- sw t5, 14*4(sp)
- sw t6, 15*4(sp)
- call isr
- lw ra, 0*4(sp)
- lw t0, 1*4(sp)
- lw t1, 2*4(sp)
- lw t2, 3*4(sp)
- lw a0, 4*4(sp)
- lw a1, 5*4(sp)
- lw a2, 6*4(sp)
- lw a3, 7*4(sp)
- lw a4, 8*4(sp)
- lw a5, 9*4(sp)
- lw a6, 10*4(sp)
- lw a7, 11*4(sp)
- lw t3, 12*4(sp)
- lw t4, 13*4(sp)
- lw t5, 14*4(sp)
- lw t6, 15*4(sp)
- addi sp, sp, 16*4
- mret
+++ /dev/null
-/*
- * (C) Copyright 2012, Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
- * MA 02111-1307 USA
- */
-
-#include <spr-defs.h>
-
-/*
- * OR1K Architecture has a 128 byte "red zone" after the stack that can not be
- * touched by exception handlers. GCC uses this red zone for locals and
- * temps without needing to change the stack pointer.
- */
-#define OR1K_RED_ZONE_SIZE 128
-
-/*
- * We need 4 bytes (32 bits) * 32 registers space on the stack to save all the
- * registers.
- */
-#define EXCEPTION_STACK_SIZE ((4*32) + OR1K_RED_ZONE_SIZE)
-
-#define HANDLE_EXCEPTION ; \
- l.addi r1, r1, -EXCEPTION_STACK_SIZE ; \
- l.sw 0x1c(r1), r9 ; \
- l.jal _exception_handler ; \
- l.nop ; \
- l.lwz r9, 0x1c(r1) ; \
- l.addi r1, r1, EXCEPTION_STACK_SIZE ; \
- l.rfe ; \
- l.nop
-
-
-.section .text, "ax", @progbits
-.global _start
-_start:
-_reset_handler:
- l.movhi r0, 0
- l.movhi r1, 0
- l.movhi r2, 0
- l.movhi r3, 0
- l.movhi r4, 0
- l.movhi r5, 0
- l.movhi r6, 0
- l.movhi r7, 0
- l.movhi r8, 0
- l.movhi r9, 0
- l.movhi r10, 0
- l.movhi r11, 0
- l.movhi r12, 0
- l.movhi r13, 0
- l.movhi r14, 0
- l.movhi r15, 0
- l.movhi r16, 0
- l.movhi r17, 0
- l.movhi r18, 0
- l.movhi r19, 0
- l.movhi r20, 0
- l.movhi r21, 0
- l.movhi r22, 0
- l.movhi r23, 0
- l.movhi r24, 0
- l.movhi r25, 0
- l.movhi r26, 0
- l.movhi r27, 0
- l.movhi r28, 0
- l.movhi r29, 0
- l.movhi r30, 0
- l.movhi r31, 0
-
- l.ori r21, r0, SPR_SR_SM
- l.mtspr r0, r21, SPR_SR
- l.movhi r21, hi(_reset_handler)
- l.ori r21, r21, lo(_reset_handler)
- l.mtspr r0, r21, SPR_EVBAR
- /* enable caches */
- l.jal _cache_init
- l.nop
- l.j _crt0
- l.nop
-
- /* bus error */
- .org 0x200
- HANDLE_EXCEPTION
-
- /* data page fault */
- .org 0x300
- HANDLE_EXCEPTION
-
- /* instruction page fault */
- .org 0x400
- HANDLE_EXCEPTION
-
- /* tick timer */
- .org 0x500
- HANDLE_EXCEPTION
-
- /* alignment */
- .org 0x600
- HANDLE_EXCEPTION
-
- /* illegal instruction */
- .org 0x700
- HANDLE_EXCEPTION
-
- /* external interrupt */
- .org 0x800
- HANDLE_EXCEPTION
-
- /* D-TLB miss */
- .org 0x900
- HANDLE_EXCEPTION
-
- /* I-TLB miss */
- .org 0xa00
- HANDLE_EXCEPTION
-
- /* range */
- .org 0xb00
- HANDLE_EXCEPTION
-
- /* system call */
- .org 0xc00
- HANDLE_EXCEPTION
-
- /* floating point */
- .org 0xd00
- HANDLE_EXCEPTION
-
- /* trap */
- .org 0xe00
- HANDLE_EXCEPTION
-
- /* reserved */
- .org 0xf00
- HANDLE_EXCEPTION
-
- .org 0x1000
-_crt0:
- /* Setup stack and global pointer */
- l.movhi r1, hi(_fstack)
- l.ori r1, r1, lo(_fstack)
-
- /* Clear BSS */
- l.movhi r21, hi(_fbss)
- l.ori r21, r21, lo(_fbss)
- l.movhi r3, hi(_ebss)
- l.ori r3, r3, lo(_ebss)
-.clearBSS:
- l.sfeq r21, r3
- l.bf .callMain
- l.nop
- l.sw 0(r21), r0
- l.addi r21, r21, 4
- l.j .clearBSS
- l.nop
-
-.callMain:
- l.j main
- l.nop
-
-_exception_handler:
- l.sw 0x00(r1), r2
- l.sw 0x04(r1), r3
- l.sw 0x08(r1), r4
- l.sw 0x0c(r1), r5
- l.sw 0x10(r1), r6
- l.sw 0x14(r1), r7
- l.sw 0x18(r1), r8
- l.sw 0x20(r1), r10
- l.sw 0x24(r1), r11
- l.sw 0x28(r1), r12
- l.sw 0x2c(r1), r13
- l.sw 0x30(r1), r14
- l.sw 0x34(r1), r15
- l.sw 0x38(r1), r16
- l.sw 0x3c(r1), r17
- l.sw 0x40(r1), r18
- l.sw 0x44(r1), r19
- l.sw 0x48(r1), r20
- l.sw 0x4c(r1), r21
- l.sw 0x50(r1), r22
- l.sw 0x54(r1), r23
- l.sw 0x58(r1), r24
- l.sw 0x5c(r1), r25
- l.sw 0x60(r1), r26
- l.sw 0x64(r1), r27
- l.sw 0x68(r1), r28
- l.sw 0x6c(r1), r29
- l.sw 0x70(r1), r30
- l.sw 0x74(r1), r31
-
- /* Save return address */
- l.or r14, r0, r9
- /* Calculate exception vector from handler address */
- l.andi r3, r9, 0xf00
- l.srli r3, r3, 8
- /* Pass saved register state */
- l.or r4, r0, r1
- /* Extract exception PC */
- l.mfspr r5, r0, SPR_EPCR_BASE
- /* Extract exception effective address */
- l.mfspr r6, r0, SPR_EEAR_BASE
- /* Extract exception SR */
- l.mfspr r7, r0, SPR_ESR_BASE
- /* Call exception handler with the link address as argument */
- l.jal exception_handler
- l.nop
-
- /* Load return address */
- l.or r9, r0, r14
- /* Restore state */
- l.lwz r2, 0x00(r1)
- l.lwz r3, 0x04(r1)
- l.lwz r4, 0x08(r1)
- l.lwz r5, 0x0c(r1)
- l.lwz r6, 0x10(r1)
- l.lwz r7, 0x14(r1)
- l.lwz r8, 0x18(r1)
- l.lwz r10, 0x20(r1)
- l.lwz r11, 0x24(r1)
- l.lwz r12, 0x28(r1)
- l.lwz r13, 0x2c(r1)
- l.lwz r14, 0x30(r1)
- l.lwz r15, 0x34(r1)
- l.lwz r16, 0x38(r1)
- l.lwz r17, 0x3c(r1)
- l.lwz r18, 0x40(r1)
- l.lwz r19, 0x44(r1)
- l.lwz r20, 0x48(r1)
- l.lwz r21, 0x4c(r1)
- l.lwz r22, 0x50(r1)
- l.lwz r23, 0x54(r1)
- l.lwz r24, 0x58(r1)
- l.lwz r25, 0x5c(r1)
- l.lwz r26, 0x60(r1)
- l.lwz r27, 0x64(r1)
- l.lwz r28, 0x68(r1)
- l.lwz r29, 0x6c(r1)
- l.lwz r30, 0x70(r1)
- l.lwz r31, 0x74(r1)
- l.jr r9
- l.nop
-
-.global _cache_init
-_cache_init:
- /*
- This function is to be used ONLY during reset, before main() is called.
- TODO: Perhaps break into individual enable instruction/data cache
- sections functions, and provide disable functions, also, all
- callable from C
- */
-
- /* Instruction cache enable */
- /* Check if IC present and skip enabling otherwise */
-#if 1
-.L6:
- l.mfspr r3,r0,SPR_UPR
- l.andi r7,r3,SPR_UPR_ICP
- l.sfeq r7,r0
- l.bf .L8
- l.nop
-
- /* Disable IC */
- l.mfspr r6,r0,SPR_SR
- l.addi r5,r0,-1
- l.xori r5,r5,SPR_SR_ICE
- l.and r5,r6,r5
- l.mtspr r0,r5,SPR_SR
-
- /* Establish cache block size
- If BS=0, 16;
- If BS=1, 32;
- r14 contain block size
- */
- l.mfspr r3,r0,SPR_ICCFGR
- l.andi r7,r3,SPR_ICCFGR_CBS
- l.srli r8,r7,7
- l.ori r4,r0,16
- l.sll r14,r4,r8
-
- /* Establish number of cache sets
- r10 contains number of cache sets
- r8 contains log(# of cache sets)
- */
- l.andi r7,r3,SPR_ICCFGR_NCS
- l.srli r8,r7,3
- l.ori r4,r0,1
- l.sll r10,r4,r8
-
- /* Invalidate IC */
- l.addi r6,r0,0
- l.sll r5,r14,r8
-
-.L7: l.mtspr r0,r6,SPR_ICBIR
- l.sfne r6,r5
- l.bf .L7
- l.add r6,r6,r14
-
- /* Enable IC */
- l.mfspr r6,r0,SPR_SR
- l.ori r6,r6,SPR_SR_ICE
- l.mtspr r0,r6,SPR_SR
- l.nop
- l.nop
- l.nop
- l.nop
- l.nop
- l.nop
- l.nop
- l.nop
- /* Data cache enable */
- /* Check if DC present and skip enabling otherwise */
-#endif
-.L8:
-#if 1
- l.mfspr r3,r0,SPR_UPR
- l.andi r7,r3,SPR_UPR_DCP
- l.sfeq r7,r0
- l.bf .L10
- l.nop
- /* Disable DC */
- l.mfspr r6,r0,SPR_SR
- l.addi r5,r0,-1
- l.xori r5,r5,SPR_SR_DCE
- l.and r5,r6,r5
- l.mtspr r0,r5,SPR_SR
- /* Establish cache block size
- If BS=0, 16;
- If BS=1, 32;
- r14 contain block size
- */
- l.mfspr r3,r0,SPR_DCCFGR
- l.andi r7,r3,SPR_DCCFGR_CBS
- l.srli r8,r7,7
- l.ori r4,r0,16
- l.sll r14,r4,r8
- /* Establish number of cache sets
- r10 contains number of cache sets
- r8 contains log(# of cache sets)
- */
- l.andi r7,r3,SPR_DCCFGR_NCS
- l.srli r8,r7,3
- l.ori r4,r0,1
- l.sll r10,r4,r8
- /* Invalidate DC */
- l.addi r6,r0,0
- l.sll r5,r14,r8
-
-.L9:
- l.mtspr r0,r6,SPR_DCBIR
- l.sfne r6,r5
- l.bf .L9
- l.add r6,r6,r14
- /* Enable DC */
- l.mfspr r6,r0,SPR_SR
- l.ori r6,r6,SPR_SR_DCE
- l.mtspr r0,r6,SPR_SR
-#endif
-.L10:
- /* Return */
- l.jr r9
- l.nop
+++ /dev/null
-/*
- * Copyright 2018, Serge Bazanski <serge@bazanski.pl>
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted.
- */
-
-#include "picorv32-extraops.S"
-
-/*
- * Interrupt vector.
- */
-.global _start
-_start:
-
-.org 0x00000000 # Reset
- j _crt0
-
-.org 0x00000010 # IRQ
-_irq_vector:
- addi sp, sp, -16
- sw t0, 4(sp)
- sw ra, 8(sp)
- /* By convention, q2 holds true IRQ vector, but remains caller-save.
- We rely on the assumption that compiler-generated code will never touch
- the QREGs. q3 is truly scratch/caller-save. */
- picorv32_getq_insn(t0, q2)
- sw t0, 12(sp)
-
- jalr t0 // Call the true IRQ vector.
-
- lw t0, 12(sp)
- picorv32_setq_insn(q2, t0) // Restore the true IRQ vector.
- lw ra, 8(sp)
- lw t0, 4(sp)
- addi sp, sp, 16
- picorv32_retirq_insn() // return from interrupt
-
-
-/*
- * IRQ handler, branched to from the vector.
- */
-_irq:
- /* save x1/x2 to q1/q2 */
- picorv32_setq_insn(q2, x1)
- picorv32_setq_insn(q3, x2)
-
- /* use x1 to index into irq_regs */
- lui x1, %hi(irq_regs)
- addi x1, x1, %lo(irq_regs)
-
- /* use x2 as scratch space for saving registers */
-
- /* q0 (== x1), q2(== x2), q3 */
- picorv32_getq_insn(x2, q0)
- sw x2, 0*4(x1)
- picorv32_getq_insn(x2, q2)
- sw x2, 1*4(x1)
- picorv32_getq_insn(x2, q3)
- sw x2, 2*4(x1)
-
- /* save x3 - x31 */
- sw x3, 3*4(x1)
- sw x4, 4*4(x1)
- sw x5, 5*4(x1)
- sw x6, 6*4(x1)
- sw x7, 7*4(x1)
- sw x8, 8*4(x1)
- sw x9, 9*4(x1)
- sw x10, 10*4(x1)
- sw x11, 11*4(x1)
- sw x12, 12*4(x1)
- sw x13, 13*4(x1)
- sw x14, 14*4(x1)
- sw x15, 15*4(x1)
- sw x16, 16*4(x1)
- sw x17, 17*4(x1)
- sw x18, 18*4(x1)
- sw x19, 19*4(x1)
- sw x20, 20*4(x1)
- sw x21, 21*4(x1)
- sw x22, 22*4(x1)
- sw x23, 23*4(x1)
- sw x24, 24*4(x1)
- sw x25, 25*4(x1)
- sw x26, 26*4(x1)
- sw x27, 27*4(x1)
- sw x28, 28*4(x1)
- sw x29, 29*4(x1)
- sw x30, 30*4(x1)
- sw x31, 31*4(x1)
-
- /* update _irq_pending to the currently pending interrupts */
- picorv32_getq_insn(t0, q1)
- la t1, (_irq_pending)
- sw t0, 0(t1)
-
- /* prepare C handler stack */
- lui sp, %hi(_irq_stack)
- addi sp, sp, %lo(_irq_stack)
-
- /* call C handler */
- jal ra, isr
-
- /* use x1 to index into irq_regs */
- lui x1, %hi(irq_regs)
- addi x1, x1, %lo(irq_regs)
-
- /* restore q0 - q2 */
- lw x2, 0*4(x1)
- picorv32_setq_insn(q0, x2)
- lw x2, 1*4(x1)
- picorv32_setq_insn(q1, x2)
- lw x2, 2*4(x1)
- picorv32_setq_insn(q2, x2)
-
- /* restore x3 - x31 */
- lw x3, 3*4(x1)
- lw x4, 4*4(x1)
- lw x5, 5*4(x1)
- lw x6, 6*4(x1)
- lw x7, 7*4(x1)
- lw x8, 8*4(x1)
- lw x9, 9*4(x1)
- lw x10, 10*4(x1)
- lw x11, 11*4(x1)
- lw x12, 12*4(x1)
- lw x13, 13*4(x1)
- lw x14, 14*4(x1)
- lw x15, 15*4(x1)
- lw x16, 16*4(x1)
- lw x17, 17*4(x1)
- lw x18, 18*4(x1)
- lw x19, 19*4(x1)
- lw x20, 20*4(x1)
- lw x21, 21*4(x1)
- lw x22, 22*4(x1)
- lw x23, 23*4(x1)
- lw x24, 24*4(x1)
- lw x25, 25*4(x1)
- lw x26, 26*4(x1)
- lw x27, 27*4(x1)
- lw x28, 28*4(x1)
- lw x29, 29*4(x1)
- lw x30, 30*4(x1)
- lw x31, 31*4(x1)
-
- /* restore x1 - x2 from q registers */
- picorv32_getq_insn(x1, q1)
- picorv32_getq_insn(x2, q2)
- ret
-
-/*
- * Reset handler, branched to from the vector.
- */
-_crt0:
- /* zero-initialize all registers */
- addi x1, zero, 0
- addi x2, zero, 0
- addi x3, zero, 0
- addi x4, zero, 0
- addi x5, zero, 0
- addi x6, zero, 0
- addi x7, zero, 0
- addi x8, zero, 0
- addi x9, zero, 0
- addi x10, zero, 0
- addi x11, zero, 0
- addi x12, zero, 0
- addi x13, zero, 0
- addi x14, zero, 0
- addi x15, zero, 0
- addi x16, zero, 0
- addi x17, zero, 0
- addi x18, zero, 0
- addi x19, zero, 0
- addi x20, zero, 0
- addi x21, zero, 0
- addi x22, zero, 0
- addi x23, zero, 0
- addi x24, zero, 0
- addi x25, zero, 0
- addi x26, zero, 0
- addi x27, zero, 0
- addi x28, zero, 0
- addi x29, zero, 0
- addi x30, zero, 0
- addi x31, zero, 0
-
- /* mask all interrupts */
- li t0, 0xffffffff
- picorv32_maskirq_insn(zero, t0)
- /* reflect that in _irq_mask */
- la t1, _irq_mask
- sw t0, 0(t1)
-
-#ifdef EXECUTE_IN_PLACE
- /* Load DATA */
- la t0, _erodata
- la t1, _fdata
- la t2, _edata
-3:
- lw t3, 0(t0)
- sw t3, 0(t1)
- /* _edata is aligned to 16 bytes. Use word-xfers. */
- addi t0, t0, 4
- addi t1, t1, 4
- bltu t1, t2, 3b
-#endif
-
- /* Clear BSS */
- la t0, _fbss
- la t1, _ebss
-2:
- sw zero, 0(t0)
- addi t0, t0, 4
- bltu t0, t1, 2b
-
- /* set main stack */
- la sp, _fstack
-
- /* Set up address to IRQ handler since vector is hardcoded.
- By convention, q2 keeps the pointer to the true IRQ handler,
- to emulate relocatable interrupts. */
- la t0, _irq
- picorv32_setq_insn(q2, t0)
-
- /* jump to main */
- jal ra, main
-
-1:
- /* loop forever */
- j 1b
-
-
-/*
- * Enable interrupts by copying the software mask to the hardware mask
- */
-.global _irq_enable
-_irq_enable:
- /* Set _irq_enabled to true */
- la t0, _irq_enabled
- addi t1, zero, 1
- sw t1, 0(t0)
- /* Set the HW IRQ mask to _irq_mask */
- la t0, _irq_mask
- lw t0, 0(t0)
- picorv32_maskirq_insn(zero, t0)
- ret
-
-/*
- * Disable interrupts by masking all interrupts (the mask should already be
- * up to date)
- */
-.global _irq_disable
-_irq_disable:
- /* Mask all IRQs */
- li t0, 0xffffffff
- picorv32_maskirq_insn(zero, t0)
- /* Set _irq_enabled to false */
- la t0, _irq_enabled
- sw zero, (t0)
- ret
-
-/*
- * Set interrrupt mask.
- * This updates the software mask (for readback and interrupt inable/disable)
- * and the hardware mask.
- * 1 means interrupt is masked (disabled).
- */
-.global _irq_setmask
-_irq_setmask:
- /* Update _irq_mask */
- la t0, _irq_mask
- sw a0, (t0)
- /* Are interrupts enabled? */
- la t0, _irq_enabled
- lw t0, 0(t0)
- beq t0, zero, 1f
- /* If so, update the HW IRQ mask */
- picorv32_maskirq_insn(zero, a0)
-1:
- ret
-
-
-.section .bss
-irq_regs:
- /* saved interrupt registers, x0 - x31 */
- .fill 32,4
-
- /* interrupt stack */
- .fill 256,4
-_irq_stack:
-
-/*
- * Bitfield of pending interrupts, updated on ISR entry.
- */
-.global _irq_pending
-_irq_pending:
- .word 0
-
-/*
- * Software copy of enabled interrupts. Do not write directly, use
- * _irq_set_mask instead.
- */
-.global _irq_mask
-_irq_mask:
- .word 0
-
-/*
- * Software state of global interrupts being enabled or disabled. Do not write
- * directly, use _irq_disable / _irq_enable instead.
- */
-.global _irq_enabled
-_irq_enabled:
- .word 0
+++ /dev/null
-.global main
-.global isr
-.global _start
-
-_start:
- j crt_init
- nop
- nop
- nop
- nop
- nop
- nop
- nop
-
-trap_entry:
- sd x1, - 1*8(sp)
- sd x5, - 2*8(sp)
- sd x6, - 3*8(sp)
- sd x7, - 4*8(sp)
- sd x10, - 5*8(sp)
- sd x11, - 6*8(sp)
- sd x12, - 7*8(sp)
- sd x13, - 8*8(sp)
- sd x14, - 9*8(sp)
- sd x15, -10*8(sp)
- sd x16, -11*8(sp)
- sd x17, -12*8(sp)
- sd x28, -13*8(sp)
- sd x29, -14*8(sp)
- sd x30, -15*8(sp)
- sd x31, -16*8(sp)
- addi sp,sp,-16*8
- call isr
- ld x1 , 15*8(sp)
- ld x5, 14*8(sp)
- ld x6, 13*8(sp)
- ld x7, 12*8(sp)
- ld x10, 11*8(sp)
- ld x11, 10*8(sp)
- ld x12, 9*8(sp)
- ld x13, 8*8(sp)
- ld x14, 7*8(sp)
- ld x15, 6*8(sp)
- ld x16, 5*8(sp)
- ld x17, 4*8(sp)
- ld x28, 3*8(sp)
- ld x29, 2*8(sp)
- ld x30, 1*8(sp)
- ld x31, 0*8(sp)
- addi sp,sp,16*8
- mret
- .text
-
-
-crt_init:
- la sp, _fstack + 8
- la a0, trap_entry
- csrw mtvec, a0
-
-bss_init:
- la a0, _fbss
- la a1, _ebss
-bss_loop:
- beq a0,a1,bss_done
- sd zero,0(a0)
- add a0,a0,8
- j bss_loop
-bss_done:
-
- call plic_init // initialize external interrupt controller
- li a0, 0x800 // external interrupt sources only (using LiteX timer);
- // NOTE: must still enable mstatus.MIE!
- csrw mie,a0
-
- call main
-inf_loop:
- j inf_loop
+++ /dev/null
-#define MIE_MEIE 0x800
-
- .global _start
-_start:
- j reset_vector
-
-reset_vector:
- la sp, _fstack
- la t0, trap_vector
- csrw mtvec, t0
-
- // initialize .bss
- la t0, _fbss
- la t1, _ebss
-1: beq t0, t1, 2f
- sw zero, 0(t0)
- addi t0, t0, 4
- j 1b
-2:
- // enable external interrupts
- li t0, MIE_MEIE
- csrs mie, t0
-
- call main
-1: j 1b
-
-trap_vector:
- addi sp, sp, -16*4
- sw ra, 0*4(sp)
- sw t0, 1*4(sp)
- sw t1, 2*4(sp)
- sw t2, 3*4(sp)
- sw a0, 4*4(sp)
- sw a1, 5*4(sp)
- sw a2, 6*4(sp)
- sw a3, 7*4(sp)
- sw a4, 8*4(sp)
- sw a5, 9*4(sp)
- sw a6, 10*4(sp)
- sw a7, 11*4(sp)
- sw t3, 12*4(sp)
- sw t4, 13*4(sp)
- sw t5, 14*4(sp)
- sw t6, 15*4(sp)
- call isr
- lw ra, 0*4(sp)
- lw t0, 1*4(sp)
- lw t1, 2*4(sp)
- lw t2, 3*4(sp)
- lw a0, 4*4(sp)
- lw a1, 5*4(sp)
- lw a2, 6*4(sp)
- lw a3, 7*4(sp)
- lw a4, 8*4(sp)
- lw a5, 9*4(sp)
- lw a6, 10*4(sp)
- lw a7, 11*4(sp)
- lw t3, 12*4(sp)
- lw t4, 13*4(sp)
- lw t5, 14*4(sp)
- lw t6, 15*4(sp)
- addi sp, sp, 16*4
- mret
+++ /dev/null
-.global main
-.global isr
-.global _start
-
-_start:
- j crt_init
- nop
- nop
- nop
- nop
- nop
- nop
- nop
-
-.global trap_entry
-trap_entry:
- sw x1, - 1*4(sp)
- sw x5, - 2*4(sp)
- sw x6, - 3*4(sp)
- sw x7, - 4*4(sp)
- sw x10, - 5*4(sp)
- sw x11, - 6*4(sp)
- sw x12, - 7*4(sp)
- sw x13, - 8*4(sp)
- sw x14, - 9*4(sp)
- sw x15, -10*4(sp)
- sw x16, -11*4(sp)
- sw x17, -12*4(sp)
- sw x28, -13*4(sp)
- sw x29, -14*4(sp)
- sw x30, -15*4(sp)
- sw x31, -16*4(sp)
- addi sp,sp,-16*4
- call isr
- lw x1 , 15*4(sp)
- lw x5, 14*4(sp)
- lw x6, 13*4(sp)
- lw x7, 12*4(sp)
- lw x10, 11*4(sp)
- lw x11, 10*4(sp)
- lw x12, 9*4(sp)
- lw x13, 8*4(sp)
- lw x14, 7*4(sp)
- lw x15, 6*4(sp)
- lw x16, 5*4(sp)
- lw x17, 4*4(sp)
- lw x28, 3*4(sp)
- lw x29, 2*4(sp)
- lw x30, 1*4(sp)
- lw x31, 0*4(sp)
- addi sp,sp,16*4
- mret
- .text
-
-
-crt_init:
- la sp, _fstack + 4
- la a0, trap_entry
- csrw mtvec, a0
-
-bss_init:
- la a0, _fbss
- la a1, _ebss
-bss_loop:
- beq a0,a1,bss_done
- sw zero,0(a0)
- add a0,a0,4
- j bss_loop
-bss_done:
-
- li a0, 0x880 //880 enable timer + external interrupt sources (until mstatus.MIE is set, they will never trigger an interrupt)
- csrw mie,a0
-
- call main
-infinit_loop:
- j infinit_loop