#define SR_U64 0x00000020
#define SR_S64 0x00000040
#define SR_VM 0x00000080
-#define SR_EV 0x00000100
+#define SR_EA 0x00000100
#define SR_IM 0x00FF0000
#define SR_IP 0xFF000000
-#define SR_ZERO ~(SR_S|SR_PS|SR_EI|SR_PEI|SR_EF|SR_U64|SR_S64|SR_VM|SR_EV|SR_IM|SR_IP)
+#define SR_ZERO ~(SR_S|SR_PS|SR_EI|SR_PEI|SR_EF|SR_U64|SR_S64|SR_VM|SR_EA|SR_IM|SR_IP)
#define SR_IM_SHIFT 16
#define SR_IP_SHIFT 24
li x6, CAUSE_FAULT_FETCH
beq x3, x5, 1f
beq x3, x6, 1f
- lh x3,0(x4)
- lh x4,2(x4)
- sh x3, 36*REGBYTES(x2)
- sh x4,2+36*REGBYTES(x2)
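+  # not a fetch fault: save the trapping instruction (two halfwords) into the trapframe insn slot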
+ lh x5,0(x4)
+ lh x6,2(x4)
+ sh x5, 36*REGBYTES(x2)
+ sh x6,2+36*REGBYTES(x2)
1:
- #mfpcr x3,ASM_CR(PCR_VECBANK) # vecbank
- #STORE x3,37*REGBYTES(x2)
- #mfpcr x3,ASM_CR(PCR_VECCFG) # veccfg
- #STORE x3,38*REGBYTES(x2)
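+  # interrupts set the sign bit of cause; for them, record the Hwacha exception cause in the trapframe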
+ bge x3, x0, 1f
+ vxcptcause x3
+ STORE x3,37*REGBYTES(x2)
+1:
ret
move sp,x2
setpcr status, SR_EI
move a0,x2
-#if 0
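+  # if the accelerator is enabled (SR_EA), evacuate its state into the evac area at 38*REGBYTES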
mfpcr ra,status
- and ra,ra,SR_EV
+ and ra,ra,SR_EA
beqz ra, 2f
- addi x2,x2,39*REGBYTES
+ addi x2,x2,38*REGBYTES
vxcptsave x2
-#endif
2:jal handle_trap
# when coming from kernel, continue below its stack
#include "../pcr.h"
#include "../hwacha_xcpt.h"
-#define vvcfg(nxregs, nfregs) ({ \
- asm volatile ("vvcfg %0,%1" : : "r"(nxregs), "r"(nfregs)); })
-
-#define vsetvl(vl) ({ long __tmp; \
- asm volatile ("vsetvl %0,%1" : "=r"(__tmp) : "r"(vl)); })
-
-#define vcfg(word) ({ vvcfg((word)>>12, (word)>>18); vsetvl((word)); })
-
#define dword_bit_cmd(dw) ((dw >> 32) & 0x1)
#define dword_bit_cnt(dw) (!dword_bit_cmd(dw))
#define dword_bit_imm1(dw) ((dw >> 35) & 0x1)
#define dword_bit_imm2(dw) ((dw >> 34) & 0x1)
#define dword_bit_pf(dw) ((dw >> 36) & 0x1)
-#define fencevl() ({ \
- asm volatile ("fence.v.l" ::: "memory"); })
+#define fence() ({ \
+ asm volatile ("fence" ::: "memory"); })
#define vxcptkill() ({ \
asm volatile ("vxcptkill"); })
#define PGSHIFT 13
#define PGSIZE (1 << PGSHIFT)
-#define SIZEOF_TRAPFRAME_T 1336
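+/* 32 GPRs plus sr, epc, badvaddr, cause, insn, hwacha_cause (38 longs) and a 128-dword evac area: 166*8 = 1328 bytes */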
+#define SIZEOF_TRAPFRAME_T 1328
#ifndef __ASSEMBLER__
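+/* Inline wrappers around the Hwacha control instructions, used to save and restore vector state from C trap handlers. */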
+static inline void vsetcfg(long cfg)
+{
+ asm volatile ("vsetcfg %0" : : "r"(cfg));
+}
+
+static inline void vsetvl(long vl)
+{
+ long __tmp;
+ asm volatile ("vsetvl %0,%1" : "=r"(__tmp) : "r"(vl));
+}
+
+static inline long vgetcfg()
+{
+ int cfg;
+ asm volatile ("vgetcfg %0" : "=r"(cfg) :);
+ return cfg;
+}
+
+static inline long vgetvl()
+{
+  int vl;
+  asm volatile ("vgetvl %0" : "=r"(vl) :);
+  return vl;
+}
+
+static inline long vxcptaux()
+{
+  long aux;
+  asm volatile ("vxcptaux %0" : "=r"(aux) :);
+  return aux;
+}
+
+static inline void vxcptrestore(long* mem)
+{
+ asm volatile("vxcptrestore %0" : : "r"(mem) : "memory");
+}
+
+static inline void vxcptevac(long* mem)
+{
+  asm volatile ("vxcptevac %0" : : "r"(mem) : "memory");
+}
typedef unsigned long pte_t;
#define LEVELS (sizeof(pte_t) == sizeof(uint64_t) ? 3 : 2)
long badvaddr;
long cause;
long insn;
- long vecbank;
- long veccfg;
+ long hwacha_cause;
long evac[128];
} trapframe_t;
#endif
static void emulate_vxcptsave(trapframe_t* tf)
{
- long where = tf->gpr[(tf->insn >> 22) & 0x1F];
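+  /* the evacuation buffer address is taken from rs1, bits 19:15 of the trapping instruction */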
+ long* where = (long*)tf->gpr[(tf->insn >> 15) & 0x1F];
- asm volatile ("vxcptevac %0" : : "r"(where));
- fencevl();
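+  /* words 0 and 1 hold the vector cfg and vl; the evacuated state follows from word 2 */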
+ where[0] = vgetcfg();
+ where[1] = vgetvl();
+ vxcptevac(&where[2]);
+ fence();
}
static void do_vxcptrestore(long* where)
{
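+  /* reprogram cfg and vl before replaying the evacuated commands */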
+ vsetcfg(where[0]);
+ vsetvl(where[1]);
+
vxcpthold();
- int idx = 0;
+ int idx = 2;
long dword, cmd, pf;
int first = 1;
static void emulate_vxcptrestore(trapframe_t* tf)
{
- long* where = (long*)tf->gpr[(tf->insn >> 22) & 0x1F];
+ long* where = (long*)tf->gpr[(tf->insn >> 15) & 0x1F];
vxcptkill();
- //vcfg(tf->veccfg);
do_vxcptrestore(where);
}
static void restore_vector(trapframe_t* tf)
{
- mtpcr(PCR_VECBANK, tf->vecbank);
- //vcfg(tf->veccfg);
-
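+  /* on Rocket, replay the evacuated state by hand; otherwise use vxcptrestore directly */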
if (mfpcr(PCR_IMPL) == IMPL_ROCKET)
do_vxcptrestore(tf->evac);
else
- asm volatile("vxcptrestore %0" : : "r"(tf->evac) : "memory");
+ vxcptrestore(tf->evac);
}
void handle_trap(trapframe_t* tf)
}
else if (tf->cause == CAUSE_FAULT_LOAD || tf->cause == CAUSE_FAULT_STORE)
handle_fault(tf->badvaddr);
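+  /* shifting out the sign bit (the interrupt flag in cause) leaves the interrupt number for comparison */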
+ else if ((tf->cause << 1) == (IRQ_COP << 1))
+ {
+ if (tf->hwacha_cause == HWACHA_CAUSE_VF_FAULT_FETCH ||
+ tf->hwacha_cause == HWACHA_CAUSE_FAULT_LOAD ||
+ tf->hwacha_cause == HWACHA_CAUSE_FAULT_STORE)
+ {
+ long badvaddr = vxcptaux();
+ handle_fault(badvaddr);
+ }
+ else
+ assert(0);
+ }
else
assert(0);
out:
-#if 0
- if (!(tf->sr & SR_PS) && (tf->sr & SR_EV))
+ if (!(tf->sr & SR_PS) && (tf->sr & SR_EA)) {
restore_vector(tf);
-#endif
+ tf->sr |= SR_PEI;
+ }
pop_tf(tf);
}
trapframe_t tf;
memset(&tf, 0, sizeof(tf));
- tf.sr = SR_EF | SR_EV | SR_S | SR_U64 | SR_S64 | SR_VM;
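+  // supervisor mode with VM, FPU, and accelerator enabled; unmask the coprocessor interrupt and set PEI so it is deliverable after eret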
+ tf.sr = SR_PEI | ((1 << IRQ_COP) << SR_IM_SHIFT) | SR_EF | SR_EA | SR_S | SR_U64 | SR_S64 | SR_VM;
tf.epc = test_addr;
pop_tf(&tf);
rv64uf_pt_vec_tests = $(addprefix rv64uf-pt-vec-, $(rv64uf_sc_vec_tests))
rv64uf_v_vec_tests = $(addprefix rv64uf-v-vec-, $(rv64uf_sc_vec_tests))
-spike_tests += $(rv64uf_p_tests) $(rv64uf_v_tests) $(rv64uf_p_vec_tests) #$(rv64uf_pt_vec_tests) $(rv64uf_v_vec_tests)
+spike_tests += $(rv64uf_p_tests) $(rv64uf_v_tests) $(rv64uf_p_vec_tests) $(rv64uf_v_vec_tests) #$(rv64uf_pt_vec_tests)
rv64ui_pt_vec_tests = $(addprefix rv64ui-pt-vec-, $(rv64ui_sc_vec_tests))
rv64ui_v_vec_tests = $(addprefix rv64ui-v-vec-, $(rv64ui_sc_vec_tests))
-spike_tests += $(rv64ui_p_tests) $(rv64ui_pm_tests) $(rv64ui_v_tests) $(rv64ui_p_vec_tests) #$(rv64ui_pt_vec_tests) $(rv64ui_v_vec_tests)
+spike_tests += $(rv64ui_p_tests) $(rv64ui_pm_tests) $(rv64ui_v_tests) $(rv64ui_p_vec_tests) $(rv64ui_v_vec_tests) #$(rv64ui_pt_vec_tests)
rv64uv_pt_vec_tests = $(addprefix rv64uv-pt-vec-, $(rv64uv_sc_vec_tests))
rv64uv_v_vec_tests = $(addprefix rv64uv-v-vec-, $(rv64uv_sc_vec_tests))
-spike_tests += $(rv64uv_p_tests)
+spike_tests += $(rv64uv_p_tests) $(rv64uv_v_tests)