__builtin___clear_cache(0,0);
}
-void emulate_vxcptsave(trapframe_t* tf)
+static void emulate_vxcptsave(trapframe_t* tf)
{
long where = tf->gpr[(tf->insn >> 22) & 0x1F];
fencevl();
}
/*
 * Restore vector-unit exception state from the save area at `where`.
 *
 * NOTE(review): resolved unified-diff residue (kept the post-patch `static`
 * declaration) and removed a stray unbalanced '}' that followed the function
 * at top level, which was a syntax error. The minimal body — just a
 * vxcpthold() with `where` unused — plus that stray brace suggest the hunk
 * elided the real restore loop; verify against the upstream patch before
 * relying on this function.
 */
static void do_vxcptrestore(long* where)
{
  (void)where; /* save-area pointer unused in this (possibly truncated) body */
  vxcpthold();
}
-void emulate_vxcptrestore(trapframe_t* tf)
+static void emulate_vxcptrestore(trapframe_t* tf)
{
long* where = (long*)tf->gpr[(tf->insn >> 22) & 0x1F];
vxcptkill();
do_vxcptrestore(where);
}
// NOTE(review): everything below is UNRESOLVED unified-diff residue spanning
// at least two hunks: '-' lines are pre-patch, '+' lines are post-patch, and
// the function body never reaches a closing '}' before an unrelated Makefile
// hunk begins. Do not edit in place — resolve against the upstream patch.
-void restore_vector(trapframe_t* tf)
+static void restore_vector(trapframe_t* tf)
{
// Restores the vector-unit bank configuration saved in the trap frame.
mtpcr(PCR_VECBANK, tf->vecbank);
//vcfg(tf->veccfg);
// NOTE(review): from here on, the lines appear to belong to a DIFFERENT
// function (an illegal-instruction emulation/relocation path) whose header
// was elided from this hunk — confirm against the full file.
if (tf->insn == fssr)
terminate(1); // FP test on non-FP hardware. "succeed."
// The '+#if 0' / '+#endif' pair below disables the vector-exception
// emulation dispatch in the post-patch version.
+#if 0
else if ((tf->insn & 0xF83FFFFF) == 0x37B)
emulate_vxcptsave(tf);
else if ((tf->insn & 0xF83FFFFF) == 0x77B)
emulate_vxcptrestore(tf);
+#endif
else
assert(0);
// Skip the emulated 4-byte instruction.
tf->epc += 4;
// relocate
long adjustment = RELOC(0L), tmp;
// Point the exception vector at the relocated trap_entry, then jump the PC
// and stack pointer into the relocated image. The patch replaces the
// rdpc-based sequence with a jal-to-local-label sequence (presumably because
// rdpc was removed from the ISA — TODO confirm).
- asm volatile ("add sp, sp, %1; rdpc %0; addi %0, %0, 16; add %0, %0, %1; jr %0" : "=&r"(tmp) : "r"(adjustment));
+ asm volatile ("add sp, sp, %1\n"
+ "jal %0, 1f\n"
+ "1: add %0, %0, %1\n"
+ "jr %0, 8"
+ : "=&r"(tmp)
+ : "r"(adjustment));
// Clear the level-3 page table (relocated address) and flush the TLB/ATC.
memset(RELOC(&l3pt[0]), 0, MAX_TEST_PAGES*sizeof(pte_t));
mtpcr(PCR_FATC, 0);
# NOTE(review): this span was unresolved unified-diff residue from a Makefile
# hunk (a different file from the C code above). Resolved to the post-patch
# '+' lines: dump/hex targets now derive from $(spike_tests) instead of the
# full $(tests) list, and $(rv64ui_v_tests) is moved out of the trailing
# comment into the active spike_tests list.
$(eval $(call compile_template,rv64si))
$(eval $(call compile_template,rv64sv))

# Derived artifact lists: one .dump/.hex/.out per test the ISA simulator runs.
tests_dump = $(addsuffix .dump, $(spike_tests))
tests_hex = $(addsuffix .hex, $(spike_tests))
tests_out = $(addsuffix .out, $(spike_tests))

run: $(tests_out)

# Vector-test name lists, prefixed per target environment.
rv64ui_pt_vec_tests = $(addprefix rv64ui-pt-vec-, $(rv64ui_sc_vec_tests))
rv64ui_v_vec_tests = $(addprefix rv64ui-v-vec-, $(rv64ui_sc_vec_tests))

# Vector variants remain disabled (commented out) in the post-patch version.
spike_tests += $(rv64ui_p_tests) $(rv64ui_pm_tests) $(rv64ui_v_tests) #$(rv64ui_p_vec_tests) $(rv64ui_pt_vec_tests) $(rv64ui_v_vec_tests)