#include "processor.h"
#include "mmu.h"
-#include "sim.h"
#include <cassert>
-static void commit_log_stash_privilege(state_t* state)
+static void commit_log_stash_privilege(processor_t* p)
{
#ifdef RISCV_ENABLE_COMMITLOG
+ state_t* state = p->get_state();
state->last_inst_priv = state->prv;
+ state->last_inst_xlen = p->get_xlen();
+ state->last_inst_flen = p->get_flen();
#endif
}
// Print a value to stderr as a zero-padded hex literal of the given bit
// width (16, 32, 64, or 128).  For 128-bit values the high and low
// 64-bit halves are passed separately; for narrower widths only `lo` is
// used and is truncated to the requested width.  Any other width is a
// caller bug, so we abort rather than emit a malformed log line.
static void commit_log_print_value(int width, uint64_t hi, uint64_t lo)
{
  switch (width) {
    case 16:
      fprintf(stderr, "0x%04" PRIx16, (uint16_t)lo);
      break;
    case 32:
      fprintf(stderr, "0x%08" PRIx32, (uint32_t)lo);
      break;
    case 64:
      fprintf(stderr, "0x%016" PRIx64, lo);
      break;
    case 128:
      fprintf(stderr, "0x%016" PRIx64 "%016" PRIx64, hi, lo);
      break;
    default:
      abort();
  }
}
// Emit one commit-log line for a retired instruction to stderr:
//   <priv> <pc> (<insn bits>) [<x|f><rd> <value>]
// The destination-register write, if any, was recorded by the executed
// instruction in state->log_reg_write: the LSB of .addr selects the FP
// ('f') vs integer ('x') register file and the remaining bits are the
// register number, so the printed value width follows flen vs xlen.
// Clears log_reg_write.addr so the next instruction starts clean.
// No-op unless built with commit logging enabled.
static void commit_log_print_insn(state_t* state, reg_t pc, insn_t insn)
{
#ifdef RISCV_ENABLE_COMMITLOG
  auto& reg = state->log_reg_write;
  int priv = state->last_inst_priv;
  int xlen = state->last_inst_xlen;
  int flen = state->last_inst_flen;

  fprintf(stderr, "%1d ", priv);
  commit_log_print_value(xlen, 0, pc);
  fprintf(stderr, " (");
  // insn.length() is in bytes; the printer takes a width in bits.
  commit_log_print_value(insn.length() * 8, 0, insn.bits());

  if (reg.addr) {
    bool fp = reg.addr & 1;       // odd addresses encode FP registers
    int rd = reg.addr >> 1;
    int size = fp ? flen : xlen;  // value width follows the register file
    fprintf(stderr, ") %c%2d ", fp ? 'f' : 'x', rd);
    // data.v[1]:v[0] holds the (possibly 128-bit) written value.
    commit_log_print_value(size, reg.data.v[1], reg.data.v[0]);
    fprintf(stderr, "\n");
  } else {
    fprintf(stderr, ")\n");
  }
  reg.addr = 0;
#endif
}
// function calls.
static reg_t execute_insn(processor_t* p, reg_t pc, insn_fetch_t fetch)
{
- commit_log_stash_privilege(p->get_state());
+ commit_log_stash_privilege(p);
reg_t npc = fetch.func(p, fetch.insn, pc);
if (!invalid_pc(npc)) {
commit_log_print_insn(p->get_state(), pc, fetch.insn);
void processor_t::step(size_t n)
{
if (state.dcsr.cause == DCSR_CAUSE_NONE) {
- // TODO: get_interrupt() isn't super fast. Does that matter?
- if (sim->debug_module.get_interrupt(id)) {
+ if (halt_request) {
enter_debug_mode(DCSR_CAUSE_DEBUGINT);
- } else if (state.dcsr.halt) {
+ } // !!!The halt bit in DCSR is deprecated.
+ else if (state.dcsr.halt) {
enter_debug_mode(DCSR_CAUSE_HALT);
}
- } else {
- // In Debug Mode, just do 11 steps at a time. Otherwise we're going to be
- // spinning the rest of the time anyway.
- n = std::min(n, (size_t) 11);
}
while (n > 0) {
if (unlikely(invalid_pc(pc))) { \
switch (pc) { \
case PC_SERIALIZE_BEFORE: state.serialized = true; break; \
- case PC_SERIALIZE_AFTER: instret++; break; \
+ case PC_SERIALIZE_AFTER: n = ++instret; break; \
default: abort(); \
} \
pc = state.pc; \
+ check_pc_alignment(pc); \
break; \
} else { \
state.pc = pc; \
try
{
- take_interrupt();
+ take_pending_interrupt();
if (unlikely(slow_path()))
{
while (instret < n)
{
+ if (unlikely(!state.serialized && state.single_step == state.STEP_STEPPED)) {
+ state.single_step = state.STEP_NONE;
+ enter_debug_mode(DCSR_CAUSE_STEP);
+ // enter_debug_mode changed state.pc, so we can't just continue.
+ break;
+ }
+
if (unlikely(state.single_step == state.STEP_STEPPING)) {
state.single_step = state.STEP_STEPPED;
}
if (debug && !state.serialized)
disasm(fetch.insn);
pc = execute_insn(this, pc, fetch);
- bool serialize_before = (pc == PC_SERIALIZE_BEFORE);
advance_pc();
- if (unlikely(state.single_step == state.STEP_STEPPED) && !serialize_before) {
- state.single_step = state.STEP_NONE;
- enter_debug_mode(DCSR_CAUSE_STEP);
- // enter_debug_mode changed state.pc, so we can't just continue.
- break;
+ if (unlikely(state.pc >= DEBUG_ROM_ENTRY &&
+ state.pc < DEBUG_END)) {
+ // We're waiting for the debugger to tell us something.
+ return;
}
+
}
}
else while (instret < n)
//
// According to Andrew Waterman's recollection, this optimization
// resulted in approximately a 2x performance increase.
- //
- // If there is support for compressed instructions, the mmu and the
- // switch statement get more complicated. Each branch target is stored
- // in the index corresponding to mmu->icache_index(), but consecutive
- // non-branching instructions are stored in consecutive indices even if
- // mmu->icache_index() specifies a different index (which is the case
- // for 32-bit instructions in the presence of compressed instructions).
// This figures out where to jump to in the switch statement
size_t idx = _mmu->icache_index(pc);
- // This gets the cached decoded instruction form the MMU. If the MMU
+ // This gets the cached decoded instruction from the MMU. If the MMU
// does not have the current pc cached, it will refill the MMU and
// return the correct entry. ic_entry->data.func is the C++ function
// corresponding to the instruction.
// is located within the execute_insn() function call.
#define ICACHE_ACCESS(i) { \
insn_fetch_t fetch = ic_entry->data; \
- ic_entry++; \
pc = execute_insn(this, pc, fetch); \
+ ic_entry = ic_entry->next; \
if (i == mmu_t::ICACHE_ENTRIES-1) break; \
- if (unlikely(ic_entry->tag != pc)) goto miss; \
+ if (unlikely(ic_entry->tag != pc)) break; \
if (unlikely(instret+1 == n)) break; \
instret++; \
state.pc = pc; \
}
advance_pc();
- continue;
-
-miss:
- advance_pc();
- // refill I$ if it looks like there wasn't a taken branch
- if (pc > (ic_entry-1)->tag && pc <= (ic_entry-1)->tag + MAX_INSN_LENGTH)
- _mmu->refill_icache(pc, ic_entry);
}
}
catch(trap_t& t)