// See LICENSE for license details.
9 static void commit_log_stash_privilege(state_t
* state
)
11 #ifdef RISCV_ENABLE_COMMITLOG
12 state
->last_inst_priv
= state
->prv
;
16 static void commit_log_print_insn(state_t
* state
, reg_t pc
, insn_t insn
)
18 #ifdef RISCV_ENABLE_COMMITLOG
19 int32_t priv
= state
->last_inst_priv
;
20 uint64_t mask
= (insn
.length() == 8 ? uint64_t(0) : (uint64_t(1) << (insn
.length() * 8))) - 1;
21 if (state
->log_reg_write
.addr
) {
22 fprintf(stderr
, "%1d 0x%016" PRIx64
" (0x%08" PRIx64
") %c%2" PRIu64
" 0x%016" PRIx64
"\n",
26 state
->log_reg_write
.addr
& 1 ? 'f' : 'x',
27 state
->log_reg_write
.addr
>> 1,
28 state
->log_reg_write
.data
);
30 fprintf(stderr
, "%1d 0x%016" PRIx64
" (0x%08" PRIx64
")\n", priv
, pc
, insn
.bits() & mask
);
32 state
->log_reg_write
.addr
= 0;
// Record an executed PC for the optional PC-histogram feature; compiles
// to a no-op unless RISCV_ENABLE_HISTOGRAM is defined.
// NOTE(review): the body that increments the per-PC counter is elided
// from this chunk — confirm against the full file before relying on it.
36 inline void processor_t::update_histogram(reg_t pc
)
38 #ifdef RISCV_ENABLE_HISTOGRAM
43 static reg_t
execute_insn(processor_t
* p
, reg_t pc
, insn_fetch_t fetch
)
45 commit_log_stash_privilege(p
->get_state());
46 reg_t npc
= fetch
.func(p
, fetch
.insn
, pc
);
47 if (!invalid_pc(npc
)) {
48 commit_log_print_insn(p
->get_state(), pc
, fetch
.insn
);
49 p
->update_histogram(pc
);
54 // fetch/decode/execute loop
// Simulate up to `n` instructions on this hart.  First services
// debug-module interrupts / halt requests, then runs either a slow
// fetch/execute loop (when debugging or single-stepping) or a fast
// icache-driven loop, and finally accounts retired instructions into
// minstret.  NOTE(review): this chunk is elided — many lines of the
// original function (loop headers, the advance_pc switch, the icache
// miss path) are missing, so the code below is not contiguous.
55 void processor_t::step(size_t n
)
// Only look for new debug-mode entry reasons when not already halted
// for one (cause == DCSR_CAUSE_NONE).
57 if (state
.dcsr
.cause
== DCSR_CAUSE_NONE
) {
58 // TODO: get_interrupt() isn't super fast. Does that matter?
59 if (sim
->debug_module
.get_interrupt(id
)) {
// A debug-module interrupt takes priority over a halt request.
60 enter_debug_mode(DCSR_CAUSE_DEBUGINT
);
61 } else if (state
.dcsr
.halt
) {
62 enter_debug_mode(DCSR_CAUSE_HALT
);
65 // In Debug Mode, just do 11 steps at a time. Otherwise we're going to be
66 // spinning the rest of the time anyway.
67 n
= std::min(n
, (size_t) 11);
// advance_pc(): shared epilogue for both execution loops.  Valid PCs
// fall through; sentinel PCs are dispatched here — SERIALIZE_BEFORE
// marks the state serialized, SERIALIZE_AFTER counts the instruction
// as retired.  NOTE(review): the switch header and remaining cases are
// among the elided lines.
75 #define advance_pc() \
76 if (unlikely(invalid_pc(pc))) { \
78 case PC_SERIALIZE_BEFORE: state.serialized = true; break; \
79 case PC_SERIALIZE_AFTER: instret++; break; \
93 // When we might single step, use the slow loop instead of the fast one.
94 if (unlikely(debug
|| state
.single_step
!= state
.STEP_NONE
|| state
.dcsr
.cause
))
// Single-step state machine: STEPPING means "execute exactly one more
// instruction"; on the next pass STEPPED fires entry into Debug Mode.
98 if (unlikely(state
.single_step
== state
.STEP_STEPPING
)) {
99 state
.single_step
= state
.STEP_STEPPED
;
100 } else if (unlikely(state
.single_step
== state
.STEP_STEPPED
)) {
101 state
.single_step
= state
.STEP_NONE
;
102 enter_debug_mode(DCSR_CAUSE_STEP
);
103 // enter_debug_mode changed state.pc, so we can't just continue.
// Slow loop body: translate and fetch each instruction through the
// full MMU path, one instruction per iteration.
107 insn_fetch_t fetch
= mmu
->load_insn(pc
);
108 if (debug
&& !state
.serialized
)
110 pc
= execute_insn(this, pc
, fetch
);
// Fast loop: run instructions straight out of the instruction cache
// until the budget `n` is spent.
114 else while (instret
< n
)
116 size_t idx
= _mmu
->icache_index(pc
);
117 auto ic_entry
= _mmu
->access_icache(pc
);
// ICACHE_ACCESS(i): execute the i-th consecutive icache entry; jumps
// to `miss` when the next PC's tag no longer matches (e.g. after a
// taken branch) and breaks out when the instruction budget is spent or
// the end of the cache window is reached.  NOTE(review): the
// surrounding unrolled switch that invokes this macro is elided.
119 #define ICACHE_ACCESS(i) { \
120 insn_fetch_t fetch = ic_entry->data; \
122 pc = execute_insn(this, pc, fetch); \
123 if (i == mmu_t::ICACHE_ENTRIES-1) break; \
124 if (unlikely(ic_entry->tag != pc)) goto miss; \
125 if (unlikely(instret+1 == n)) break; \
139 // refill I$ if it looks like there wasn't a taken branch
140 if (pc
> (ic_entry
-1)->tag
&& pc
<= (ic_entry
-1)->tag
+ MAX_INSN_LENGTH
)
141 _mmu
->refill_icache(pc
, ic_entry
);
// Fold the instructions retired during this call into the
// architectural minstret counter.
150 state
.minstret
+= instret
;