// See LICENSE for license details.

#include "processor.h"
#include "common.h"
#include "config.h"
#include "disasm.h"
#include "extension.h"
#include "htif.h"
#include "icache.h"
#include "sim.h"
#include <assert.h>
#include <limits.h>
#include <algorithm>
#include <cinttypes>
#include <cmath>
#include <cstdlib>
#include <iostream>
#include <stdexcept>
processor_t::processor_t(sim_t* _sim, mmu_t* _mmu, uint32_t _id)
- : sim(*_sim), mmu(*_mmu), id(_id), opcode_bits(0), utidx(0)
+ : sim(_sim), mmu(_mmu), ext(NULL), disassembler(new disassembler_t),
+ id(_id), run(false), debug(false)
{
reset(true);
- mmu.set_processor(this);
+ mmu->set_processor(this);
- #define DECLARE_INSN(name, match, mask) \
- register_insn(match, mask, (insn_func_t)&processor_t::rv32_##name, (insn_func_t)&processor_t::rv64_##name);
- #include "opcodes.h"
+ #define DECLARE_INSN(name, match, mask) REGISTER_INSN(this, name, match, mask)
+ #include "encoding.h"
#undef DECLARE_INSN
-
- // create microthreads
- for (int i=0; i<MAX_UTS; i++)
- uts[i] = new processor_t(&sim, &mmu, id, i);
-}
-
-processor_t::processor_t(sim_t* _sim, mmu_t* _mmu, uint32_t _id,
- uint32_t _utidx)
- : sim(*_sim), mmu(*_mmu), id(_id)
-{
- reset(true);
- set_pcr(PCR_SR, SR_U64 | SR_EF | SR_EV);
- utidx = _utidx;
-
- // microthreads don't possess their own microthreads
- for (int i=0; i<MAX_UTS; i++)
- uts[i] = NULL;
+ build_opcode_map();
}
// Destructor: intentionally empty. NOTE(review): the constructor allocates
// `new disassembler_t` and no delete is visible in this block — confirm the
// disassembler's ownership/cleanup is handled elsewhere (or is intentionally
// leaked for the life of the simulation).
processor_t::~processor_t()
{
}
-void processor_t::reset(bool value)
+void state_t::reset()
{
- if (run == !value)
- return;
- run = !value;
-
// the ISA guarantees on boot that the PC is 0x2000 and the the processor
// is in supervisor mode, and in 64-bit mode, if supported, with traps
// and virtual memory disabled.
- sr = 0;
- set_pcr(PCR_SR, SR_S | SR_S64 | SR_IM);
+ sr = SR_S | SR_S64;
pc = 0x2000;
// the following state is undefined upon boot-up,
pcr_k1 = 0;
count = 0;
compare = 0;
- cycle = 0;
- set_fsr(0);
-
- // vector stuff
- vecbanks = 0xff;
- vecbanks_count = 8;
- utidx = -1;
- vlmax = 32;
- vl = 0;
- nxfpr_bank = 256;
- nxpr_use = 32;
- nfpr_use = 32;
+ fflags = 0;
+ frm = 0;
+
+ load_reservation = -1;
}
-void processor_t::set_fsr(uint32_t val)
+void processor_t::set_debug(bool value)
{
- fsr = val & ~FSR_ZERO; // clear FSR bits that read as zero
+ debug = value;
+ if (ext)
+ ext->set_debug(value);
}
-void processor_t::vcfg()
+void processor_t::reset(bool value)
{
- if (nxpr_use + nfpr_use < 2)
- vlmax = nxfpr_bank * vecbanks_count;
- else
- vlmax = (nxfpr_bank / (nxpr_use + nfpr_use - 1)) * vecbanks_count;
+ if (run == !value)
+ return;
+ run = !value;
- vlmax = std::min(vlmax, MAX_UTS);
-}
+ state.reset(); // reset the core
+ set_pcr(CSR_STATUS, state.sr);
-void processor_t::setvl(int vlapp)
-{
- vl = std::min(vlmax, vlapp);
+ if (ext)
+ ext->reset(); // reset the extension
}
void processor_t::take_interrupt()
{
- uint32_t interrupts = (sr & SR_IP) >> SR_IP_SHIFT;
- interrupts &= (sr & SR_IM) >> SR_IM_SHIFT;
+ uint32_t interrupts = (state.sr & SR_IP) >> SR_IP_SHIFT;
+ interrupts &= (state.sr & SR_IM) >> SR_IM_SHIFT;
- if(interrupts && (sr & SR_ET))
- for(int i = 0; ; i++, interrupts >>= 1)
- if(interrupts & 1)
- throw interrupt_t(i);
+ if (interrupts && (state.sr & SR_EI))
+ for (int i = 0; ; i++, interrupts >>= 1)
+ if (interrupts & 1)
+ throw trap_t((1ULL << ((state.sr & SR_S64) ? 63 : 31)) + i);
}
-void processor_t::step(size_t n, bool noisy)
+void processor_t::step(size_t n)
{
if(!run)
return;
- size_t i = 0;
+ mmu_t* _mmu = mmu;
+ auto count32 = decltype(state.compare)(state.count);
+ bool count_le_compare = count32 <= state.compare;
+ n = std::min(n, size_t(state.compare - count32) | 1);
+
try
{
take_interrupt();
- mmu_t& _mmu = mmu;
- reg_t npc = pc;
-
// execute_insn fetches and executes one instruction
#define execute_insn(noisy) \
do { \
- mmu_t::insn_fetch_t fetch = _mmu.load_insn(npc, sr & SR_EC); \
- if(noisy) disasm(fetch.insn, npc); \
- npc = fetch.func(this, fetch.insn, npc); \
- pc = npc; \
+ insn_fetch_t fetch = mmu->load_insn(state.pc); \
+ if(noisy) disasm(fetch.insn.insn); \
+ state.pc = fetch.func(this, fetch.insn.insn, state.pc); \
+ } while(0)
+
+
+ // special execute_insn for commit log dumping
+#ifdef RISCV_ENABLE_COMMITLOG
+ //static disassembler disasmblr;
+ #undef execute_insn
+ #define execute_insn(noisy) \
+ do { \
+ insn_fetch_t fetch = _mmu->load_insn(state.pc); \
+ if(noisy) disasm(fetch.insn.insn); \
+ bool in_spvr = state.sr & SR_S; \
+ if (!in_spvr) fprintf(stderr, "\n0x%016" PRIx64 " (0x%08" PRIx32 ") ", state.pc, fetch.insn.insn.bits()); \
+ /*if (!in_spvr) fprintf(stderr, "\n0x%016" PRIx64 " (0x%08" PRIx32 ") %s ", state.pc, fetch.insn.insn.bits(), disasmblr.disassemble(fetch.insn.insn).c_str());*/ \
+ state.pc = fetch.func(this, fetch.insn.insn, state.pc); \
} while(0)
+#endif
- if(noisy) for( ; i < n; i++) // print out instructions as we go
- execute_insn(true);
- else
+ if (debug) // print out instructions as we go
{
- // unrolled for speed
- for( ; n > 3 && i < n-3; i+=4)
+ for (size_t i = 0; i < n; state.count++, i++)
+ execute_insn(true);
+ }
+ else while (n > 0)
+ {
+ size_t idx = (state.pc / sizeof(insn_t)) % ICACHE_SIZE;
+ auto ic_entry_init = &_mmu->icache[idx], ic_entry = ic_entry_init;
+
+ #define update_count() { \
+ size_t i = ic_entry - ic_entry_init; \
+ state.count += i; \
+ if (i >= n) break; \
+ n -= i; }
+
+ #define ICACHE_ACCESS(idx) { \
+ insn_t insn = ic_entry->data.insn.insn; \
+ insn_func_t func = ic_entry->data.func; \
+ if (unlikely(ic_entry->tag != state.pc)) break; \
+ ic_entry++; \
+ state.pc = func(this, insn, state.pc); }
+
+ switch (idx) while (true)
{
- execute_insn(false);
- execute_insn(false);
- execute_insn(false);
- execute_insn(false);
+ ICACHE_SWITCH;
+ update_count();
+ ic_entry_init = ic_entry = &_mmu->icache[0];
}
- for( ; i < n; i++)
- execute_insn(false);
+
+ _mmu->access_icache(state.pc);
+ update_count();
}
}
- catch(trap_t t)
+ catch(trap_t& t)
{
- // an exception occurred in the target processor
- take_trap(t,noisy);
- }
- catch(interrupt_t t)
- {
- take_trap((1ULL << (8*sizeof(reg_t)-1)) + t.i, noisy);
- }
- catch(vt_command_t cmd)
- {
- // this microthread has finished
- assert(cmd == vt_command_stop);
+ take_trap(t);
}
- cycle += i;
-
- // update timer and possibly register a timer interrupt
- uint32_t old_count = count;
- count += i;
- if(old_count < compare && uint64_t(old_count) + i >= compare)
+ bool count_ge_compare =
+ uint64_t(n) + decltype(state.compare)(state.count) >= state.compare;
+ if (count_le_compare && count_ge_compare)
set_interrupt(IRQ_TIMER, true);
}
-void processor_t::take_trap(reg_t t, bool noisy)
+void processor_t::take_trap(trap_t& t)
{
- if(noisy)
- {
- if ((sreg_t)t < 0)
- fprintf(stderr, "core %3d: interrupt %d, epc 0x%016" PRIx64 "\n",
- id, uint8_t(t), pc);
- else
- fprintf(stderr, "core %3d: trap %s, epc 0x%016" PRIx64 "\n",
- id, trap_name(trap_t(t)), pc);
- }
+ if (debug)
+ fprintf(stderr, "core %3d: exception %s, epc 0x%016" PRIx64 "\n",
+ id, t.name(), state.pc);
+
+ // switch to supervisor, set previous supervisor bit, disable interrupts
+ set_pcr(CSR_STATUS, (((state.sr & ~SR_EI) | SR_S) & ~SR_PS & ~SR_PEI) |
+ ((state.sr & SR_S) ? SR_PS : 0) |
+ ((state.sr & SR_EI) ? SR_PEI : 0));
- // switch to supervisor, set previous supervisor bit, disable traps
- set_pcr(PCR_SR, (((sr & ~SR_ET) | SR_S) & ~SR_PS) | ((sr & SR_S) ? SR_PS : 0));
- cause = t;
- epc = pc;
- pc = evec;
- badvaddr = mmu.get_badvaddr();
+ yield_load_reservation();
+ state.cause = t.cause();
+ state.epc = state.pc;
+ state.pc = state.evec;
+
+ t.side_effects(&state); // might set badvaddr etc.
}
// Deliver an inter-processor interrupt to this hart (no-op while in reset).
void processor_t::deliver_ipi()
{
  // CSR_CLEAR_IPI with bit 0 set asserts the IPI pending bit (see set_pcr).
  if (run)
    set_pcr(CSR_CLEAR_IPI, 1);
}
-void processor_t::disasm(insn_t insn, reg_t pc)
+void processor_t::disasm(insn_t insn)
{
// the disassembler is stateless, so we share it
- static disassembler disasm;
- fprintf(stderr, "core %3d: 0x%016" PRIx64 " (0x%08" PRIxFAST32 ") %s\n",
- id, pc, insn.bits, disasm.disassemble(insn).c_str());
+ fprintf(stderr, "core %3d: 0x%016" PRIx64 " (0x%08" PRIx32 ") %s\n",
+ id, state.pc, insn.bits(), disassembler->disassemble(insn).c_str());
}
-void processor_t::set_pcr(int which, reg_t val)
+reg_t processor_t::set_pcr(int which, reg_t val)
{
+ reg_t old_pcr = get_pcr(which);
+
switch (which)
{
- case PCR_SR:
- sr = (val & ~SR_IP) | (sr & SR_IP);
+ case CSR_FFLAGS:
+ state.fflags = val & (FSR_AEXC >> FSR_AEXC_SHIFT);
+ break;
+ case CSR_FRM:
+ state.frm = val & (FSR_RD >> FSR_RD_SHIFT);
+ break;
+ case CSR_FCSR:
+ state.fflags = (val & FSR_AEXC) >> FSR_AEXC_SHIFT;
+ state.frm = (val & FSR_RD) >> FSR_RD_SHIFT;
+ break;
+ case CSR_STATUS:
+ state.sr = (val & ~SR_IP) | (state.sr & SR_IP);
#ifndef RISCV_ENABLE_64BIT
- sr &= ~(SR_S64 | SR_U64);
+ state.sr &= ~(SR_S64 | SR_U64);
#endif
#ifndef RISCV_ENABLE_FPU
- sr &= ~SR_EF;
-#endif
-#ifndef RISCV_ENABLE_RVC
- sr &= ~SR_EC;
-#endif
-#ifndef RISCV_ENABLE_VEC
- sr &= ~SR_EV;
+ state.sr &= ~SR_EF;
#endif
- sr &= ~SR_ZERO;
- mmu.flush_tlb();
+ if (!ext)
+ state.sr &= ~SR_EA;
+ state.sr &= ~SR_ZERO;
+ rv64 = (state.sr & SR_S) ? (state.sr & SR_S64) : (state.sr & SR_U64);
+ mmu->flush_tlb();
break;
- case PCR_EPC:
- epc = val;
+ case CSR_EPC:
+ state.epc = val;
break;
- case PCR_EVEC:
- evec = val;
+ case CSR_EVEC:
+ state.evec = val;
break;
- case PCR_COUNT:
- count = val;
+ case CSR_CYCLE:
+ case CSR_TIME:
+ case CSR_INSTRET:
+ case CSR_COUNT:
+ state.count = val;
break;
- case PCR_COMPARE:
+ case CSR_COMPARE:
set_interrupt(IRQ_TIMER, false);
- compare = val;
+ state.compare = val;
break;
- case PCR_PTBR:
- mmu.set_ptbr(val);
+ case CSR_PTBR:
+ state.ptbr = val & ~(PGSIZE-1);
break;
- case PCR_SEND_IPI:
- sim.send_ipi(val);
+ case CSR_SEND_IPI:
+ sim->send_ipi(val);
break;
- case PCR_CLR_IPI:
+ case CSR_CLEAR_IPI:
set_interrupt(IRQ_IPI, val & 1);
break;
- case PCR_K0:
- pcr_k0 = val;
+ case CSR_SUP0:
+ state.pcr_k0 = val;
break;
- case PCR_K1:
- pcr_k1 = val;
+ case CSR_SUP1:
+ state.pcr_k1 = val;
break;
- case PCR_VECBANK:
- vecbanks = val & 0xff;
- vecbanks_count = __builtin_popcountll(vecbanks);
+ case CSR_TOHOST:
+ if (state.tohost == 0)
+ state.tohost = val;
break;
- case PCR_TOHOST:
- if (tohost == 0)
- tohost = val;
- break;
- case PCR_FROMHOST:
- set_interrupt(IRQ_HOST, val != 0);
- fromhost = val;
+ case CSR_FROMHOST:
+ set_fromhost(val);
break;
}
+
+ return old_pcr;
+}
+
+void processor_t::set_fromhost(reg_t val)
+{
+ set_interrupt(IRQ_HOST, val != 0);
+ state.fromhost = val;
}
// Read control register `which`, applying any read side effects
// (FATC flushes the TLB; tohost/fromhost tick the HTIF first).
// Unknown register numbers read as -1.
reg_t processor_t::get_pcr(int which)
{
  switch (which)
  {
    case CSR_FFLAGS:
      return state.fflags;
    case CSR_FRM:
      return state.frm;
    case CSR_FCSR:
      // fcsr is the concatenation of frm and fflags
      return (state.fflags << FSR_AEXC_SHIFT) | (state.frm << FSR_RD_SHIFT);
    case CSR_STATUS:
      return state.sr;
    case CSR_EPC:
      return state.epc;
    case CSR_BADVADDR:
      return state.badvaddr;
    case CSR_EVEC:
      return state.evec;
    case CSR_CYCLE:
    case CSR_TIME:
    case CSR_INSTRET:
    case CSR_COUNT:
      // all counters alias the single count register
      return state.count;
    case CSR_COMPARE:
      return state.compare;
    case CSR_CAUSE:
      return state.cause;
    case CSR_PTBR:
      return state.ptbr;
    case CSR_ASID:
      return 0;
    case CSR_FATC:
      // reading FATC flushes the address-translation cache
      mmu->flush_tlb();
      return 0;
    case CSR_HARTID:
      return id;
    case CSR_IMPL:
      return 1;
    case CSR_SUP0:
      return state.pcr_k0;
    case CSR_SUP1:
      return state.pcr_k1;
    case CSR_TOHOST:
      sim->get_htif()->tick(); // not necessary, but faster
      return state.tohost;
    case CSR_FROMHOST:
      sim->get_htif()->tick(); // not necessary, but faster
      return state.fromhost;
    default:
      return -1;
  }
}
// Set (on=true) or clear (on=false) the pending bit for interrupt `which`
// in the status register's SR_IP field.
void processor_t::set_interrupt(int which, bool on)
{
  uint32_t mask = (1 << (which + SR_IP_SHIFT)) & SR_IP;
  if (on)
    state.sr |= mask;
  else
    state.sr &= ~mask;
}
+
+reg_t illegal_instruction(processor_t* p, insn_t insn, reg_t pc)
+{
+ throw trap_illegal_instruction();
}
// Map an instruction word to its implementation: hash the low bits into
// opcode_map, then linearly scan the bucket for a match/mask hit. The scan
// always terminates because each bucket ends at the shared sentinel entry
// (match=mask=0, illegal_instruction) appended by build_opcode_map.
insn_func_t processor_t::decode_insn(insn_t insn)
{
  size_t mask = opcode_map.size()-1; // table size is a power of two
  insn_desc_t* desc = opcode_map[insn.bits() & mask];

  while ((insn.bits() & desc->mask) != desc->match)
    desc++;

  // select the implementation for the current effective XLEN
  return rv64 ? desc->rv64 : desc->rv32;
}
-reg_t processor_t::illegal_instruction(insn_t insn, reg_t pc)
+void processor_t::register_insn(insn_desc_t desc)
{
- throw trap_illegal_instruction;
+ assert(desc.mask & 1);
+ instructions.push_back(desc);
}
-void processor_t::register_insn(uint32_t match, uint32_t mask, insn_func_t rv32, insn_func_t rv64)
+void processor_t::build_opcode_map()
{
- assert(mask & 1);
- if (opcode_bits == 0 || (mask & ((1L << opcode_bits)-1)) != ((1L << opcode_bits)-1))
+ size_t buckets = -1;
+ for (auto& inst : instructions)
+ while ((inst.mask & buckets) != buckets)
+ buckets /= 2;
+ buckets++;
+
+ struct cmp {
+ decltype(insn_desc_t::match) mask;
+ cmp(decltype(mask) mask) : mask(mask) {}
+ bool operator()(const insn_desc_t& lhs, const insn_desc_t& rhs) {
+ if ((lhs.match & mask) != (rhs.match & mask))
+ return (lhs.match & mask) < (rhs.match & mask);
+ return lhs.match < rhs.match;
+ }
+ };
+ std::sort(instructions.begin(), instructions.end(), cmp(buckets-1));
+
+ opcode_map.resize(buckets);
+ opcode_store.resize(instructions.size() + 1);
+
+ size_t j = 0;
+ for (size_t b = 0, i = 0; b < buckets; b++)
{
- unsigned x = 0;
- while ((mask & ((1L << (x+1))-1)) == ((1L << (x+1))-1) &&
- (opcode_bits == 0 || x <= opcode_bits))
- x++;
- opcode_bits = x;
-
- decltype(opcode_map) new_map;
- for (auto it = opcode_map.begin(); it != opcode_map.end(); ++it)
- new_map.insert(std::make_pair(it->second.match & ((1L<<x)-1), it->second));
- opcode_map = new_map;
+ opcode_map[b] = &opcode_store[j];
+ while (i < instructions.size() && b == (instructions[i].match & (buckets-1)))
+ opcode_store[j++] = instructions[i++];
}
- opcode_map.insert(std::make_pair(match & ((1L<<opcode_bits)-1),
- (opcode_map_entry_t){match, mask, rv32, rv64}));
+ assert(j == opcode_store.size()-1);
+ opcode_store[j].match = opcode_store[j].mask = 0;
+ opcode_store[j].rv32 = &illegal_instruction;
+ opcode_store[j].rv64 = &illegal_instruction;
+}
+
+void processor_t::register_extension(extension_t* x)
+{
+ for (auto insn : x->get_instructions())
+ register_insn(insn);
+ build_opcode_map();
+ for (auto disasm_insn : x->get_disasms())
+ disassembler->add_insn(disasm_insn);
+ if (ext != NULL)
+ throw std::logic_error("only one extension may be registered");
+ ext = x;
+ x->set_processor(this);
}