self.gpr = GPR(decoder2, self, self.svstate, regfile)
self.spr = SPR(decoder2, initial_sprs) # initialise SPRs before MMU
self.mem = Mem(row_bytes=8, initial_mem=initial_mem)
+ self.imem = Mem(row_bytes=4, initial_mem=initial_insns)
+ # MMU mode: wrap both the data and instruction Mem in the RADIX MMU
if mmu:
self.mem = RADIX(self.mem, self)
- self.imem = Mem(row_bytes=4, initial_mem=initial_insns)
+ self.imem = RADIX(self.imem, self)
self.pc = PC()
self.msr = SelectableInt(initial_msr, 64) # underlying reg
else:
pc = self.fake_pc
self._pc = pc
- ins = self.imem.ld(pc, 4, False, True)
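+ # instruction fetch: pass instr_fetch=True so a RADIX-wrapped imem checks execute permission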
+ ins = self.imem.ld(pc, 4, False, True, instr_fetch=True)
if ins is None:
raise KeyError("no instruction at 0x%x" % pc)
print("setup: 0x%x 0x%x %s" % (pc, ins & 0xffffffff, bin(ins)))
print (" svstate.vl", self.svstate.vl.asint(msb0=True))
print (" svstate.mvl", self.svstate.maxvl.asint(msb0=True))
sv_rm = pfx.rm.asint(msb0=True)
- ins = self.imem.ld(pc+4, 4, False, True)
+ ins = self.imem.ld(pc+4, 4, False, True, instr_fetch=True)
print(" svsetup: 0x%x 0x%x %s" % (pc+4, ins & 0xffffffff, bin(ins)))
yield self.dec2.dec.raw_opcode_in.eq(ins & 0xffffffff) # v3.0B suffix
yield self.dec2.sv_rm.eq(sv_rm) # svp64 prefix
return shifter, mask
# TODO: Implement ld/st of lesser width
- def ld(self, address, width=8, swap=True, check_in_mem=False):
- print("ld from addr 0x{:x} width {:d}".format(address, width))
+ def ld(self, address, width=8, swap=True, check_in_mem=False,
+ instr_fetch=False):
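+ # instr_fetch is accepted for API compatibility with RADIX.ld; plain Mem only logs it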
+ print("ld from addr 0x{:x} width {:d}".format(address, width),
+ swap, check_in_mem, instr_fetch)
remainder = address & (self.bytes_per_word - 1)
address = address >> self.word_log2
assert remainder & (width - 1) == 0, "Unaligned access unsupported!"
print("RADIX memread", addr, sz, val)
return SelectableInt(val, sz*8)
- def ld(self, address, width=8, swap=True, check_in_mem=False):
+ def ld(self, address, width=8, swap=True, check_in_mem=False,
+ instr_fetch=False):
print("RADIX: ld from addr 0x%x width %d" % (address, width))
- mode = 'LOAD' # XXX TODO: executable load (icache)
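+ # an instruction fetch must be permission-checked as an execute access, not a data load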
+ if instr_fetch:
+ mode = 'EXECUTE'
+ else:
+ mode = 'LOAD'
addr = SelectableInt(address, 64)
(shift, mbits, pgbase) = self._decode_prte(addr)
#shift = SelectableInt(0, 32)
new_shift = shift + (31 - 12) - mbits
return new_shift
- def _check_perms(self, data, priv, iside, store):
+ def _check_perms(self, data, priv, instr_fetch, store):
"""check page permissions
// Leaf PDE |
// |------------------------------| |----------------|
# check permissions and RC bits
perm_ok = 0
if priv == 1 or data[60] == 0:
- if iside == 0:
+ if instr_fetch == 0:
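+ # data-side access: a store needs the R/W EAA bit, a load passes with Read or R/W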
perm_ok = data[62] | (data[61] & (store == 0))
# no IAMR, so no KUEP support for now
# deny execute permission if cache inhibited