from enum import Enum, unique
from nmigen import Module, Signal, Elaboratable, Cat, Repl, Array, Const
+from nmutil.util import Display
+
+from random import randint
+
from nmigen.cli import main
from nmutil.iocontrol import RecordObject
from nmigen.utils import log2_int
-from nmigen.cli import rtlil
-
-
from soc.experiment.mem_types import (LoadStore1ToDCacheType,
DCacheToLoadStore1Type,
MMUToDCacheType,
from soc.experiment.cache_ram import CacheRam
from soc.experiment.plru import PLRU
+# for test
+from nmigen_soc.wishbone.sram import SRAM
+from nmigen import Memory
+from nmigen.cli import rtlil
+if True:
+ from nmigen.back.pysim import Simulator, Delay, Settle
+else:
+ from nmigen.sim.cxxsim import Simulator, Delay, Settle
+from nmutil.util import wrap
+
# TODO: make these parameters of DCache at some point
LINE_SIZE = 64 # Line size in bytes
-NUM_LINES = 32 # Number of lines in a set
+NUM_LINES = 16 # Number of lines in a set
NUM_WAYS = 4 # Number of ways
TLB_SET_SIZE = 64 # L1 DTLB entries per set
TLB_NUM_WAYS = 2 # L1 DTLB number of sets
# to represent the full dcache
BRAM_ROWS = NUM_LINES * ROW_PER_LINE
+print ("ROW_SIZE", ROW_SIZE)
+print ("ROW_PER_LINE", ROW_PER_LINE)
+print ("BRAM_ROWS", BRAM_ROWS)
+print ("NUM_WAYS", NUM_WAYS)
# Bit fields counts in the address
WAY_BITS = log2_int(NUM_WAYS)
# Example of layout for 32 lines of 64 bytes:
-#
-# .. tag |index| line |
-# .. | row | |
-# .. | |---| | ROW_LINE_BITS (3)
-# .. | |--- - --| LINE_OFF_BITS (6)
-# .. | |- --| ROW_OFF_BITS (3)
-# .. |----- ---| | ROW_BITS (8)
-# .. |-----| | INDEX_BITS (5)
-# .. --------| | TAG_BITS (45)
+layout = """\
+ .. tag |index| line |
+ .. | row | |
+ .. | |---| | ROW_LINE_BITS (3)
+ .. | |--- - --| LINE_OFF_BITS (6)
+ .. | |- --| ROW_OFF_BITS (3)
+ .. |----- ---| | ROW_BITS (8)
+ .. |-----| | INDEX_BITS (5)
+ .. --------| | TAG_BITS (45)
+"""
+print (layout)
+print ("Dcache TAG %d IDX %d ROW %d ROFF %d LOFF %d RLB %d" % \
+ (TAG_BITS, INDEX_BITS, ROW_BITS,
+ ROW_OFF_BITS, LINE_OFF_BITS, ROW_LINE_BITS))
+print ("index @: %d-%d" % (LINE_OFF_BITS, SET_SIZE_BITS))
+print ("row @: %d-%d" % (LINE_OFF_BITS, ROW_OFF_BITS))
+print ("tag @: %d-%d width %d" % (SET_SIZE_BITS, REAL_ADDR_BITS, TAG_WIDTH))
TAG_RAM_WIDTH = TAG_WIDTH * NUM_WAYS
def CacheTagArray():
+    # one tag word per cache line: all NUM_WAYS tags for the line
+    # concatenated (TAG_RAM_WIDTH = TAG_WIDTH * NUM_WAYS, see above);
+    # per-entry names make the signals identifiable in waveforms
-    return Array(Signal(TAG_RAM_WIDTH) for x in range(NUM_LINES))
+    return Array(Signal(TAG_RAM_WIDTH, name="cachetag_%d" % x) \
+        for x in range(NUM_LINES))
def CacheValidBitsArray():
+    # one valid bit per way, per line: the new NUM_WAYS width matches
+    # how the bits are selected by way number (the old INDEX_BITS width
+    # only coincidentally had the right size)
-    return Array(Signal(INDEX_BITS) for x in range(NUM_LINES))
+    return Array(Signal(NUM_WAYS, name="cachevalid_%d" % x) \
+        for x in range(NUM_LINES))
def RowPerLineValidArray():
+    # one valid bit per row of a line, tracked while a line reloads
+    # (see r1.rows_valid usage in the reload state machine)
-    return Array(Signal() for x in range(ROW_PER_LINE))
+    return Array(Signal(name="rows_valid%d" % x) \
+        for x in range(ROW_PER_LINE))
# L1 TLB
TLB_SET_BITS = log2_int(TLB_SET_SIZE)
TLB_PTE_BITS = 64
TLB_PTE_WAY_BITS = TLB_NUM_WAYS * TLB_PTE_BITS;
+def ispow2(x):
+    """Return True iff x is an exact power of two.
+
+    Uses nmigen's floor log2_int: (1 << floor(log2(x))) == x only
+    round-trips for exact powers of two.
+    """
+    return (1<<log2_int(x, False)) == x
+
+# geometry sanity-checks: ispow2 replaces the old "% 2" tests, which
+# only verified evenness (e.g. 12 passed), not power-of-two-ness
assert (LINE_SIZE % ROW_SIZE) == 0, "LINE_SIZE not multiple of ROW_SIZE"
-assert (LINE_SIZE % 2) == 0, "LINE_SIZE not power of 2"
-assert (NUM_LINES % 2) == 0, "NUM_LINES not power of 2"
-assert (ROW_PER_LINE % 2) == 0, "ROW_PER_LINE not power of 2"
+assert ispow2(LINE_SIZE), "LINE_SIZE not power of 2"
+assert ispow2(NUM_LINES), "NUM_LINES not power of 2"
+assert ispow2(ROW_PER_LINE), "ROW_PER_LINE not power of 2"
assert ROW_BITS == (INDEX_BITS + ROW_LINE_BITS), "geometry bits don't add up"
assert (LINE_OFF_BITS == ROW_OFF_BITS + ROW_LINE_BITS), \
        "geometry bits don't add up"
return Array(Signal(TLB_PTE_WAY_BITS) for x in range(TLB_SET_SIZE))
def HitWaySet():
+    # per-TLB-way candidate cache way *number* (a WAY_BITS-wide index,
+    # not the NUM_WAYS one-hot the old width suggested)
-    return Array(Signal(NUM_WAYS) for x in range(TLB_NUM_WAYS))
+    return Array(Signal(WAY_BITS, name="hitway_%d" % x) \
+        for x in range(TLB_NUM_WAYS))
# Cache RAM interface
def CacheRamOut():
+    # per-way read-data bus from the cache BRAMs (WB_DATA_BITS wide)
-    return Array(Signal(WB_DATA_BITS) for x in range(NUM_WAYS))
+    return Array(Signal(WB_DATA_BITS, name="cache_out%d" % x) \
+        for x in range(NUM_WAYS))
# PLRU output interface
def PLRUOut():
# Return the index of a row within a line
def get_row_of_line(row):
+    # row may be wider than ROW_BITS at some call sites; truncate to
+    # ROW_BITS first, then take the low ROW_LINE_BITS (the row index
+    # within its cache line)
-    return row[:ROW_LINE_BITS]
+    return row[:ROW_BITS][:ROW_LINE_BITS]
# Returns whether this is the last row of a line
def is_last_row_addr(addr, last):
# Record for storing permission, attribute, etc. bits from a PTE
class PermAttr(RecordObject):
- def __init__(self):
- super().__init__()
+ def __init__(self, name=None):
+ super().__init__(name=name)
self.reference = Signal()
self.changed = Signal()
self.nocache = Signal()
# Stage 0 register, basically contains just the latched request
class RegStage0(RecordObject):
- def __init__(self):
- super().__init__()
- self.req = LoadStore1ToDCacheType()
+ def __init__(self, name=None):
+ super().__init__(name=name)
+ self.req = LoadStore1ToDCacheType(name="lsmem")
self.tlbie = Signal()
self.doall = Signal()
self.tlbld = Signal()
class MemAccessRequest(RecordObject):
- def __init__(self):
- super().__init__()
+ def __init__(self, name=None):
+ super().__init__(name=name)
self.op = Signal(Op)
self.valid = Signal()
self.dcbz = Signal()
# First stage register, contains state for stage 1 of load hits
# and for the state machine used by all other operations
class RegStage1(RecordObject):
- def __init__(self):
- super().__init__()
+ def __init__(self, name=None):
+ super().__init__(name=name)
# Info about the request
self.full = Signal() # have uncompleted request
self.mmu_req = Signal() # request is from MMU
- self.req = MemAccessRequest()
+ self.req = MemAccessRequest(name="reqmem")
# Cache hit state
self.hit_way = Signal(WAY_BITS)
self.hit_load_valid = Signal()
- self.hit_index = Signal(NUM_LINES)
+ self.hit_index = Signal(INDEX_BITS)
self.cache_hit = Signal()
# TLB hit state
self.write_bram = Signal()
self.write_tag = Signal()
self.slow_valid = Signal()
- self.wb = WBMasterOut()
+ self.real_adr = Signal(REAL_ADDR_BITS)
+ self.wb = WBMasterOut("wb")
self.reload_tag = Signal(TAG_BITS)
self.store_way = Signal(WAY_BITS)
self.store_row = Signal(ROW_BITS)
self.store_index = Signal(INDEX_BITS)
- self.end_row_ix = Signal(log2_int(ROW_LINE_BITS, False))
+ self.end_row_ix = Signal(ROW_LINE_BITS)
self.rows_valid = RowPerLineValidArray()
self.acks_pending = Signal(3)
self.inc_acks = Signal()
class DTLBUpdate(Elaboratable):
- def __init__(self, dtlb_valid_bits, dtlb_ptes):
+ def __init__(self):
self.tlbie = Signal()
self.tlbwe = Signal()
self.doall = Signal()
+ self.updated = Signal()
+ self.v_updated = Signal()
self.tlb_hit = Signal()
self.tlb_req_index = Signal(TLB_SET_BITS)
- self.dtlb_valid_bits = dtlb_valid_bits
- self.dtlb_ptes = dtlb_ptes
-
self.tlb_hit_way = Signal(TLB_WAY_BITS)
self.tlb_tag_way = Signal(TLB_TAG_WAY_BITS)
self.tlb_pte_way = Signal(TLB_PTE_WAY_BITS)
self.eatag = Signal(TLB_EA_TAG_BITS)
self.pte_data = Signal(TLB_PTE_BITS)
+ self.dv = Signal(TLB_PTE_WAY_BITS)
+
+ self.tb_out = Signal(TLB_TAG_WAY_BITS)
+ self.pb_out = Signal(TLB_NUM_WAYS)
+ self.db_out = Signal(TLB_PTE_WAY_BITS)
+
def elaborate(self, platform):
m = Module()
comb = m.d.comb
tagset = Signal(TLB_TAG_WAY_BITS)
pteset = Signal(TLB_PTE_WAY_BITS)
- vb = Signal(TLB_NUM_WAYS)
- db = Signal(TLB_PTE_WAY_BITS)
-
- sync += vb.eq(self.dtlb_valid_bits[self.tlb_req_index])
- sync += db.eq(self.dtlb_ptes[self.tlb_req_index])
+ tb_out, pb_out, db_out = self.tb_out, self.pb_out, self.db_out
with m.If(self.tlbie & self.doall):
- # clear all valid bits at once
- for i in range(TLB_SET_SIZE):
- sync += self.dtlb_valid_bits[i].eq(0)
-
+ pass # clear all back in parent
with m.Elif(self.tlbie):
with m.If(self.tlb_hit):
- sync += vb.bit_select(self.tlb_hit_way, 1).eq(Const(0, 1))
+ comb += db_out.eq(self.dv)
+ comb += db_out.bit_select(self.tlb_hit_way, 1).eq(1)
+ comb += self.v_updated.eq(1)
with m.Elif(self.tlbwe):
comb += tagset.eq(self.tlb_tag_way)
comb += write_tlb_tag(self.repl_way, tagset, self.eatag)
- sync += db.eq(tagset)
+ comb += tb_out.eq(tagset)
comb += pteset.eq(self.tlb_pte_way)
comb += write_tlb_pte(self.repl_way, pteset, self.pte_data)
- sync += db.eq(pteset)
+ comb += pb_out.eq(pteset)
- sync += vb.bit_select(self.repl_way, 1).eq(1)
+ comb += db_out.bit_select(self.repl_way, 1).eq(1)
+
+ comb += self.updated.eq(1)
+ comb += self.v_updated.eq(1)
+
+ return m
+
+
+class DCachePendingHit(Elaboratable):
+
+ def __init__(self, tlb_pte_way, tlb_valid_way, tlb_hit_way,
+ cache_valid_idx, cache_tag_set,
+ req_addr,
+ hit_set):
+
+ self.go = Signal()
+ self.virt_mode = Signal()
+ self.is_hit = Signal()
+ self.tlb_hit = Signal()
+ self.hit_way = Signal(WAY_BITS)
+ self.rel_match = Signal()
+ self.req_index = Signal(INDEX_BITS)
+ self.reload_tag = Signal(TAG_BITS)
+
+ self.tlb_hit_way = tlb_hit_way
+ self.tlb_pte_way = tlb_pte_way
+ self.tlb_valid_way = tlb_valid_way
+ self.cache_valid_idx = cache_valid_idx
+ self.cache_tag_set = cache_tag_set
+ self.req_addr = req_addr
+ self.hit_set = hit_set
+
+ def elaborate(self, platform):
+ m = Module()
+ comb = m.d.comb
+ sync = m.d.sync
+
+ go = self.go
+ virt_mode = self.virt_mode
+ is_hit = self.is_hit
+ tlb_pte_way = self.tlb_pte_way
+ tlb_valid_way = self.tlb_valid_way
+ cache_valid_idx = self.cache_valid_idx
+ cache_tag_set = self.cache_tag_set
+ req_addr = self.req_addr
+ tlb_hit_way = self.tlb_hit_way
+ tlb_hit = self.tlb_hit
+ hit_set = self.hit_set
+ hit_way = self.hit_way
+ rel_match = self.rel_match
+ req_index = self.req_index
+ reload_tag = self.reload_tag
+
+ rel_matches = Array(Signal(name="rel_matches_%d" % i) \
+ for i in range(TLB_NUM_WAYS))
+ hit_way_set = HitWaySet()
+
+ # Test if pending request is a hit on any way
+ # In order to make timing in virtual mode,
+ # when we are using the TLB, we compare each
+ # way with each of the real addresses from each way of
+ # the TLB, and then decide later which match to use.
+
+ with m.If(virt_mode):
+ for j in range(TLB_NUM_WAYS):
+ s_tag = Signal(TAG_BITS, name="s_tag%d" % j)
+ s_hit = Signal()
+ s_pte = Signal(TLB_PTE_BITS)
+ s_ra = Signal(REAL_ADDR_BITS)
+ comb += s_pte.eq(read_tlb_pte(j, tlb_pte_way))
+ comb += s_ra.eq(Cat(req_addr[0:TLB_LG_PGSZ],
+ s_pte[TLB_LG_PGSZ:REAL_ADDR_BITS]))
+ comb += s_tag.eq(get_tag(s_ra))
+
+ for i in range(NUM_WAYS):
+ is_tag_hit = Signal(name="is_tag_hit_%d_%d" % (j, i))
+ comb += is_tag_hit.eq(go & cache_valid_idx[i] &
+ (read_tag(i, cache_tag_set) == s_tag)
+ & tlb_valid_way[j])
+ with m.If(is_tag_hit):
+ comb += hit_way_set[j].eq(i)
+ comb += s_hit.eq(1)
+ comb += hit_set[j].eq(s_hit)
+ with m.If(s_tag == reload_tag):
+ comb += rel_matches[j].eq(1)
+ with m.If(tlb_hit):
+ comb += is_hit.eq(hit_set[tlb_hit_way])
+ comb += hit_way.eq(hit_way_set[tlb_hit_way])
+ comb += rel_match.eq(rel_matches[tlb_hit_way])
+ with m.Else():
+ s_tag = Signal(TAG_BITS)
+ comb += s_tag.eq(get_tag(req_addr))
+ for i in range(NUM_WAYS):
+ is_tag_hit = Signal(name="is_tag_hit_%d" % i)
+ comb += is_tag_hit.eq(go & cache_valid_idx[i] &
+ (read_tag(i, cache_tag_set) == s_tag))
+ with m.If(is_tag_hit):
+ comb += hit_way.eq(i)
+ comb += is_hit.eq(1)
+ with m.If(s_tag == reload_tag):
+ comb += rel_match.eq(1)
return m
while not idle...)
"""
def __init__(self):
- self.d_in = LoadStore1ToDCacheType()
- self.d_out = DCacheToLoadStore1Type()
+ self.d_in = LoadStore1ToDCacheType("d_in")
+ self.d_out = DCacheToLoadStore1Type("d_out")
- self.m_in = MMUToDCacheType()
- self.m_out = DCacheToMMUType()
+ self.m_in = MMUToDCacheType("m_in")
+ self.m_out = DCacheToMMUType("m_out")
self.stall_out = Signal()
sync = m.d.sync
d_in, d_out, m_in = self.d_in, self.d_out, self.m_in
- r = RegStage0()
+ r = RegStage0("stage0")
# TODO, this goes in unit tests and formal proofs
- with m.If(~(d_in.valid & m_in.valid)):
- #sync += Display("request collision loadstore vs MMU")
- pass
+ with m.If(d_in.valid & m_in.valid):
+ sync += Display("request collision loadstore vs MMU")
with m.If(m_in.valid):
sync += r.req.valid.eq(1)
sync += tlb_tag_way.eq(dtlb_tags[index])
sync += tlb_pte_way.eq(dtlb_ptes[index])
- def maybe_tlb_plrus(self, m, r1, tlb_plru_victim, acc, acc_en, lru):
+ def maybe_tlb_plrus(self, m, r1, tlb_plru_victim):
"""Generate TLB PLRUs
"""
comb = m.d.comb
sync = m.d.sync
- with m.If(TLB_NUM_WAYS > 1):
- for i in range(TLB_SET_SIZE):
- # TLB PLRU interface
- tlb_plru = PLRU(TLB_WAY_BITS)
- setattr(m.submodules, "maybe_plru_%d" % i, tlb_plru)
- tlb_plru_acc = Signal(TLB_WAY_BITS)
- tlb_plru_acc_en = Signal()
- tlb_plru_out = Signal(TLB_WAY_BITS)
-
- comb += tlb_plru.acc.eq(tlb_plru_acc)
- comb += tlb_plru.acc_en.eq(tlb_plru_acc_en)
- comb += tlb_plru.lru.eq(tlb_plru_out)
-
- # PLRU interface
- with m.If(r1.tlb_hit_index == i):
- comb += tlb_plru.acc_en.eq(r1.tlb_hit)
- with m.Else():
- comb += tlb_plru.acc_en.eq(0)
- comb += tlb_plru.acc.eq(r1.tlb_hit_way)
+ if TLB_NUM_WAYS == 0:
+ return
+ for i in range(TLB_SET_SIZE):
+ # TLB PLRU interface
+ tlb_plru = PLRU(WAY_BITS)
+ setattr(m.submodules, "maybe_plru_%d" % i, tlb_plru)
+ tlb_plru_acc_en = Signal()
- comb += tlb_plru_victim[i].eq(tlb_plru.lru)
+ comb += tlb_plru_acc_en.eq(r1.tlb_hit & (r1.tlb_hit_index == i))
+ comb += tlb_plru.acc_en.eq(tlb_plru_acc_en)
+ comb += tlb_plru.acc.eq(r1.tlb_hit_way)
+ comb += tlb_plru_victim[i].eq(tlb_plru.lru_o)
def tlb_search(self, m, tlb_req_index, r0, r0_valid,
tlb_valid_way, tlb_tag_way, tlb_hit_way,
comb += perm_attr.reference.eq(1)
comb += perm_attr.changed.eq(1)
- comb += perm_attr.priv.eq(1)
comb += perm_attr.nocache.eq(0)
+ comb += perm_attr.priv.eq(1)
comb += perm_attr.rd_perm.eq(1)
comb += perm_attr.wr_perm.eq(1)
dtlb_tags, tlb_pte_way, dtlb_ptes):
comb = m.d.comb
+ sync = m.d.sync
tlbie = Signal()
tlbwe = Signal()
comb += tlbie.eq(r0_valid & r0.tlbie)
comb += tlbwe.eq(r0_valid & r0.tlbld)
- m.submodules.tlb_update = d = DTLBUpdate(dtlb_valid_bits, dtlb_ptes)
+ m.submodules.tlb_update = d = DTLBUpdate()
+ with m.If(tlbie & r0.doall):
+ # clear all valid bits at once
+ for i in range(TLB_SET_SIZE):
+ sync += dtlb_valid_bits[i].eq(0)
+ with m.If(d.updated):
+ sync += dtlb_tags[tlb_req_index].eq(d.tb_out)
+ sync += dtlb_ptes[tlb_req_index].eq(d.pb_out)
+ with m.If(d.v_updated):
+ sync += dtlb_valid_bits[tlb_req_index].eq(d.db_out)
+
+ comb += d.dv.eq(dtlb_valid_bits[tlb_req_index])
+
comb += d.tlbie.eq(tlbie)
comb += d.tlbwe.eq(tlbwe)
comb += d.doall.eq(r0.doall)
comb = m.d.comb
sync = m.d.sync
+ if TLB_NUM_WAYS == 0:
+ return
+
for i in range(NUM_LINES):
# PLRU interface
- plru = PLRU(TLB_WAY_BITS)
+ plru = PLRU(WAY_BITS)
setattr(m.submodules, "plru%d" % i, plru)
- plru_acc = Signal(WAY_BITS)
plru_acc_en = Signal()
- plru_out = Signal(WAY_BITS)
- comb += plru.acc.eq(plru_acc)
+ comb += plru_acc_en.eq(r1.cache_hit & (r1.hit_index == i))
comb += plru.acc_en.eq(plru_acc_en)
- comb += plru_out.eq(plru.lru_o)
-
- with m.If(r1.hit_index == i):
- comb += plru_acc_en.eq(r1.cache_hit)
-
- comb += plru_acc.eq(r1.hit_way)
- comb += plru_victim[i].eq(plru_out)
+ comb += plru.acc.eq(r1.hit_way)
+ comb += plru_victim[i].eq(plru.lru_o)
def cache_tag_read(self, m, r0_stall, req_index, cache_tag_set, cache_tags):
"""Cache tag RAM read port
sync += cache_tag_set.eq(cache_tags[index])
def dcache_request(self, m, r0, ra, req_index, req_row, req_tag,
- r0_valid, r1, cache_valid_bits, replace_way,
+ r0_valid, r1, cache_valids, replace_way,
use_forward1_next, use_forward2_next,
req_hit_way, plru_victim, rc_ok, perm_attr,
valid_ra, perm_ok, access_ok, req_op, req_go,
opsel = Signal(3)
go = Signal()
nc = Signal()
- hit_set = Array(Signal() for i in range(TLB_NUM_WAYS))
- hit_way_set = HitWaySet()
- rel_matches = Array(Signal() for i in range(TLB_NUM_WAYS))
- rel_match = Signal()
+ hit_set = Array(Signal(name="hit_set_%d" % i) \
+ for i in range(TLB_NUM_WAYS))
+ cache_valid_idx = Signal(NUM_WAYS)
# Extract line, row and tag from request
comb += req_index.eq(get_index(r0.req.addr))
comb += req_row.eq(get_row(r0.req.addr))
comb += req_tag.eq(get_tag(ra))
- comb += go.eq(r0_valid & ~(r0.tlbie | r0.tlbld) & ~r1.ls_error)
-
- # Test if pending request is a hit on any way
- # In order to make timing in virtual mode,
- # when we are using the TLB, we compare each
- # way with each of the real addresses from each way of
- # the TLB, and then decide later which match to use.
-
- with m.If(r0.req.virt_mode):
- for j in range(TLB_NUM_WAYS):
- s_tag = Signal(TAG_BITS)
- s_hit = Signal()
- s_pte = Signal(TLB_PTE_BITS)
- s_ra = Signal(REAL_ADDR_BITS)
- comb += s_pte.eq(read_tlb_pte(j, tlb_pte_way))
- comb += s_ra.eq(Cat(r0.req.addr[0:TLB_LG_PGSZ],
- s_pte[TLB_LG_PGSZ:REAL_ADDR_BITS]))
- comb += s_tag.eq(get_tag(s_ra))
-
- for i in range(NUM_WAYS):
- is_tag_hit = Signal()
- comb += is_tag_hit.eq(go & cache_valid_bits[req_index][i] &
- (read_tag(i, cache_tag_set) == s_tag)
- & tlb_valid_way[j])
- with m.If(is_tag_hit):
- comb += hit_way_set[j].eq(i)
- comb += s_hit.eq(1)
- comb += hit_set[j].eq(s_hit)
- with m.If(s_tag == r1.reload_tag):
- comb += rel_matches[j].eq(1)
- with m.If(tlb_hit):
- comb += is_hit.eq(hit_set[tlb_hit_way])
- comb += hit_way.eq(hit_way_set[tlb_hit_way])
- comb += rel_match.eq(rel_matches[tlb_hit_way])
- with m.Else():
- s_tag = Signal(TAG_BITS)
- comb += s_tag.eq(get_tag(r0.req.addr))
- for i in range(NUM_WAYS):
- is_tag_hit = Signal()
- comb += is_tag_hit.eq(go & cache_valid_bits[req_index][i] &
- read_tag(i, cache_tag_set) == s_tag)
- with m.If(is_tag_hit):
- comb += hit_way.eq(i)
- comb += is_hit.eq(1)
- with m.If(s_tag == r1.reload_tag):
- comb += rel_match.eq(1)
+ if False: # display on comb is a bit... busy.
+ comb += Display("dcache_req addr:%x ra: %x idx: %x tag: %x row: %x",
+ r0.req.addr, ra, req_index, req_tag, req_row)
- comb += req_same_tag.eq(rel_match)
+ comb += go.eq(r0_valid & ~(r0.tlbie | r0.tlbld) & ~r1.ls_error)
+ comb += cache_valid_idx.eq(cache_valids[req_index])
+
+ m.submodules.dcache_pend = dc = DCachePendingHit(tlb_pte_way,
+ tlb_valid_way, tlb_hit_way,
+ cache_valid_idx, cache_tag_set,
+ r0.req.addr,
+ hit_set)
+
+ comb += dc.tlb_hit.eq(tlb_hit)
+ comb += dc.reload_tag.eq(r1.reload_tag)
+ comb += dc.virt_mode.eq(r0.req.virt_mode)
+ comb += dc.go.eq(go)
+ comb += dc.req_index.eq(req_index)
+ comb += is_hit.eq(dc.is_hit)
+ comb += hit_way.eq(dc.hit_way)
+ comb += req_same_tag.eq(dc.rel_match)
# See if the request matches the line currently being reloaded
with m.If((r1.state == State.RELOAD_WAIT_ACK) &
- (req_index == r1.store_index) & rel_match):
+ (req_index == r1.store_index) & req_same_tag):
# For a store, consider this a hit even if the row isn't
# valid since it will be by the time we perform the store.
# For a load, check the appropriate row valid bit.
- valid = r1.rows_valid[req_row % ROW_PER_LINE]
+ rrow = Signal(ROW_LINE_BITS)
+ comb += rrow.eq(req_row)
+ valid = r1.rows_valid[rrow]
comb += is_hit.eq(~r0.req.load | valid)
comb += hit_way.eq(replace_way)
# Whether to use forwarded data for a load or not
- comb += use_forward1_next.eq(0)
with m.If((get_row(r1.req.real_addr) == req_row) &
(r1.req.hit_way == hit_way)):
# Only need to consider r1.write_bram here, since if we
# cycles after the refill starts before we see the updated
# cache tag. In that case we don't use the bypass.)
comb += use_forward1_next.eq(r1.write_bram)
- comb += use_forward2_next.eq(0)
with m.If((r1.forward_row1 == req_row) & (r1.forward_way1 == hit_way)):
comb += use_forward2_next.eq(r1.forward_valid1)
# The way to replace on a miss
with m.If(r1.write_tag):
- replace_way.eq(plru_victim[r1.store_index])
+ comb += replace_way.eq(plru_victim[r1.store_index])
with m.Else():
comb += replace_way.eq(r1.store_way)
comb += rc_ok.eq(perm_attr.reference
& (r0.req.load | perm_attr.changed)
)
- comb += perm_ok.eq((r0.req.priv_mode | ~perm_attr.priv)
- & perm_attr.wr_perm
- | (r0.req.load & perm_attr.rd_perm)
- )
+ comb += perm_ok.eq((r0.req.priv_mode | ~perm_attr.priv) &
+ (perm_attr.wr_perm |
+ (r0.req.load & perm_attr.rd_perm)))
comb += access_ok.eq(valid_ra & perm_ok & rc_ok)
# Combine the request and cache hit status to decide what
# operation needs to be done
with m.Else():
comb += opsel.eq(Cat(is_hit, nc, r0.req.load))
with m.Switch(opsel):
- with m.Case(0b101):
- comb += op.eq(Op.OP_LOAD_HIT)
- with m.Case(0b100):
- comb += op.eq(Op.OP_LOAD_MISS)
- with m.Case(0b110):
- comb += op.eq(Op.OP_LOAD_NC)
- with m.Case(0b001):
- comb += op.eq(Op.OP_STORE_HIT)
- with m.Case(0b000):
- comb += op.eq(Op.OP_STORE_MISS)
- with m.Case(0b010):
- comb += op.eq(Op.OP_STORE_MISS)
- with m.Case(0b011):
- comb += op.eq(Op.OP_BAD)
- with m.Case(0b111):
- comb += op.eq(Op.OP_BAD)
- with m.Default():
- comb += op.eq(Op.OP_NONE)
+ with m.Case(0b101): comb += op.eq(Op.OP_LOAD_HIT)
+ with m.Case(0b100): comb += op.eq(Op.OP_LOAD_MISS)
+ with m.Case(0b110): comb += op.eq(Op.OP_LOAD_NC)
+ with m.Case(0b001): comb += op.eq(Op.OP_STORE_HIT)
+ with m.Case(0b000): comb += op.eq(Op.OP_STORE_MISS)
+ with m.Case(0b010): comb += op.eq(Op.OP_STORE_MISS)
+ with m.Case(0b011): comb += op.eq(Op.OP_BAD)
+ with m.Case(0b111): comb += op.eq(Op.OP_BAD)
comb += req_op.eq(op)
comb += req_go.eq(go)
sync = m.d.sync
with m.If(r0_valid & r0.req.reserve):
-
# XXX generate alignment interrupt if address
# is not aligned XXX or if r0.req.nc = '1'
with m.If(r0.req.load):
comb += set_rsrv.eq(1) # load with reservation
with m.Else():
comb += clear_rsrv.eq(1) # store conditional
- with m.If(~reservation.valid | r0.req.addr[LINE_OFF_BITS:64]):
+ with m.If(~reservation.valid |
+ (r0.req.addr[LINE_OFF_BITS:64] != reservation.addr)):
comb += cancel_store.eq(1)
def reservation_reg(self, m, r0_valid, access_ok, set_rsrv, clear_rsrv,
# Request came from loadstore1...
# Load hit case is the standard path
with m.If(r1.hit_load_valid):
- #Display(f"completing load hit data={data_out}")
- pass
+ sync += Display("completing load hit data=%x", data_out)
# error cases complete without stalling
with m.If(r1.ls_error):
- # Display("completing ld/st with error")
- pass
+ sync += Display("completing ld/st with error")
# Slow ops (load miss, NC, stores)
with m.If(r1.slow_valid):
- #Display(f"completing store or load miss data={data_out}")
- pass
+ sync += Display("completing store or load miss data=%x",
+ data_out)
with m.Else():
# Request came from MMU
with m.If(r1.hit_load_valid):
- # Display(f"completing load hit to MMU, data={m_out.data}")
- pass
+ sync += Display("completing load hit to MMU, data=%x",
+ m_out.data)
# error cases complete without stalling
with m.If(r1.mmu_error):
- #Display("combpleting MMU ld with error")
- pass
+ sync += Display("combpleting MMU ld with error")
# Slow ops (i.e. load miss)
with m.If(r1.slow_valid):
- #Display("completing MMU load miss, data={m_out.data}")
- pass
+ sync += Display("completing MMU load miss, data=%x",
+ m_out.data)
def rams(self, m, r1, early_req_row, cache_out, replace_way):
"""rams
wb_in = self.wb_in
for i in range(NUM_WAYS):
- do_read = Signal()
+ do_read = Signal(name="do_rd%d" % i)
rd_addr = Signal(ROW_BITS)
- do_write = Signal()
+ do_write = Signal(name="do_wr%d" % i)
wr_addr = Signal(ROW_BITS)
wr_data = Signal(WB_DATA_BITS)
wr_sel = Signal(ROW_SIZE)
wr_sel_m = Signal(ROW_SIZE)
- _d_out = Signal(WB_DATA_BITS)
+ _d_out = Signal(WB_DATA_BITS, name="dout_%d" % i)
way = CacheRam(ROW_BITS, WB_DATA_BITS, True)
setattr(m.submodules, "cacheram_%d" % i, way)
# Cache hit reads
comb += do_read.eq(1)
- comb += rd_addr.eq(early_req_row)
+ comb += rd_addr.eq(early_req_row[:ROW_BITS])
comb += cache_out[i].eq(_d_out)
# Write mux:
& wb_in.ack & (replace_way == i)):
comb += do_write.eq(1)
- # Mask write selects with do_write since BRAM
- # doesn't have a global write-enable
- with m.If(do_write):
- comb += wr_sel_m.eq(wr_sel)
+ # Mask write selects with do_write since BRAM
+ # doesn't have a global write-enable
+ with m.If(do_write):
+ comb += wr_sel_m.eq(wr_sel)
# Cache hit synchronous machine for the easy case.
# This handles load hits.
# It also handles error cases (TLB miss, cache paradox)
def dcache_fast_hit(self, m, req_op, r0_valid, r0, r1,
- req_hit_way, req_index, access_ok,
+ req_hit_way, req_index, req_tag, access_ok,
tlb_hit, tlb_hit_way, tlb_req_index):
comb = m.d.comb
sync = m.d.sync
with m.If(req_op != Op.OP_NONE):
- #Display(f"op:{req_op} addr:{r0.req.addr} nc: {r0.req.nc}" \
- # f"idx:{req_index} tag:{req_tag} way: {req_hit_way}"
- # )
- pass
+ sync += Display("op:%d addr:%x nc: %d idx: %x tag: %x way: %x",
+ req_op, r0.req.addr, r0.req.nc,
+ req_index, req_tag, req_hit_way)
with m.If(r0_valid):
sync += r1.mmu_req.eq(r0.mmu_req)
# All wishbone requests generation is done here.
# This machine operates at stage 1.
def dcache_slow(self, m, r1, use_forward1_next, use_forward2_next,
- cache_valid_bits, r0, replace_way,
+ cache_valids, r0, replace_way,
req_hit_way, req_same_tag,
- r0_valid, req_op, cache_tag, req_go, ra):
+ r0_valid, req_op, cache_tags, req_go, ra):
comb = m.d.comb
sync = m.d.sync
wb_in = self.wb_in
- req = MemAccessRequest()
+ req = MemAccessRequest("mreq_ds")
acks = Signal(3)
adjust_acks = Signal(3)
- stbs_done = Signal()
+
+ req_row = Signal(ROW_BITS)
+ req_idx = Signal(INDEX_BITS)
+ req_tag = Signal(TAG_BITS)
+ comb += req_idx.eq(get_index(req.real_addr))
+ comb += req_row.eq(get_row(req.real_addr))
+ comb += req_tag.eq(get_tag(req.real_addr))
sync += r1.use_forward1.eq(use_forward1_next)
sync += r1.forward_sel.eq(0)
for i in range(NUM_WAYS):
with m.If(i == replace_way):
ct = Signal(TAG_RAM_WIDTH)
- comb += ct.eq(cache_tag[r1.store_index])
+ comb += ct.eq(cache_tags[r1.store_index])
comb += ct.word_select(i, TAG_WIDTH).eq(r1.reload_tag)
- sync += cache_tag[r1.store_index].eq(ct)
+ sync += cache_tags[r1.store_index].eq(ct)
sync += r1.store_way.eq(replace_way)
sync += r1.write_tag.eq(0)
with m.Switch(r1.state):
with m.Case(State.IDLE):
-# XXX check 'left downto. probably means len(r1.wb.adr)
-# r1.wb.adr <= req.real_addr(
-# r1.wb.adr'left downto 0
-# );
- sync += r1.wb.adr.eq(req.real_addr)
+ sync += r1.real_adr.eq(req.real_addr)
sync += r1.wb.sel.eq(req.byte_sel)
sync += r1.wb.dat.eq(req.data)
sync += r1.dcbz.eq(req.dcbz)
# Keep track of our index and way
# for subsequent stores.
- sync += r1.store_index.eq(get_index(req.real_addr))
- sync += r1.store_row.eq(get_row(req.real_addr))
- sync += r1.end_row_ix.eq(
- get_row_of_line(get_row(req.real_addr))
- )
- sync += r1.reload_tag.eq(get_tag(req.real_addr))
+ sync += r1.store_index.eq(req_idx)
+ sync += r1.store_row.eq(req_row)
+ sync += r1.end_row_ix.eq(get_row_of_line(req_row))
+ sync += r1.reload_tag.eq(req_tag)
sync += r1.req.same_tag.eq(1)
with m.If(req.op == Op.OP_STORE_HIT):
for i in range(ROW_PER_LINE):
sync += r1.rows_valid[i].eq(0)
+ with m.If(req_op != Op.OP_NONE):
+ sync += Display("cache op %d", req.op)
+
with m.Switch(req.op):
with m.Case(Op.OP_LOAD_HIT):
# stay in IDLE state
pass
with m.Case(Op.OP_LOAD_MISS):
- #Display(f"cache miss real addr:" \
- # f"{req_real_addr}" \
- # f" idx:{get_index(req_real_addr)}" \
- # f" tag:{get_tag(req.real_addr)}")
- pass
+ sync += Display("cache miss real addr: %x " \
+ "idx: %x tag: %x",
+ req.real_addr, req_row, req_tag)
# Start the wishbone cycle
sync += r1.wb.we.eq(0)
with m.If(req.op == Op.OP_STORE_HIT):
sync += r1.write_bram.eq(1)
with m.Else():
+ # dcbz is handled much like a load miss except
+ # that we are writing to memory instead of reading
sync += r1.state.eq(State.RELOAD_WAIT_ACK)
with m.If(req.op == Op.OP_STORE_MISS):
pass
with m.Case(State.RELOAD_WAIT_ACK):
+ ld_stbs_done = Signal()
# Requests are all sent if stb is 0
- comb += stbs_done.eq(~r1.wb.stb)
+ comb += ld_stbs_done.eq(~r1.wb.stb)
- with m.If(~wb_in.stall & ~stbs_done):
+ with m.If((~wb_in.stall) & r1.wb.stb):
# That was the last word?
# We are done sending.
- # Clear stb and set stbs_done
+ # Clear stb and set ld_stbs_done
# so we can handle an eventual
# last ack on the same cycle.
- with m.If(is_last_row_addr(
- r1.wb.adr, r1.end_row_ix)):
+ with m.If(is_last_row_addr(r1.real_adr, r1.end_row_ix)):
sync += r1.wb.stb.eq(0)
- comb += stbs_done.eq(0)
+ comb += ld_stbs_done.eq(1)
# Calculate the next row address in the current cache line
- rarange = r1.wb.adr[ROW_OFF_BITS : LINE_OFF_BITS]
- sync += rarange.eq(rarange + 1)
+ row = Signal(LINE_OFF_BITS-ROW_OFF_BITS)
+ comb += row.eq(r1.real_adr[ROW_OFF_BITS:])
+ sync += r1.real_adr[ROW_OFF_BITS:LINE_OFF_BITS].eq(row+1)
# Incoming acks processing
sync += r1.forward_valid1.eq(wb_in.ack)
with m.If(wb_in.ack):
- # XXX needs an Array bit-accessor here
- sync += r1.rows_valid[r1.store_row % ROW_PER_LINE].eq(1)
+ srow = Signal(ROW_LINE_BITS)
+ comb += srow.eq(r1.store_row)
+ sync += r1.rows_valid[srow].eq(1)
# If this is the data we were looking for,
# we can complete the request next cycle.
sync += r1.use_forward1.eq(1)
# Check for completion
- with m.If(stbs_done & is_last_row(r1.store_row,
+ with m.If(ld_stbs_done & is_last_row(r1.store_row,
r1.end_row_ix)):
# Complete wishbone cycle
sync += r1.wb.cyc.eq(0)
# Cache line is now valid
cv = Signal(INDEX_BITS)
- sync += cv.eq(cache_valid_bits[r1.store_index])
- sync += cv.bit_select(r1.store_way, 1).eq(1)
+ comb += cv.eq(cache_valids[r1.store_index])
+ comb += cv.bit_select(r1.store_way, 1).eq(1)
+ sync += cache_valids[r1.store_index].eq(cv)
sync += r1.state.eq(State.IDLE)
# Increment store row counter
sync += r1.store_row.eq(next_row(r1.store_row))
with m.Case(State.STORE_WAIT_ACK):
- comb += stbs_done.eq(~r1.wb.stb)
+ st_stbs_done = Signal()
+ comb += st_stbs_done.eq(~r1.wb.stb)
comb += acks.eq(r1.acks_pending)
with m.If(r1.inc_acks != r1.dec_acks):
# to be done which is in the same real page.
with m.If(req.valid):
ra = req.real_addr[0:SET_SIZE_BITS]
- sync += r1.wb.adr[0:SET_SIZE_BITS].eq(ra)
+ sync += r1.real_adr[0:SET_SIZE_BITS].eq(ra)
sync += r1.wb.dat.eq(req.data)
sync += r1.wb.sel.eq(req.byte_sel)
((req.op == Op.OP_STORE_MISS)
| (req.op == Op.OP_STORE_HIT))):
sync += r1.wb.stb.eq(1)
- comb += stbs_done.eq(0)
+ comb += st_stbs_done.eq(0)
with m.If(req.op == Op.OP_STORE_HIT):
sync += r1.write_bram.eq(1)
# Store requests never come from the MMU
sync += r1.ls_valid.eq(1)
- comb += stbs_done.eq(0)
+ comb += st_stbs_done.eq(0)
sync += r1.inc_acks.eq(1)
with m.Else():
sync += r1.wb.stb.eq(0)
- comb += stbs_done.eq(1)
+ comb += st_stbs_done.eq(1)
# Got ack ? See if complete.
with m.If(wb_in.ack):
- with m.If(stbs_done & (adjust_acks == 1)):
+ with m.If(st_stbs_done & (adjust_acks == 1)):
sync += r1.state.eq(State.IDLE)
sync += r1.wb.cyc.eq(0)
sync += r1.wb.stb.eq(0)
sync += log_out.eq(Cat(r1.state[:3], valid_ra, tlb_hit_way[:3],
stall_out, req_op[:3], d_out.valid, d_out.error,
r1.wb.cyc, r1.wb.stb, wb_in.ack, wb_in.stall,
- r1.wb.adr[3:6]))
+ r1.real_adr[3:6]))
def elaborate(self, platform):
# Storage. Hopefully "cache_rows" is a BRAM, the rest is LUTs
cache_tags = CacheTagArray()
cache_tag_set = Signal(TAG_RAM_WIDTH)
- cache_valid_bits = CacheValidBitsArray()
+ # renamed cache_valid_bits -> cache_valids for brevity/consistency
+ # (same rename carried through the sub-function calls below)
+ cache_valids = CacheValidBitsArray()
# TODO attribute ram_style : string;
# TODO attribute ram_style of cache_tags : signal is "distributed";
# TODO attribute ram_style of
# dtlb_ptes : signal is "distributed";
- r0 = RegStage0()
+ # records are now given explicit names so their signals are
+ # identifiable in simulation waveforms / generated RTL
+ r0 = RegStage0("r0")
r0_full = Signal()
- r1 = RegStage1()
+ r1 = RegStage1("r1")
reservation = Reservation()
pte = Signal(TLB_PTE_BITS)
ra = Signal(REAL_ADDR_BITS)
valid_ra = Signal()
- perm_attr = PermAttr()
+ perm_attr = PermAttr("dc_perms")
rc_ok = Signal()
perm_ok = Signal()
access_ok = Signal()
comb += self.stall_out.eq(r0_stall)
# Wire up wishbone request latch out of stage 1
+ # the wishbone bus is row-addressed: drop the ROW_OFF_BITS LSBs of
+ # the full real address (kept in r1.real_adr) when driving wb.adr
+ comb += r1.wb.adr.eq(r1.real_adr[ROW_OFF_BITS:]) # truncate LSBs
comb += self.wb_out.eq(r1.wb)
# call sub-functions putting everything together, using shared
tlb_hit_way, tlb_hit, tlb_plru_victim, tlb_tag_way,
dtlb_tags, tlb_pte_way, dtlb_ptes)
self.maybe_plrus(m, r1, plru_victim)
+ # TLB PLRUs were previously not wired in; now hooked up alongside
+ # the cache-way PLRUs
+ self.maybe_tlb_plrus(m, r1, tlb_plru_victim)
self.cache_tag_read(m, r0_stall, req_index, cache_tag_set, cache_tags)
self.dcache_request(m, r0, ra, req_index, req_row, req_tag,
- r0_valid, r1, cache_valid_bits, replace_way,
+ r0_valid, r1, cache_valids, replace_way,
use_forward1_next, use_forward2_next,
req_hit_way, plru_victim, rc_ok, perm_attr,
valid_ra, perm_ok, access_ok, req_op, req_go,
self.writeback_control(m, r1, cache_out)
self.rams(m, r1, early_req_row, cache_out, replace_way)
self.dcache_fast_hit(m, req_op, r0_valid, r0, r1,
- req_hit_way, req_index, access_ok,
+ req_hit_way, req_index, req_tag, access_ok,
tlb_hit, tlb_hit_way, tlb_req_index)
self.dcache_slow(m, r1, use_forward1_next, use_forward2_next,
- cache_valid_bits, r0, replace_way,
+ cache_valids, r0, replace_way,
req_hit_way, req_same_tag,
r0_valid, req_op, cache_tags, req_go, ra)
#self.dcache_log(m, r1, valid_ra, tlb_hit_way, stall_out)
return m
+def dcache_load(dut, addr, nc=0):
+ # Simulation helper: issue a load on dut.d_in, wait for d_out.valid,
+ # and return the 64-bit result. All byte lanes are selected (~0).
+ # nc=1 marks the access non-cacheable.
+ yield dut.d_in.load.eq(1)
+ yield dut.d_in.nc.eq(nc)
+ yield dut.d_in.addr.eq(addr)
+ yield dut.d_in.byte_sel.eq(~0)
+ # pulse valid for exactly one cycle, then deassert
+ yield dut.d_in.valid.eq(1)
+ yield
+ yield dut.d_in.valid.eq(0)
+ yield dut.d_in.byte_sel.eq(0)
+ yield
+ # spin until the dcache signals the load result is available
+ while not (yield dut.d_out.valid):
+ yield
+ data = yield dut.d_out.data
+ return data
+
+
+def dcache_store(dut, addr, data, nc=0):
+ # Simulation helper: issue a full-width (byte_sel=~0) store on
+ # dut.d_in and wait for d_out.valid acknowledging completion.
+ yield dut.d_in.load.eq(0)
+ yield dut.d_in.nc.eq(nc)
+ yield dut.d_in.data.eq(data)
+ yield dut.d_in.byte_sel.eq(~0)
+ yield dut.d_in.addr.eq(addr)
+ # pulse valid for exactly one cycle, then deassert
+ yield dut.d_in.valid.eq(1)
+ yield
+ yield dut.d_in.valid.eq(0)
+ yield dut.d_in.byte_sel.eq(0)
+ yield
+ # spin until the dcache acknowledges the store
+ while not (yield dut.d_out.valid):
+ yield
+
+
+def dcache_random_sim(dut):
+ # Randomised store/load-back test against a software reference model.
+
+ # start with stack of zeros
+ # NOTE(review): sim_mem models 512 dwords but only indices 0..255 are
+ # ever exercised below — presumably sized to match the SRAM; confirm
+ sim_mem = [0] * 512
-# dcache_tb.vhdl
-#
-# entity dcache_tb is
-# end dcache_tb;
-#
-# architecture behave of dcache_tb is
-# signal clk : std_ulogic;
-# signal rst : std_ulogic;
-#
-# signal d_in : Loadstore1ToDcacheType;
-# signal d_out : DcacheToLoadstore1Type;
-#
-# signal m_in : MmuToDcacheType;
-# signal m_out : DcacheToMmuType;
-#
-# signal wb_bram_in : wishbone_master_out;
-# signal wb_bram_out : wishbone_slave_out;
-#
-# constant clk_period : time := 10 ns;
-# begin
-# dcache0: entity work.dcache
-# generic map(
-#
-# LINE_SIZE => 64,
-# NUM_LINES => 4
-# )
-# port map(
-# clk => clk,
-# rst => rst,
-# d_in => d_in,
-# d_out => d_out,
-# m_in => m_in,
-# m_out => m_out,
-# wishbone_out => wb_bram_in,
-# wishbone_in => wb_bram_out
-# );
-#
-# -- BRAM Memory slave
-# bram0: entity work.wishbone_bram_wrapper
-# generic map(
-# MEMORY_SIZE => 1024,
-# RAM_INIT_FILE => "icache_test.bin"
-# )
-# port map(
-# clk => clk,
-# rst => rst,
-# wishbone_in => wb_bram_in,
-# wishbone_out => wb_bram_out
-# );
-#
-# clk_process: process
-# begin
-# clk <= '0';
-# wait for clk_period/2;
-# clk <= '1';
-# wait for clk_period/2;
-# end process;
-#
-# rst_process: process
-# begin
-# rst <= '1';
-# wait for 2*clk_period;
-# rst <= '0';
-# wait;
-# end process;
-#
-# stim: process
-# begin
-# -- Clear stuff
-# d_in.valid <= '0';
-# d_in.load <= '0';
-# d_in.nc <= '0';
-# d_in.addr <= (others => '0');
-# d_in.data <= (others => '0');
-# m_in.valid <= '0';
-# m_in.addr <= (others => '0');
-# m_in.pte <= (others => '0');
-#
-# wait for 4*clk_period;
-# wait until rising_edge(clk);
-#
-# -- Cacheable read of address 4
-# d_in.load <= '1';
-# d_in.nc <= '0';
-# d_in.addr <= x"0000000000000004";
-# d_in.valid <= '1';
-# wait until rising_edge(clk);
-# d_in.valid <= '0';
-#
-# wait until rising_edge(clk) and d_out.valid = '1';
-# assert d_out.data = x"0000000100000000"
-# report "data @" & to_hstring(d_in.addr) &
-# "=" & to_hstring(d_out.data) &
-# " expected 0000000100000000"
-# severity failure;
-# -- wait for clk_period;
-#
-# -- Cacheable read of address 30
-# d_in.load <= '1';
-# d_in.nc <= '0';
-# d_in.addr <= x"0000000000000030";
-# d_in.valid <= '1';
-# wait until rising_edge(clk);
-# d_in.valid <= '0';
-#
-# wait until rising_edge(clk) and d_out.valid = '1';
-# assert d_out.data = x"0000000D0000000C"
-# report "data @" & to_hstring(d_in.addr) &
-# "=" & to_hstring(d_out.data) &
-# " expected 0000000D0000000C"
-# severity failure;
-#
-# -- Non-cacheable read of address 100
-# d_in.load <= '1';
-# d_in.nc <= '1';
-# d_in.addr <= x"0000000000000100";
-# d_in.valid <= '1';
-# wait until rising_edge(clk);
-# d_in.valid <= '0';
-# wait until rising_edge(clk) and d_out.valid = '1';
-# assert d_out.data = x"0000004100000040"
-# report "data @" & to_hstring(d_in.addr) &
-# "=" & to_hstring(d_out.data) &
-# " expected 0000004100000040"
-# severity failure;
-#
-# wait until rising_edge(clk);
-# wait until rising_edge(clk);
-# wait until rising_edge(clk);
-# wait until rising_edge(clk);
-#
-# std.env.finish;
-# end process;
-# end;
-def dcache_sim(dut):
# clear stuff
yield dut.d_in.valid.eq(0)
yield dut.d_in.load.eq(0)
+ # priv_mode is needed so the permission check (perm_ok) passes
+ yield dut.d_in.priv_mode.eq(1)
yield dut.d_in.nc.eq(0)
- yield dut.d_in.adrr.eq(0)
+ # fixes "adrr" typo in the old code (that line silently created a
+ # nonexistent attribute path in the old simulation)
+ yield dut.d_in.addr.eq(0)
yield dut.d_in.data.eq(0)
yield dut.m_in.valid.eq(0)
yield dut.m_in.addr.eq(0)
yield
yield
yield
- # wait_until rising_edge(clk)
- yield
- # Cacheable read of address 4
- yield dut.d_in.load.eq(1)
+
+ print ()
+
+ # main loop: pick a random dword, record random data in the model,
+ # load first (allocates/warms the cache line), then store through
+ # the dcache; spot-check a second random dword against the model
+ for i in range(256):
+ addr = randint(0, 255)
+ data = randint(0, (1<<64)-1)
+ sim_mem[addr] = data
+ # convert dword index to byte address
+ addr *= 8
+
+ print ("testing %x data %x" % (addr, data))
+
+ yield from dcache_load(dut, addr)
+ yield from dcache_store(dut, addr, data)
+
+ # random read-back check against the reference model
+ addr = randint(0, 255)
+ sim_data = sim_mem[addr]
+ addr *= 8
+
+ data = yield from dcache_load(dut, addr)
+ assert data == sim_data, \
+ "check %x data %x != %x" % (addr, data, sim_data)
+
+ # final sweep: every dword must match the reference model
+ for addr in range(256):
+ data = yield from dcache_load(dut, addr*8)
+ assert data == sim_mem[addr], \
+ "final check %x data %x != %x" % (addr*8, data, sim_mem[addr])
+
+def dcache_sim(dut):
+ # Directed test: cacheable/non-cacheable loads and stores against the
+ # SRAM initialised by __main__ (dword i holds (2i) | ((2i+1)<<32)).
+ # NOTE(review): the assert messages below use an f-string prefix with
+ # %-formatting — the f is redundant (no {} placeholders); harmless.
+ # clear stuff
+ yield dut.d_in.valid.eq(0)
+ yield dut.d_in.load.eq(0)
+ # priv_mode is needed so the permission check (perm_ok) passes
+ yield dut.d_in.priv_mode.eq(1)
yield dut.d_in.nc.eq(0)
- yield dut.d_in.addr.eq(Const(0x0000000000000004, 64))
- yield dut.d_in.valid.eq(1)
- # wait-until rising_edge(clk)
+ yield dut.d_in.addr.eq(0)
+ yield dut.d_in.data.eq(0)
+ yield dut.m_in.valid.eq(0)
+ yield dut.m_in.addr.eq(0)
+ yield dut.m_in.pte.eq(0)
+ # wait 4 * clk_period
+ yield
+ yield
yield
- yield dut.d_in.valid.eq(0)
yield
- while not (yield dut.d_out.valid):
- yield
- assert dut.d_out.data == 0x0000000100000000, \
- f"data @ {dut.d_in.addr}={dut.d_in.data} expected 0000000100000000"
+ # Cacheable read of address 0x58 (dword 11 -> 0x0000001700000016)
+ data = yield from dcache_load(dut, 0x58)
+ addr = yield dut.d_in.addr
+ assert data == 0x0000001700000016, \
+ f"data @%x=%x expected 0x0000001700000016" % (addr, data)
+
+ # Cacheable read of address 0x20 (dword 4 -> 0x0000000900000008)
+ data = yield from dcache_load(dut, 0x20)
+ addr = yield dut.d_in.addr
+ assert data == 0x0000000900000008, \
+ f"data @%x=%x expected 0x0000000900000008" % (addr, data)
# Cacheable read of address 30
- yield dut.d_in.load.eq(1)
- yield dut.d_in.nc.eq(0)
- yield dut.d_in.addr.eq(Const(0x0000000000000030, 64))
- yield dut.d_in.valid.eq(1)
- yield
- yield dut.d_in.valid.eq(0)
- yield
- while not (yield dut.d_out.valid):
- yield
- assert dut.d_out.data == 0x0000000D0000000C, \
- f"data @{dut.d_in.addr}={dut.d_out.data} expected 0000000D0000000C"
+ data = yield from dcache_load(dut, 0x530)
+ addr = yield dut.d_in.addr
+ assert data == 0x0000014D0000014C, \
+ f"data @%x=%x expected 0000014D0000014C" % (addr, data)
+
+ # 2nd Cacheable read of address 0x530 (should now hit in the cache)
+ data = yield from dcache_load(dut, 0x530)
+ addr = yield dut.d_in.addr
+ assert data == 0x0000014D0000014C, \
+ f"data @%x=%x expected 0000014D0000014C" % (addr, data)
# Non-cacheable read of address 100
- yield dut.d_in.load.eq(1)
- yield dut.d_in.nc.eq(1)
- yield dut.d_in.addr.eq(Const(0x0000000000000100, 64))
- yield dut.d_in.valid.eq(1)
- yield
- yield dut.d_in.valid.eq(0)
- yield
- while not (yield dut.d_out.valid):
- yield
- assert dut.d_out.data == 0x0000004100000040, \
- f"data @ {dut.d_in.addr}={dut.d_out.data} expected 0000004100000040"
+ data = yield from dcache_load(dut, 0x100, nc=1)
+ addr = yield dut.d_in.addr
+ assert data == 0x0000004100000040, \
+ f"data @%x=%x expected 0000004100000040" % (addr, data)
+
+ # Store at address 0x530
+ yield from dcache_store(dut, 0x530, 0x121)
+
+ # Store again at address 0x530, overwriting the previous value
+ yield from dcache_store(dut, 0x530, 0x12345678)
+
+ # 3rd Cacheable read of address 0x530: must observe the last store
+ data = yield from dcache_load(dut, 0x530)
+ addr = yield dut.d_in.addr
+ assert data == 0x12345678, \
+ f"data @%x=%x expected 0x12345678" % (addr, data)
+
+ # 4th Cacheable read of address 0x20: unrelated line is unchanged
+ data = yield from dcache_load(dut, 0x20)
+ addr = yield dut.d_in.addr
+ assert data == 0x0000000900000008, \
+ f"data @%x=%x expected 0x0000000900000008" % (addr, data)
yield
yield
yield
-def test_dcache():
+def test_dcache(mem, test_fn, test_name):
+ # Build a DCache wired to a wishbone SRAM backing store and run the
+ # given generator-based test under the nmigen simulator.
+ # mem: initial SRAM contents (list of 64-bit words, or None for zeros)
+ # test_fn: generator taking the dcache dut (e.g. dcache_sim)
+ # test_name: suffix for the VCD output filename
+ dut = DCache()
+
+ # 64-bit wide SRAM, 16*64 = 1024 dwords, byte-granular write enables
+ memory = Memory(width=64, depth=16*64, init=mem)
+ sram = SRAM(memory=memory, granularity=8)
+
+ m = Module()
+ m.submodules.dcache = dut
+ m.submodules.sram = sram
+
+ # wire the dcache wishbone master straight to the SRAM slave
+ m.d.comb += sram.bus.cyc.eq(dut.wb_out.cyc)
+ m.d.comb += sram.bus.stb.eq(dut.wb_out.stb)
+ m.d.comb += sram.bus.we.eq(dut.wb_out.we)
+ m.d.comb += sram.bus.sel.eq(dut.wb_out.sel)
+ m.d.comb += sram.bus.adr.eq(dut.wb_out.adr)
+ m.d.comb += sram.bus.dat_w.eq(dut.wb_out.dat)
+
+ m.d.comb += dut.wb_in.ack.eq(sram.bus.ack)
+ m.d.comb += dut.wb_in.dat.eq(sram.bus.dat_r)
+
+ # nmigen Simulation
+ sim = Simulator(m)
+ sim.add_clock(1e-6)
+
+ sim.add_sync_process(wrap(test_fn(dut)))
+ with sim.write_vcd('test_dcache%s.vcd' % test_name):
+ sim.run()
+
+if __name__ == '__main__':
+ # emit RTLIL for synthesis/inspection, then run both simulations
dut = DCache()
vl = rtlil.convert(dut, ports=[])
with open("test_dcache.il", "w") as f:
f.write(vl)
- #run_simulation(dut, dcache_sim(), vcd_name='test_dcache.vcd')
+ # directed test: dword i of the SRAM holds (2i) | ((2i+1) << 32),
+ # matching the values asserted in dcache_sim()
+ mem = []
+ for i in range(0,512):
+ mem.append((i*2)| ((i*2+1)<<32))
-if __name__ == '__main__':
- test_dcache()
+ test_dcache(mem, dcache_sim, "")
+ # random test starts from all-zero memory (init=None), matching the
+ # zero-initialised sim_mem model in dcache_random_sim()
+ test_dcache(None, dcache_random_sim, "random")