"DDR4": 8
}
+
def get_cl_cw(memtype, tck):
f_to_cl_cwl = OrderedDict()
if memtype == "DDR2":
- f_to_cl_cwl[400e6] = (3, 2)
- f_to_cl_cwl[533e6] = (4, 3)
- f_to_cl_cwl[677e6] = (5, 4)
- f_to_cl_cwl[800e6] = (6, 5)
+ f_to_cl_cwl[400e6] = (3, 2)
+ f_to_cl_cwl[533e6] = (4, 3)
+ f_to_cl_cwl[677e6] = (5, 4)
+ f_to_cl_cwl[800e6] = (6, 5)
f_to_cl_cwl[1066e6] = (7, 5)
elif memtype == "DDR3":
- f_to_cl_cwl[800e6] = ( 6, 5)
- f_to_cl_cwl[1066e6] = ( 7, 6)
+ f_to_cl_cwl[800e6] = (6, 5)
+ f_to_cl_cwl[1066e6] = (7, 6)
f_to_cl_cwl[1333e6] = (10, 7)
f_to_cl_cwl[1600e6] = (11, 8)
elif memtype == "DDR4":
return cl, cwl
raise ValueError
+
def get_sys_latency(nphases, cas_latency):
return math.ceil(cas_latency/nphases)
+
def get_sys_phases(nphases, sys_latency, cas_latency):
dat_phase = sys_latency*nphases - cas_latency
- cmd_phase = (dat_phase - 1)%nphases
+ cmd_phase = (dat_phase - 1) % nphases
return cmd_phase, dat_phase
# PHY Pads Transformers ----------------------------------------------------------------------------
+
class PHYPadsReducer:
"""PHY Pads Reducer
For testing purposes, we often need to use only some of the DRAM modules. PHYPadsReducer allows
selecting specific modules and avoid re-definining dram pins in the Platform for this.
"""
+
def __init__(self, pads, modules):
- self.pads = pads
+ self.pads = pads
self.modules = modules
def __getattr__(self, name):
if name in ["dq"]:
return Array([getattr(self.pads, name)[8*i + j]
- for i in self.modules
- for j in range(8)])
+ for i in self.modules
+ for j in range(8)])
if name in ["dm", "dqs", "dqs_p", "dqs_n"]:
return Array([getattr(self.pads, name)[i] for i in self.modules])
else:
return getattr(self.pads, name)
+
class PHYPadsCombiner:
"""PHY Pads Combiner
and this combiner can be used to re-create a single pads structure (that will be compatible with
LiteDRAM's PHYs) to create a single DRAM controller from multiple fully dissociated DRAMs chips.
"""
+
def __init__(self, pads):
if not isinstance(pads, list):
self.groups = [pads]
def __getattr__(self, name):
if name in ["dm", "dq", "dqs", "dqs_p", "dqs_n"]:
return Array([getattr(self.groups[j], name)[i]
- for i in range(len(getattr(self.groups[0], name)))
- for j in range(len(self.groups))])
+ for i in range(len(getattr(self.groups[0], name)))
+ for j in range(len(self.groups))])
else:
return getattr(self.groups[self.sel], name)
# BitSlip ------------------------------------------------------------------------------------------
+
class BitSlip(Elaboratable):
def __init__(self, dw, rst=None, slp=None, cycles=1):
self.i = Signal(dw)
# DQS Pattern --------------------------------------------------------------------------------------
+
class DQSPattern(Elaboratable):
def __init__(self, preamble=None, postamble=None, wlevel_en=0, wlevel_strobe=0, register=False):
- self.preamble = Signal() if preamble is None else preamble
+ self.preamble = Signal() if preamble is None else preamble
self.postamble = Signal() if postamble is None else postamble
self.o = Signal(8)
self._wlevel_en = wlevel_en
# Settings -----------------------------------------------------------------------------------------
+
class Settings:
def set_attributes(self, attributes):
for k, v in attributes.items():
self.is_rdimm = True
self.set_attributes(locals())
+
class GeomSettings(Settings):
def __init__(self, bankbits, rowbits, colbits):
self.set_attributes(locals())
# Layouts/Interface --------------------------------------------------------------------------------
+
def cmd_layout(address_width):
return [
("valid", 1, DIR_FANOUT),
("ready", 1, DIR_FANIN),
("we", 1, DIR_FANOUT),
("addr", address_width, DIR_FANOUT),
- ("lock", 1, DIR_FANIN), # only used internally
+ ("lock", 1, DIR_FANIN), # only used internally
("wdata_ready", 1, DIR_FANIN),
("rdata_valid", 1, DIR_FANIN)
]
+
def data_layout(data_width):
return [
("wdata", data_width, DIR_FANOUT),
("rdata", data_width, DIR_FANIN)
]
+
def cmd_description(address_width):
return [
("we", 1),
("addr", address_width)
]
+
def wdata_description(data_width):
return [
("data", data_width),
("we", data_width//8)
]
+
def rdata_description(data_width):
return [("data", data_width)]
+
def cmd_request_layout(a, ba):
return [
("a", a),
("we", 1)
]
+
def cmd_request_rw_layout(a, ba):
return cmd_request_layout(a, ba) + [
("is_cmd", 1),
def __init__(self, address_align, settings):
rankbits = log2_int(settings.phy.nranks)
self.address_align = address_align
- self.address_width = settings.geom.rowbits + settings.geom.colbits + rankbits - address_align
- self.data_width = settings.phy.dfi_databits*settings.phy.nphases
- self.nbanks = settings.phy.nranks*(2**settings.geom.bankbits)
- self.nranks = settings.phy.nranks
+ self.address_width = settings.geom.rowbits + \
+ settings.geom.colbits + rankbits - address_align
+ self.data_width = settings.phy.dfi_databits*settings.phy.nphases
+ self.nbanks = settings.phy.nranks*(2**settings.geom.bankbits)
+ self.nranks = settings.phy.nranks
self.settings = settings
- layout = [("bank"+str(i), cmd_layout(self.address_width)) for i in range(self.nbanks)]
+ layout = [("bank"+str(i), cmd_layout(self.address_width))
+ for i in range(self.nbanks)]
layout += data_layout(self.data_width)
Record.__init__(self, layout)
# Ports --------------------------------------------------------------------------------------------
+
class gramNativePort(Settings):
def __init__(self, mode, address_width, data_width, clock_domain="sys", id=0):
self.set_attributes(locals())
self.lock = Signal()
- self.cmd = stream.Endpoint(cmd_description(address_width))
+ self.cmd = stream.Endpoint(cmd_description(address_width))
self.wdata = stream.Endpoint(wdata_description(data_width))
self.rdata = stream.Endpoint(rdata_description(data_width))
def __init__(self, txxd):
self.valid = Signal()
self.ready = ready = Signal(reset=txxd is None)
- #ready.attr.add("no_retiming") TODO
+ # ready.attr.add("no_retiming") TODO
self._txxd = txxd
def elaborate(self, platform):
def __init__(self, tfaw):
self.valid = Signal()
self.ready = Signal(reset=1)
- #ready.attr.add("no_retiming") TODO
+ # ready.attr.add("no_retiming") TODO
self._tfaw = tfaw
def elaborate(self, platform):
m = Module()
if self._tfaw is not None:
- count = Signal(range(max(self._tfaw, 2)))
+ count = Signal(range(max(self._tfaw, 2)))
window = Signal(self._tfaw)
m.d.sync += window.eq(Cat(self.valid, window))
- m.d.comb += count.eq(reduce(add, [window[i] for i in range(self._tfaw)]))
+ m.d.comb += count.eq(reduce(add, [window[i]
+ for i in range(self._tfaw)]))
with m.If(count < 4):
with m.If(count == 3):
m.d.sync += self.ready.eq(~self.valid)
__ALL__ = ["delayed_enter", "RoundRobin", "Timeline", "CSRPrefixProxy"]
+
def delayed_enter(m, src, dst, delay):
assert delay > 0
m.next = deststate
# Original nMigen implementation by HarryHo90sHK
+
+
class RoundRobin(Elaboratable):
"""A round-robin scheduler.
Parameters
stb : Signal()
Strobe signal to enable granting access to the next device requesting. Externally driven.
"""
+
def __init__(self, n):
self.n = n
self.request = Signal(n)
return m
+
class Timeline(Elaboratable):
def __init__(self, events):
self.trigger = Signal()
return m
+
class CSRPrefixProxy:
def __init__(self, bank, prefix):
self._bank = bank
prefixed_name = "{}_{}".format(self._prefix, name)
return self._bank.csr(width=width, access=access, addr=addr,
- alignment=alignment, name=prefixed_name)
+ alignment=alignment, name=prefixed_name)
# Core ---------------------------------------------------------------------------------------------
+
class gramCore(Peripheral, Elaboratable):
def __init__(self, phy, geom_settings, timing_settings, clk_freq, **kwargs):
super().__init__()
bank = self.csr_bank()
-
+
self._zero_ev = self.event(mode="rise")
self._phy = phy
self._kwargs = kwargs
self.dfii = DFIInjector(
- csr_bank = CSRPrefixProxy(bank, "dfii"),
- addressbits = self._geom_settings.addressbits,
- bankbits = self._geom_settings.bankbits,
- nranks = self._phy.settings.nranks,
- databits = self._phy.settings.dfi_databits,
- nphases = self._phy.settings.nphases)
+ csr_bank=CSRPrefixProxy(bank, "dfii"),
+ addressbits=self._geom_settings.addressbits,
+ bankbits=self._geom_settings.bankbits,
+ nranks=self._phy.settings.nranks,
+ databits=self._phy.settings.dfi_databits,
+ nphases=self._phy.settings.nphases)
self.controller = gramController(
- phy_settings = self._phy.settings,
- geom_settings = self._geom_settings,
- timing_settings = self._timing_settings,
- clk_freq = self._clk_freq,
+ phy_settings=self._phy.settings,
+ geom_settings=self._geom_settings,
+ timing_settings=self._timing_settings,
+ clk_freq=self._clk_freq,
**self._kwargs)
# Size in bytes
- self.size = 2**geom_settings.bankbits * 2**geom_settings.rowbits * 2**geom_settings.colbits
+ self.size = 2**geom_settings.bankbits * \
+ 2**geom_settings.rowbits * 2**geom_settings.colbits
self.crossbar = gramCrossbar(self.controller.interface)
- self._bridge = self.bridge(data_width=32, granularity=8, alignment=2)
+ self._bridge = self.bridge(data_width=32, granularity=8, alignment=2)
self.bus = self._bridge.bus
self.irq = self._bridge.irq
# AddressSlicer ------------------------------------------------------------------------------------
+
class _AddressSlicer:
"""Helper for extracting row/col from address
Column occupies lower bits of the address, row - higher bits. Address has
a forced alignment, so column does not contain alignment bits.
"""
+
def __init__(self, colbits, address_align):
- self.colbits = colbits
+ self.colbits = colbits
self.address_align = address_align
def row(self, address):
# BankMachine --------------------------------------------------------------------------------------
+
class BankMachine(Elaboratable):
"""Converts requests from ports into DRAM commands
cmd : Endpoint(cmd_request_rw_layout)
Stream of commands to the Multiplexer
"""
+
def __init__(self, n, address_width, address_align, nranks, settings):
self.settings = settings
self.req = req = Record(cmd_layout(address_width))
self.refresh_req = refresh_req = Signal()
self.refresh_gnt = refresh_gnt = Signal()
- a = settings.geom.addressbits
+ a = settings.geom.addressbits
ba = settings.geom.bankbits + log2_int(nranks)
self.cmd = stream.Endpoint(cmd_request_rw_layout(a, ba))
auto_precharge = Signal()
# Command buffer ---------------------------------------------------------------------------
- cmd_buffer_layout = [("we", 1), ("addr", len(self.req.addr))]
+ cmd_buffer_layout = [("we", 1), ("addr", len(self.req.addr))]
cmd_buffer_lookahead = stream.SyncFIFO(
cmd_buffer_layout, self.settings.cmd_buffer_depth,
buffered=self.settings.cmd_buffer_buffered)
- cmd_buffer = stream.Buffer(cmd_buffer_layout) # 1 depth buffer to detect row change
+ # 1 depth buffer to detect row change
+ cmd_buffer = stream.Buffer(cmd_buffer_layout)
m.submodules += cmd_buffer_lookahead, cmd_buffer
m.d.comb += [
#self.req.connect(cmd_buffer_lookahead.sink, include={"valid", "ready", "payload.we", "payload.addr"}),
cmd_buffer_lookahead.sink.payload.addr.eq(self.req.addr),
cmd_buffer_lookahead.source.connect(cmd_buffer.sink),
- cmd_buffer.source.ready.eq(self.req.wdata_ready | self.req.rdata_valid),
- self.req.lock.eq(cmd_buffer_lookahead.source.valid | cmd_buffer.source.valid),
+ cmd_buffer.source.ready.eq(
+ self.req.wdata_ready | self.req.rdata_valid),
+ self.req.lock.eq(cmd_buffer_lookahead.source.valid |
+ cmd_buffer.source.valid),
]
- slicer = _AddressSlicer(self.settings.geom.colbits, self._address_align)
+ slicer = _AddressSlicer(
+ self.settings.geom.colbits, self._address_align)
# Row tracking -----------------------------------------------------------------------------
- row = Signal(self.settings.geom.rowbits)
+ row = Signal(self.settings.geom.rowbits)
row_opened = Signal()
- row_hit = Signal()
- row_open = Signal()
- row_close = Signal()
+ row_hit = Signal()
+ row_open = Signal()
+ row_close = Signal()
m.d.comb += row_hit.eq(row == slicer.row(cmd_buffer.source.addr))
with m.If(row_close):
m.d.sync += row_opened.eq(0)
with m.If(row_col_n_addr_sel):
m.d.comb += self.cmd.a.eq(slicer.row(cmd_buffer.source.addr))
with m.Else():
- m.d.comb += self.cmd.a.eq((auto_precharge << 10) | slicer.col(cmd_buffer.source.addr))
+ m.d.comb += self.cmd.a.eq((auto_precharge << 10)
+ | slicer.col(cmd_buffer.source.addr))
# tWTP (write-to-precharge) controller -----------------------------------------------------
- write_latency = math.ceil(self.settings.phy.cwl / self.settings.phy.nphases)
- precharge_time = write_latency + self.settings.timing.tWR + self.settings.timing.tCCD # AL=0
+ write_latency = math.ceil(
+ self.settings.phy.cwl / self.settings.phy.nphases)
+ precharge_time = write_latency + self.settings.timing.tWR + \
+ self.settings.timing.tCCD # AL=0
m.submodules.twtpcon = twtpcon = tXXDController(precharge_time)
- m.d.comb += twtpcon.valid.eq(self.cmd.valid & self.cmd.ready & self.cmd.is_write)
+ m.d.comb += twtpcon.valid.eq(self.cmd.valid &
+ self.cmd.ready & self.cmd.is_write)
# tRC (activate-activate) controller -------------------------------------------------------
m.submodules.trccon = trccon = tXXDController(self.settings.timing.tRC)
m.d.comb += trccon.valid.eq(self.cmd.valid & self.cmd.ready & row_open)
# tRAS (activate-precharge) controller -----------------------------------------------------
- m.submodules.trascon = trascon = tXXDController(self.settings.timing.tRAS)
- m.d.comb += trascon.valid.eq(self.cmd.valid & self.cmd.ready & row_open)
+ m.submodules.trascon = trascon = tXXDController(
+ self.settings.timing.tRAS)
+ m.d.comb += trascon.valid.eq(self.cmd.valid &
+ self.cmd.ready & row_open)
# Auto Precharge generation ----------------------------------------------------------------
# generate auto precharge when current and next cmds are to different rows
# Settings -----------------------------------------------------------------------------------------
+
class ControllerSettings(Settings):
def __init__(self,
- # Command buffers
- cmd_buffer_depth = 8,
- cmd_buffer_buffered = False,
+ # Command buffers
+ cmd_buffer_depth=8,
+ cmd_buffer_buffered=False,
- # Read/Write times
- read_time = 32,
- write_time = 16,
+ # Read/Write times
+ read_time=32,
+ write_time=16,
- # Refresh
- with_refresh = True,
- refresh_cls = Refresher,
- refresh_zqcs_freq = 1e0,
- refresh_postponing = 1,
+ # Refresh
+ with_refresh=True,
+ refresh_cls=Refresher,
+ refresh_zqcs_freq=1e0,
+ refresh_postponing=1,
- # Auto-Precharge
- with_auto_precharge = True,
+ # Auto-Precharge
+ with_auto_precharge=True,
- # Address mapping
- address_mapping = "ROW_BANK_COL"):
+ # Address mapping
+ address_mapping="ROW_BANK_COL"):
self.set_attributes(locals())
# Controller ---------------------------------------------------------------------------------------
+
class gramController(Elaboratable):
def __init__(self, phy_settings, geom_settings, timing_settings, clk_freq,
- controller_settings=ControllerSettings()):
+ controller_settings=ControllerSettings()):
self._address_align = log2_int(burst_lengths[phy_settings.memtype])
# Settings ---------------------------------------------------------------------------------
- self.settings = controller_settings
- self.settings.phy = phy_settings
- self.settings.geom = geom_settings
+ self.settings = controller_settings
+ self.settings.phy = phy_settings
+ self.settings.geom = geom_settings
self.settings.timing = timing_settings
# LiteDRAM Interface (User) ----------------------------------------------------------------
- self.interface = interface = gramInterface(self._address_align, self.settings)
+ self.interface = interface = gramInterface(
+ self._address_align, self.settings)
# DFI Interface (Memory) -------------------------------------------------------------------
self.dfi = dfi.Interface(
- addressbits = geom_settings.addressbits,
- bankbits = geom_settings.bankbits,
- nranks = phy_settings.nranks,
- databits = phy_settings.dfi_databits,
- nphases = phy_settings.nphases)
+ addressbits=geom_settings.addressbits,
+ bankbits=geom_settings.bankbits,
+ nranks=phy_settings.nranks,
+ databits=phy_settings.dfi_databits,
+ nphases=phy_settings.nphases)
self._clk_freq = clk_freq
# Refresher --------------------------------------------------------------------------------
m.submodules.refresher = self.settings.refresh_cls(self.settings,
- clk_freq = self._clk_freq,
- zqcs_freq = self.settings.refresh_zqcs_freq,
- postponing = self.settings.refresh_postponing)
+ clk_freq=self._clk_freq,
+ zqcs_freq=self.settings.refresh_zqcs_freq,
+ postponing=self.settings.refresh_postponing)
# Bank Machines ----------------------------------------------------------------------------
bank_machines = []
for n in range(nranks*nbanks):
bank_machine = BankMachine(n,
- address_width = self.interface.address_width,
- address_align = self._address_align,
- nranks = nranks,
- settings = self.settings)
+ address_width=self.interface.address_width,
+ address_align=self._address_align,
+ nranks=nranks,
+ settings=self.settings)
bank_machines.append(bank_machine)
m.submodules += bank_machine
- m.d.comb += getattr(self.interface, "bank"+str(n)).connect(bank_machine.req)
+ m.d.comb += getattr(self.interface, "bank" +
+ str(n)).connect(bank_machine.req)
# Multiplexer ------------------------------------------------------------------------------
m.submodules.multiplexer = Multiplexer(
- settings = self.settings,
- bank_machines = bank_machines,
- refresher = m.submodules.refresher,
- dfi = self.dfi,
- interface = self.interface)
+ settings=self.settings,
+ bank_machines=bank_machines,
+ refresher=m.submodules.refresher,
+ dfi=self.dfi,
+ interface=self.interface)
return m
# LiteDRAMCrossbar ---------------------------------------------------------------------------------
+
class gramCrossbar(Elaboratable):
"""Multiplexes LiteDRAMController (slave) between ports (masters)
masters : [LiteDRAMNativePort, ...]
LiteDRAM memory ports
"""
+
def __init__(self, controller):
self.controller = controller
- self.rca_bits = controller.address_width
- self.nbanks = controller.nbanks
- self.nranks = controller.nranks
+ self.rca_bits = controller.address_width
+ self.nbanks = controller.nbanks
+ self.nranks = controller.nranks
self.cmd_buffer_depth = controller.settings.cmd_buffer_depth
- self.read_latency = controller.settings.phy.read_latency + 1
- self.write_latency = controller.settings.phy.write_latency + 1
+ self.read_latency = controller.settings.phy.read_latency + 1
+ self.write_latency = controller.settings.phy.write_latency + 1
self.bank_bits = log2_int(self.nbanks, False)
self.rank_bits = log2_int(self.nranks, False)
# Crossbar port ----------------------------------------------------------------------------
port = gramNativePort(
- mode = mode,
- address_width = self.rca_bits + self.bank_bits - self.rank_bits,
- data_width = self.controller.data_width,
- clock_domain = "sys",
- id = len(self.masters))
+ mode=mode,
+ address_width=self.rca_bits + self.bank_bits - self.rank_bits,
+ data_width=self.controller.data_width,
+ clock_domain="sys",
+ id=len(self.masters))
self.masters.append(port)
# Clock domain crossing --------------------------------------------------------------------
if clock_domain != "sys":
new_port = gramNativePort(
- mode = mode,
- address_width = port.address_width,
- data_width = port.data_width,
- clock_domain = clock_domain,
- id = port.id)
+ mode=mode,
+ address_width=port.address_width,
+ data_width=port.data_width,
+ clock_domain=clock_domain,
+ id=port.id)
self._pending_submodules.append(gramNativePortCDC(new_port, port))
port = new_port
else:
addr_shift = log2_int(self.controller.data_width//data_width)
new_port = gramNativePort(
- mode = mode,
- address_width = port.address_width + addr_shift,
- data_width = data_width,
- clock_domain = clock_domain,
- id = port.id)
+ mode=mode,
+ address_width=port.address_width + addr_shift,
+ data_width=data_width,
+ clock_domain=clock_domain,
+ id=port.id)
self._pending_submodules.append(ClockDomainsRenamer(clock_domain)(
gramNativePortConverter(new_port, port, reverse)))
port = new_port
m.submodules += self._pending_submodules
controller = self.controller
- nmasters = len(self.masters)
+ nmasters = len(self.masters)
# Address mapping --------------------------------------------------------------------------
- cba_shifts = {"ROW_BANK_COL": controller.settings.geom.colbits - controller.address_align}
+ cba_shifts = {
+ "ROW_BANK_COL": controller.settings.geom.colbits - controller.address_align}
cba_shift = cba_shifts[controller.settings.address_mapping]
- m_ba = [master.get_bank_address(self.bank_bits, cba_shift) for master in self.masters]
- m_rca = [master.get_row_column_address(self.bank_bits, self.rca_bits, cba_shift) for master in self.masters]
+ m_ba = [master.get_bank_address(
+ self.bank_bits, cba_shift) for master in self.masters]
+ m_rca = [master.get_row_column_address(
+ self.bank_bits, self.rca_bits, cba_shift) for master in self.masters]
- master_readys = [0]*nmasters
+ master_readys = [0]*nmasters
master_wdata_readys = [0]*nmasters
master_rdata_valids = [0]*nmasters
for other_nb, other_arbiter in enumerate(arbiters):
if other_nb != nb:
other_bank = getattr(controller, "bank"+str(other_nb))
- locked = locked | (other_bank.lock & (other_arbiter.grant == nm))
+ locked = locked | (other_bank.lock & (
+ other_arbiter.grant == nm))
master_locked.append(locked)
# Arbitrate ----------------------------------------------------------------------------
- bank_selected = [(ba == nb) & ~locked for ba, locked in zip(m_ba, master_locked)]
- bank_requested = [bs & master.cmd.valid for bs, master in zip(bank_selected, self.masters)]
+ bank_selected = [(ba == nb) & ~locked for ba,
+ locked in zip(m_ba, master_locked)]
+ bank_requested = [bs & master.cmd.valid for bs,
+ master in zip(bank_selected, self.masters)]
m.d.comb += [
arbiter.request.eq(Cat(*bank_requested)),
arbiter.stb.eq(~bank.valid & ~bank.lock)
bank.valid.eq(Array(bank_requested)[arbiter.grant])
]
master_readys = [master_ready | ((arbiter.grant == nm) & bank_selected[nm] & bank.ready)
- for nm, master_ready in enumerate(master_readys)]
+ for nm, master_ready in enumerate(master_readys)]
master_wdata_readys = [master_wdata_ready | ((arbiter.grant == nm) & bank.wdata_ready)
- for nm, master_wdata_ready in enumerate(master_wdata_readys)]
+ for nm, master_wdata_ready in enumerate(master_wdata_readys)]
master_rdata_valids = [master_rdata_valid | ((arbiter.grant == nm) & bank.rdata_valid)
- for nm, master_rdata_valid in enumerate(master_rdata_valids)]
+ for nm, master_rdata_valid in enumerate(master_rdata_valids)]
# Delay write/read signals based on their latency
for nm, master_wdata_ready in enumerate(master_wdata_readys):
# _CommandChooser ----------------------------------------------------------------------------------
+
class _CommandChooser(Elaboratable):
"""Arbitrates between requests, filtering them based on their type
cmd : Endpoint(cmd_request_rw_layout)
Currently selected request stream (when ~cmd.valid, cas/ras/we are 0)
"""
+
def __init__(self, requests):
- self.want_reads = Signal()
- self.want_writes = Signal()
- self.want_cmds = Signal()
+ self.want_reads = Signal()
+ self.want_writes = Signal()
+ self.want_cmds = Signal()
self.want_activates = Signal()
self._requests = requests
- a = len(requests[0].a)
+ a = len(requests[0].a)
ba = len(requests[0].ba)
# cas/ras/we are 0 when valid is inactive
valids = Signal(n)
for i, request in enumerate(self._requests):
is_act_cmd = request.ras & ~request.cas & ~request.we
- command = request.is_cmd & self.want_cmds & (~is_act_cmd | self.want_activates)
+ command = request.is_cmd & self.want_cmds & (
+ ~is_act_cmd | self.want_activates)
read = request.is_read == self.want_reads
write = request.is_write == self.want_writes
- m.d.comb += valids[i].eq(request.valid & (command | (read & write)))
-
+ m.d.comb += valids[i].eq(request.valid &
+ (command | (read & write)))
arbiter = RoundRobin(n)
m.submodules += arbiter
m.d.comb += getattr(self.cmd, name).eq(choices[arbiter.grant])
for i, request in enumerate(self._requests):
- #with m.If(self.cmd.valid & self.cmd.ready & (arbiter.grant == i)):
- #m.d.comb += request.ready.eq(1) # TODO: this shouldn't be commented
- self.ready[i].eq(self.cmd.valid & self.cmd.ready & (arbiter.grant == i))
+ # with m.If(self.cmd.valid & self.cmd.ready & (arbiter.grant == i)):
+ # m.d.comb += request.ready.eq(1) # TODO: this shouldn't be commented
+ self.ready[i].eq(self.cmd.valid & self.cmd.ready &
+ (arbiter.grant == i))
# Arbitrate if a command is being accepted or if the command is not valid to ensure a valid
# command is selected when cmd.ready goes high.
# _Steerer -----------------------------------------------------------------------------------------
+
(STEER_NOP, STEER_CMD, STEER_REQ, STEER_REFRESH) = range(4)
+
class _Steerer(Elaboratable):
"""Connects selected request to DFI interface
DFI phase. The signals should take one of the values from STEER_* to
select given source.
"""
+
def __init__(self, commands, dfi):
self._commands = commands
self._dfi = dfi
ncmd = len(commands)
- nph = len(dfi.phases)
+ nph = len(dfi.phases)
self.sel = [Signal(range(ncmd)) for i in range(nph)]
def elaborate(self, platform):
return cmd.valid & cmd.ready & getattr(cmd, attr)
for i, (phase, sel) in enumerate(zip(dfi.phases, self.sel)):
- nranks = len(phase.cs_n)
+ nranks = len(phase.cs_n)
rankbits = log2_int(nranks)
if hasattr(phase, "reset_n"):
m.d.comb += phase.reset_n.eq(1)
if rankbits:
rank_decoder = Decoder(nranks)
m.submodules += rank_decoder
- m.d.comb += rank_decoder.i.eq((Array(cmd.ba[-rankbits:] for cmd in commands)[sel]))
- if i == 0: # Select all ranks on refresh.
+ m.d.comb += rank_decoder.i.eq(
+ (Array(cmd.ba[-rankbits:] for cmd in commands)[sel]))
+ if i == 0: # Select all ranks on refresh.
with m.If(sel == STEER_REFRESH):
m.d.sync += phase.cs_n.eq(0)
with m.Else():
m.d.sync += phase.cs_n.eq(~rank_decoder.o)
else:
m.d.sync += phase.cs_n.eq(~rank_decoder.o)
- m.d.sync += phase.bank.eq(Array(cmd.ba[:-rankbits] for cmd in commands)[sel])
+ m.d.sync += phase.bank.eq(Array(cmd.ba[:-rankbits]
+ for cmd in commands)[sel])
else:
m.d.sync += [
phase.cs_n.eq(0),
m.d.sync += [
phase.address.eq(Array(cmd.a for cmd in commands)[sel]),
- phase.cas_n.eq(~Array(valid_and(cmd, "cas") for cmd in commands)[sel]),
- phase.ras_n.eq(~Array(valid_and(cmd, "ras") for cmd in commands)[sel]),
- phase.we_n.eq(~Array(valid_and(cmd, "we") for cmd in commands)[sel])
+ phase.cas_n.eq(~Array(valid_and(cmd, "cas")
+ for cmd in commands)[sel]),
+ phase.ras_n.eq(~Array(valid_and(cmd, "ras")
+ for cmd in commands)[sel]),
+ phase.we_n.eq(~Array(valid_and(cmd, "we")
+ for cmd in commands)[sel])
]
rddata_ens = Array(valid_and(cmd, "is_read") for cmd in commands)
# Multiplexer --------------------------------------------------------------------------------------
+
class Multiplexer(Peripheral, Elaboratable):
"""Multplexes requets from BankMachines to DFI
interface : LiteDRAMInterface
Data interface connected directly to LiteDRAMCrossbar
"""
+
def __init__(self,
- settings,
- bank_machines,
- refresher,
- dfi,
- interface):
+ settings,
+ bank_machines,
+ refresher,
+ dfi,
+ interface):
assert(settings.phy.nphases == len(dfi.phases))
self._settings = settings
self._bank_machines = bank_machines
m.submodules.choose_cmd = choose_cmd = _CommandChooser(requests)
m.submodules.choose_req = choose_req = _CommandChooser(requests)
for i, request in enumerate(requests):
- m.d.comb += request.ready.eq(choose_cmd.ready[i] | choose_req.ready[i])
+ m.d.comb += request.ready.eq(
+ choose_cmd.ready[i] | choose_req.ready[i])
if settings.phy.nphases == 1:
# When only 1 phase, use choose_req for all requests
choose_cmd = choose_req
# tRRD timing (Row to Row delay) -----------------------------------------------------------
m.submodules.trrdcon = trrdcon = tXXDController(settings.timing.tRRD)
- m.d.comb += trrdcon.valid.eq(choose_cmd.accept() & choose_cmd.activate())
+ m.d.comb += trrdcon.valid.eq(choose_cmd.accept()
+ & choose_cmd.activate())
# tFAW timing (Four Activate Window) -------------------------------------------------------
m.submodules.tfawcon = tfawcon = tFAWController(settings.timing.tFAW)
- m.d.comb += tfawcon.valid.eq(choose_cmd.accept() & choose_cmd.activate())
+ m.d.comb += tfawcon.valid.eq(choose_cmd.accept()
+ & choose_cmd.activate())
# RAS control ------------------------------------------------------------------------------
m.d.comb += ras_allowed.eq(trrdcon.ready & tfawcon.ready)
# tCCD timing (Column to Column delay) -----------------------------------------------------
m.submodules.tccdcon = tccdcon = tXXDController(settings.timing.tCCD)
- m.d.comb += tccdcon.valid.eq(choose_req.accept() & (choose_req.write() | choose_req.read()))
+ m.d.comb += tccdcon.valid.eq(choose_req.accept()
+ & (choose_req.write() | choose_req.read()))
# CAS control ------------------------------------------------------------------------------
m.d.comb += cas_allowed.eq(tccdcon.ready)
write_time_en, max_write_time = anti_starvation(settings.write_time)
# Refresh ----------------------------------------------------------------------------------
- m.d.comb += [bm.refresh_req.eq(refresher.cmd.valid) for bm in bank_machines]
+ m.d.comb += [bm.refresh_req.eq(refresher.cmd.valid)
+ for bm in bank_machines]
go_to_refresh = Signal()
bm_refresh_gnts = [bm.refresh_gnt for bm in bank_machines]
m.d.comb += go_to_refresh.eq(reduce(and_, bm_refresh_gnts))
]
with m.If(settings.phy.nphases == 1):
- m.d.comb += choose_req.cmd.ready.eq(cas_allowed & (~choose_req.activate() | ras_allowed))
+ m.d.comb += choose_req.cmd.ready.eq(
+ cas_allowed & (~choose_req.activate() | ras_allowed))
with m.Else():
m.d.comb += [
choose_cmd.want_activates.eq(ras_allowed),
- choose_cmd.cmd.ready.eq(~choose_cmd.activate() | ras_allowed),
+ choose_cmd.cmd.ready.eq(
+ ~choose_cmd.activate() | ras_allowed),
choose_req.cmd.ready.eq(cas_allowed),
]
]
with m.If(settings.phy.nphases == 1):
- m.d.comb += choose_req.cmd.ready.eq(cas_allowed & (~choose_req.activate() | ras_allowed))
+ m.d.comb += choose_req.cmd.ready.eq(
+ cas_allowed & (~choose_req.activate() | ras_allowed))
with m.Else():
m.d.comb += [
choose_cmd.want_activates.eq(ras_allowed),
- choose_cmd.cmd.ready.eq(~choose_cmd.activate() | ras_allowed),
+ choose_cmd.cmd.ready.eq(
+ ~choose_cmd.activate() | ras_allowed),
choose_req.cmd.ready.eq(cas_allowed),
]
with m.State("WTR"):
with m.If(twtrcon.ready):
m.next = "Read"
-
+
# TODO: reduce this, actual limit is around (cl+1)/nphases
delayed_enter(m, "RTW", "Write", settings.phy.read_latency-1)
# RefreshExecuter ----------------------------------------------------------------------------------
+
class RefreshExecuter(Elaboratable):
"""Refresh Executer
- Send an "Auto Refresh" command
- Wait tRFC
"""
+
def __init__(self, trp, trfc):
self.start = Signal()
- self.done = Signal()
+ self.done = Signal()
self._trp = trp
self._trfc = trfc
# RefreshSequencer ---------------------------------------------------------------------------------
+
class RefreshSequencer(Elaboratable):
"""Refresh Sequencer
Sequence N refreshs to the DRAM.
"""
+
def __init__(self, trp, trfc, postponing=1):
self.start = Signal()
- self.done = Signal()
+ self.done = Signal()
self._trp = trp
self._trfc = trfc
# RefreshTimer -------------------------------------------------------------------------------------
+
class RefreshTimer(Elaboratable):
"""Refresh Timer
Generate periodic pulses (tREFI period) to trigger DRAM refresh.
"""
+
def __init__(self, trefi):
- self.wait = Signal()
- self.done = Signal()
+ self.wait = Signal()
+ self.done = Signal()
self.count = Signal(bits_for(trefi))
self._trefi = trefi
trefi = self._trefi
- done = Signal()
+ done = Signal()
count = Signal(bits_for(trefi), reset=trefi-1)
with m.If(self.wait & ~self.done):
# RefreshPostponer -------------------------------------------------------------------------------
+
class RefreshPostponer(Elaboratable):
"""Refresh Postponer
Postpone N Refresh requests and generate a request when N is reached.
"""
+
def __init__(self, postponing=1):
self.req_i = Signal()
self.req_o = Signal()
# ZQCSExecuter ----------------------------------------------------------------------------------
+
class ZQCSExecuter(Elaboratable):
"""ZQ Short Calibration Executer
- Send an "ZQ Short Calibration" command
- Wait tZQCS
"""
+
def __init__(self, trp, tzqcs):
self.start = Signal()
- self.done = Signal()
+ self.done = Signal()
self._trp = trp
self._tzqcs = tzqcs
tl = Timeline([
# Precharge All
(0, [
- self.a.eq( 2**10),
- self.ba.eq( 0),
+ self.a.eq(2**10),
+ self.ba.eq(0),
self.cas.eq(0),
self.ras.eq(1),
self.we.eq(1),
]),
# ZQ Short Calibration after tRP
(trp, [
- self.a.eq( 0),
- self.ba.eq( 0),
+ self.a.eq(0),
+ self.ba.eq(0),
self.cas.eq(0),
self.ras.eq(0),
- self.we.eq( 1),
+ self.we.eq(1),
self.done.eq(0),
]),
# Done after tRP + tZQCS
(trp + tzqcs, [
- self.a.eq( 0),
- self.ba.eq( 0),
+ self.a.eq(0),
+ self.ba.eq(0),
self.cas.eq(0),
self.ras.eq(0),
- self.we.eq( 0),
+ self.we.eq(0),
self.done.eq(1)
]),
])
# Refresher ----------------------------------------------------------------------------------------
+
class Refresher(Elaboratable):
"""Refresher
transactions are done, the Refresher can execute the refresh Sequence and release the Controller.
"""
+
def __init__(self, settings, clk_freq, zqcs_freq=1e0, postponing=1):
assert postponing <= 8
- abits = settings.geom.addressbits
+ abits = settings.geom.addressbits
babits = settings.geom.bankbits + log2_int(settings.phy.nranks)
- self.cmd = cmd = stream.Endpoint(cmd_request_rw_layout(a=abits, ba=babits))
+ self.cmd = cmd = stream.Endpoint(
+ cmd_request_rw_layout(a=abits, ba=babits))
self._postponing = postponing
self._settings = settings
self._clk_freq = clk_freq
m = Module()
wants_refresh = Signal()
- wants_zqcs = Signal()
+ wants_zqcs = Signal()
settings = self._settings
]
# Refresh Sequencer ------------------------------------------------------------------------
- sequencer = RefreshSequencer(settings.timing.tRP, settings.timing.tRFC, self._postponing)
+ sequencer = RefreshSequencer(
+ settings.timing.tRP, settings.timing.tRFC, self._postponing)
m.submodules.sequencer = sequencer
if settings.timing.tZQCS is not None:
m.d.comb += wants_zqcs.eq(zqcs_timer.done)
# ZQCS Executer ------------------------------------------------------------------------
- zqcs_executer = ZQCSExecuter(settings.timing.tRP, settings.timing.tZQCS)
+ zqcs_executer = ZQCSExecuter(
+ settings.timing.tRP, settings.timing.tZQCS)
m.submodules.zqs_executer = zqcs_executer
m.d.comb += zqcs_timer.wait.eq(~zqcs_executer.done)
# PhaseInjector ------------------------------------------------------------------------------------
+
class PhaseInjector(Elaboratable):
def __init__(self, csr_bank, phase):
self._command = csr_bank.csr(6, "rw")
m.d.comb += [
self._phase.address.eq(self._address.r_data),
self._phase.bank.eq(self._baddress.r_data),
- self._phase.wrdata_en.eq(self._command_issue.r_stb & self._command.r_data[4]),
- self._phase.rddata_en.eq(self._command_issue.r_stb & self._command.r_data[5]),
+ self._phase.wrdata_en.eq(
+ self._command_issue.r_stb & self._command.r_data[4]),
+ self._phase.rddata_en.eq(
+ self._command_issue.r_stb & self._command.r_data[5]),
self._phase.wrdata.eq(self._wrdata.r_data),
self._phase.wrdata_mask.eq(0)
]
with m.If(self._command_issue.r_stb):
m.d.comb += [
- self._phase.cs_n.eq(Repl(value=~self._command.r_data[0], count=len(self._phase.cs_n))),
+ self._phase.cs_n.eq(
+ Repl(value=~self._command.r_data[0], count=len(self._phase.cs_n))),
self._phase.we_n.eq(~self._command.r_data[1]),
self._phase.cas_n.eq(~self._command.r_data[2]),
self._phase.ras_n.eq(~self._command.r_data[3]),
]
with m.Else():
m.d.comb += [
- self._phase.cs_n.eq(Repl(value=1, count=len(self._phase.cs_n))),
+ self._phase.cs_n.eq(
+ Repl(value=1, count=len(self._phase.cs_n))),
self._phase.we_n.eq(1),
self._phase.cas_n.eq(1),
self._phase.ras_n.eq(1),
# DFIInjector --------------------------------------------------------------------------------------
+
class DFIInjector(Elaboratable):
def __init__(self, csr_bank, addressbits, bankbits, nranks, databits, nphases=1):
self._nranks = nranks
- self._inti = dfi.Interface(addressbits, bankbits, nranks, databits, nphases)
- self.slave = dfi.Interface(addressbits, bankbits, nranks, databits, nphases)
- self.master = dfi.Interface(addressbits, bankbits, nranks, databits, nphases)
+ self._inti = dfi.Interface(
+ addressbits, bankbits, nranks, databits, nphases)
+ self.slave = dfi.Interface(
+ addressbits, bankbits, nranks, databits, nphases)
+ self.master = dfi.Interface(
+ addressbits, bankbits, nranks, databits, nphases)
self._control = csr_bank.csr(4, "rw") # sel, cke, odt, reset_n
self._phases = []
for n, phase in enumerate(self._inti.phases):
- self._phases += [PhaseInjector(CSRPrefixProxy(csr_bank, "p{}".format(n)), phase)]
+ self._phases += [PhaseInjector(CSRPrefixProxy(csr_bank,
+ "p{}".format(n)), phase)]
def elaborate(self, platform):
m = Module()
m.d.comb += self._inti.connect(self.master)
for i in range(self._nranks):
- m.d.comb += [phase.cke[i].eq(self._control.r_data[1]) for phase in self._inti.phases]
- m.d.comb += [phase.odt[i].eq(self._control.r_data[2]) for phase in self._inti.phases if hasattr(phase, "odt")]
- m.d.comb += [phase.reset_n.eq(self._control.r_data[3]) for phase in self._inti.phases if hasattr(phase, "reset_n")]
+ m.d.comb += [phase.cke[i].eq(self._control.r_data[1])
+ for phase in self._inti.phases]
+ m.d.comb += [phase.odt[i].eq(self._control.r_data[2])
+ for phase in self._inti.phases if hasattr(phase, "odt")]
+ m.d.comb += [phase.reset_n.eq(self._control.r_data[3])
+ for phase in self._inti.phases if hasattr(phase, "reset_n")]
return m
# LiteDRAMNativePortCDC ----------------------------------------------------------------------------
+
class gramNativePortCDC(Elaboratable):
def __init__(self, port_from, port_to,
- cmd_depth = 4,
- wdata_depth = 16,
- rdata_depth = 16):
+ cmd_depth=4,
+ wdata_depth=16,
+ rdata_depth=16):
assert port_from.address_width == port_to.address_width
- assert port_from.data_width == port_to.data_width
- assert port_from.mode == port_to.mode
+ assert port_from.data_width == port_to.data_width
+ assert port_from.mode == port_to.mode
self._port_from = port_from
self._port_to = port_to
# LiteDRAMNativePortDownConverter ------------------------------------------------------------------
+
class gramNativePortDownConverter(Elaboratable):
"""LiteDRAM port DownConverter
- A read from the user generates N reads to the controller and returned
datas are regrouped in a single data presented to the user.
"""
+
def __init__(self, port_from, port_to, reverse=False):
assert port_from.clock_domain == port_to.clock_domain
- assert port_from.data_width > port_to.data_width
- assert port_from.mode == port_to.mode
+ assert port_from.data_width > port_to.data_width
+ assert port_from.mode == port_to.mode
if port_from.data_width % port_to.data_width:
raise ValueError("Ratio must be an int")
reverse = self._reverse
ratio = port_from.data_width//port_to.data_width
- mode = port_from.mode
+ mode = port_from.mode
- counter = Signal(max=ratio)
+ counter = Signal(max=ratio)
counter_reset = Signal()
- counter_ce = Signal()
+ counter_ce = Signal()
with m.If(counter_reset):
m.d.sync += counter.eq(0)
# LiteDRAMNativeWritePortUpConverter ---------------------------------------------------------------
+
class gramNativeWritePortUpConverter(Elaboratable):
# TODO: finish and remove hack
"""LiteDRAM write port UpConverter
- N writes from user are regrouped in a single one to the controller
(when possible, ie when consecutive and bursting)
"""
+
def __init__(self, port_from, port_to, reverse=False):
assert port_from.clock_domain == port_to.clock_domain
- assert port_from.data_width < port_to.data_width
- assert port_from.mode == port_to.mode
- assert port_from.mode == "write"
+ assert port_from.data_width < port_to.data_width
+ assert port_from.mode == port_to.mode
+ assert port_from.mode == "write"
if port_to.data_width % port_from.data_width:
raise ValueError("Ratio must be an int")
ratio = port_to.data_width//port_from.data_width
- we = Signal()
+ we = Signal()
address = Signal(port_to.address_width)
- counter = Signal(max=ratio)
+ counter = Signal(max=ratio)
counter_reset = Signal()
- counter_ce = Signal()
+ counter_ce = Signal()
self.sync += \
If(counter_reset,
counter.eq(0)
- ).Elif(counter_ce,
- counter.eq(counter + 1)
- )
+ ).Elif(counter_ce,
+ counter.eq(counter + 1)
+ )
with m.FSM():
with m.State("Idle"):
# LiteDRAMNativeReadPortUpConverter ----------------------------------------------------------------
+
class gramNativeReadPortUpConverter(Elaboratable):
"""LiteDRAM port UpConverter
- N read from user are regrouped in a single one to the controller
(when possible, ie when consecutive and bursting)
"""
+
def __init__(self, port_from, port_to, reverse=False):
assert port_from.clock_domain == port_to.clock_domain
- assert port_from.data_width < port_to.data_width
- assert port_from.mode == port_to.mode
- assert port_from.mode == "read"
+ assert port_from.data_width < port_to.data_width
+ assert port_from.mode == port_to.mode
+ assert port_from.mode == "read"
if port_to.data_width % port_from.data_width:
raise ValueError("Ratio must be an int")
# Datapath ---------------------------------------------------------------------------------
- rdata_buffer = stream.Buffer(port_to.rdata.description)
+ rdata_buffer = stream.Buffer(port_to.rdata.description)
rdata_converter = stream.StrideConverter(
port_to.rdata.description,
port_from.rdata.description,
reverse=reverse)
- m.submodules += rdata_buffer, rdata_converter
+ m.submodules += rdata_buffer, rdata_converter
- rdata_chunk = Signal(ratio, reset=1)
+ rdata_chunk = Signal(ratio, reset=1)
rdata_chunk_valid = Signal()
with m.If(rdata_converter.source.valid & rdata_converter.source.ready):
- m.d.sync += rdata_chunk.eq(Cat(rdata_chunk[ratio-1], rdata_chunk[:ratio-1]))
+ m.d.sync += rdata_chunk.eq(
+ Cat(rdata_chunk[ratio-1], rdata_chunk[:ratio-1]))
m.d.comb += [
port_to.rdata.connect(rdata_buffer.sink),
rdata_buffer.source.connect(rdata_converter.sink),
rdata_chunk_valid.eq((cmd_buffer.source.sel & rdata_chunk) != 0),
- cmd_buffer.source.ready.eq(rdata_converter.source.ready & rdata_chunk[ratio-1]),
+ cmd_buffer.source.ready.eq(
+ rdata_converter.source.ready & rdata_chunk[ratio-1]),
]
with m.If(port_from.flush):
# LiteDRAMNativePortConverter ----------------------------------------------------------------------
+
class LiteDRAMNativePortConverter(Elaboratable):
def __init__(self, port_from, port_to, reverse=False):
assert port_from.clock_domain == port_to.clock_domain
- assert port_from.mode == port_to.mode
+ assert port_from.mode == port_to.mode
self._port_from = port_from
self._port_to = port_to
mode = port_from.mode
if port_from.data_width > port_to.data_width:
- converter = gramNativePortDownConverter(port_from, port_to, reverse)
+ converter = gramNativePortDownConverter(
+ port_from, port_to, reverse)
m.submodules += converter
elif port_from.data_width < port_to.data_width:
if mode == "write":
- converter = gramNativeWritePortUpConverter(port_from, port_to, reverse)
+ converter = gramNativeWritePortUpConverter(
+ port_from, port_to, reverse)
elif mode == "read":
- converter = gramNativeReadPortUpConverter(port_from, port_to, reverse)
+ converter = gramNativeReadPortUpConverter(
+ port_from, port_to, reverse)
else:
raise NotImplementedError
m.submodules += converter
# LFSR ---------------------------------------------------------------------------------------------
+
class LFSR(Module):
"""Linear-Feedback Shift Register to generate a pseudo-random sequence.
o : out
Output data
"""
+
def __init__(self, n_out, n_state, taps):
self.o = Signal(n_out)
# # #
- state = Signal(n_state)
+ state = Signal(n_state)
curval = [state[i] for i in range(n_state)]
curval += [0]*(n_out - n_state)
for i in range(n_out):
# Counter ------------------------------------------------------------------------------------------
+
class Counter(Module):
"""Simple incremental counter.
o : out
Output data
"""
+
def __init__(self, n_out):
self.o = Signal(n_out)
# Generator ----------------------------------------------------------------------------------------
+
@CEInserter()
class Generator(Module):
"""Address/Data Generator.
o : out
Output data
"""
+
def __init__(self, n_out, n_state, taps):
self.random_enable = Signal()
self.o = Signal(n_out)
# # #
- lfsr = LFSR(n_out, n_state, taps)
+ lfsr = LFSR(n_out, n_state, taps)
count = Counter(n_out)
self.submodules += lfsr, count
self.comb += \
If(self.random_enable,
self.o.eq(lfsr.o)
- ).Else(
+ ).Else(
self.o.eq(count.o)
)
# _LiteDRAMBISTGenerator ---------------------------------------------------------------------------
+
@ResetInserter()
class _LiteDRAMBISTGenerator(Module):
def __init__(self, dram_port):
ashift, awidth = get_ashift_awidth(dram_port)
- self.start = Signal()
- self.done = Signal()
- self.base = Signal(awidth)
- self.end = Signal(awidth)
- self.length = Signal(awidth)
+ self.start = Signal()
+ self.done = Signal()
+ self.base = Signal(awidth)
+ self.end = Signal(awidth)
+ self.length = Signal(awidth)
self.random_data = Signal()
self.random_addr = Signal()
- self.ticks = Signal(32)
+ self.ticks = Signal(32)
- self.run_cascade_in = Signal(reset=1)
+ self.run_cascade_in = Signal(reset=1)
self.run_cascade_out = Signal()
# # #
# Data / Address generators ----------------------------------------------------------------
- data_gen = Generator(31, n_state=31, taps=[27, 30]) # PRBS31
+ data_gen = Generator(31, n_state=31, taps=[27, 30]) # PRBS31
addr_gen = Generator(31, n_state=31, taps=[27, 30])
self.submodules += data_gen, addr_gen
self.comb += data_gen.random_enable.eq(self.random_data)
fsm = FSM(reset_state="IDLE")
self.submodules += fsm
fsm.act("IDLE",
- If(self.start,
- NextValue(cmd_counter, 0),
- NextState("RUN")
- ),
- NextValue(self.ticks, 0)
- )
+ If(self.start,
+ NextValue(cmd_counter, 0),
+ NextState("RUN")
+ ),
+ NextValue(self.ticks, 0)
+ )
fsm.act("WAIT",
- If(self.run_cascade_in,
- NextState("RUN")
- )
- )
+ If(self.run_cascade_in,
+ NextState("RUN")
+ )
+ )
fsm.act("RUN",
- dma.sink.valid.eq(1),
- If(dma.sink.ready,
- self.run_cascade_out.eq(1),
- data_gen.ce.eq(1),
- addr_gen.ce.eq(1),
- NextValue(cmd_counter, cmd_counter + 1),
- If(cmd_counter == (self.length[ashift:] - 1),
- NextState("DONE")
- ).Elif(~self.run_cascade_in,
- NextState("WAIT")
+ dma.sink.valid.eq(1),
+ If(dma.sink.ready,
+ self.run_cascade_out.eq(1),
+ data_gen.ce.eq(1),
+ addr_gen.ce.eq(1),
+ NextValue(cmd_counter, cmd_counter + 1),
+ If(cmd_counter == (self.length[ashift:] - 1),
+ NextState("DONE")
+ ).Elif(~self.run_cascade_in,
+ NextState("WAIT")
+ )
+ ),
+ NextValue(self.ticks, self.ticks + 1)
)
- ),
- NextValue(self.ticks, self.ticks + 1)
- )
fsm.act("DONE",
- self.run_cascade_out.eq(1),
- self.done.eq(1)
- )
+ self.run_cascade_out.eq(1),
+ self.done.eq(1)
+ )
- if isinstance(dram_port, LiteDRAMNativePort): # addressing in dwords
+ if isinstance(dram_port, LiteDRAMNativePort): # addressing in dwords
dma_sink_addr = dma.sink.address
elif isinstance(dram_port, LiteDRAMAXIPort): # addressing in bytes
dma_sink_addr = dma.sink.address[ashift:]
else:
raise NotImplementedError
- self.comb += dma_sink_addr.eq(self.base[ashift:] + (addr_gen.o & addr_mask))
+ self.comb += dma_sink_addr.eq(self.base[ashift:] +
+ (addr_gen.o & addr_mask))
self.comb += dma.sink.data.eq(data_gen.o)
class _LiteDRAMPatternGenerator(Module):
def __init__(self, dram_port, init=[]):
ashift, awidth = get_ashift_awidth(dram_port)
- self.start = Signal()
- self.done = Signal()
- self.ticks = Signal(32)
+ self.start = Signal()
+ self.done = Signal()
+ self.ticks = Signal(32)
- self.run_cascade_in = Signal(reset=1)
+ self.run_cascade_in = Signal(reset=1)
self.run_cascade_out = Signal()
# # #
# Data / Address pattern -------------------------------------------------------------------
addr_init, data_init = zip(*init)
- addr_mem = Memory(dram_port.address_width, len(addr_init), init=addr_init)
- data_mem = Memory(dram_port.data_width, len(data_init), init=data_init)
+ addr_mem = Memory(dram_port.address_width,
+ len(addr_init), init=addr_init)
+ data_mem = Memory(dram_port.data_width,
+ len(data_init), init=data_init)
addr_port = addr_mem.get_port(async_read=True)
data_port = data_mem.get_port(async_read=True)
self.specials += addr_mem, data_mem, addr_port, data_port
fsm = FSM(reset_state="IDLE")
self.submodules += fsm
fsm.act("IDLE",
- If(self.start,
- NextValue(cmd_counter, 0),
- NextState("RUN")
- ),
- NextValue(self.ticks, 0)
- )
+ If(self.start,
+ NextValue(cmd_counter, 0),
+ NextState("RUN")
+ ),
+ NextValue(self.ticks, 0)
+ )
fsm.act("WAIT",
- If(self.run_cascade_in,
- NextState("RUN")
- )
- )
+ If(self.run_cascade_in,
+ NextState("RUN")
+ )
+ )
fsm.act("RUN",
- dma.sink.valid.eq(1),
- If(dma.sink.ready,
- self.run_cascade_out.eq(1),
- NextValue(cmd_counter, cmd_counter + 1),
- If(cmd_counter == (len(init) - 1),
- NextState("DONE")
- ).Elif(~self.run_cascade_in,
- NextState("WAIT")
+ dma.sink.valid.eq(1),
+ If(dma.sink.ready,
+ self.run_cascade_out.eq(1),
+ NextValue(cmd_counter, cmd_counter + 1),
+ If(cmd_counter == (len(init) - 1),
+ NextState("DONE")
+ ).Elif(~self.run_cascade_in,
+ NextState("WAIT")
+ )
+ ),
+ NextValue(self.ticks, self.ticks + 1)
)
- ),
- NextValue(self.ticks, self.ticks + 1)
- )
fsm.act("DONE",
- self.run_cascade_out.eq(1),
- self.done.eq(1)
- )
+ self.run_cascade_out.eq(1),
+ self.done.eq(1)
+ )
- if isinstance(dram_port, LiteDRAMNativePort): # addressing in dwords
+ if isinstance(dram_port, LiteDRAMNativePort): # addressing in dwords
dma_sink_addr = dma.sink.address
elif isinstance(dram_port, LiteDRAMAXIPort): # addressing in bytes
dma_sink_addr = dma.sink.address[ashift:]
# LiteDRAMBISTGenerator ----------------------------------------------------------------------------
+
class LiteDRAMBISTGenerator(Module, AutoCSR):
"""DRAM memory pattern generator.
ticks : out
Duration of the generation.
"""
+
def __init__(self, dram_port):
ashift, awidth = get_ashift_awidth(dram_port)
- self.reset = CSR()
- self.start = CSR()
- self.done = CSRStatus()
- self.base = CSRStorage(awidth)
- self.end = CSRStorage(awidth)
- self.length = CSRStorage(awidth)
- self.random = CSRStorage(fields=[
+ self.reset = CSR()
+ self.start = CSR()
+ self.done = CSRStatus()
+ self.base = CSRStorage(awidth)
+ self.end = CSRStorage(awidth)
+ self.length = CSRStorage(awidth)
+ self.random = CSRStorage(fields=[
CSRField("data", size=1),
CSRField("addr", size=1),
])
- self.ticks = CSRStatus(32)
+ self.ticks = CSRStatus(32)
# # #
("ticks", 32),
]
control_cdc = stream.AsyncFIFO(control_layout)
- control_cdc = ClockDomainsRenamer({"write" : "sys", "read": clock_domain})(control_cdc)
- status_cdc = stream.AsyncFIFO(status_layout)
- status_cdc = ClockDomainsRenamer({"write" : clock_domain, "read": "sys"})(status_cdc)
+ control_cdc = ClockDomainsRenamer(
+ {"write": "sys", "read": clock_domain})(control_cdc)
+ status_cdc = stream.AsyncFIFO(status_layout)
+ status_cdc = ClockDomainsRenamer(
+ {"write": clock_domain, "read": "sys"})(status_cdc)
self.submodules += control_cdc, status_cdc
# Control CDC In
self.comb += [
# Control CDC Out
self.comb += [
control_cdc.source.ready.eq(1),
- core.reset.eq(control_cdc.source.valid & control_cdc.source.reset),
- core.start.eq(control_cdc.source.valid & control_cdc.source.start),
+ core.reset.eq(control_cdc.source.valid &
+ control_cdc.source.reset),
+ core.start.eq(control_cdc.source.valid &
+ control_cdc.source.start),
]
self.sync += [
If(control_cdc.source.valid,
core.length.eq(control_cdc.source.length),
core.random_data.eq(control_cdc.source.random_data),
core.random_addr.eq(control_cdc.source.random_addr),
- )
+ )
]
# Status CDC In
self.comb += [
If(status_cdc.source.valid,
self.done.status.eq(status_cdc.source.done),
self.ticks.status.eq(status_cdc.source.ticks),
- )
+ )
]
else:
self.comb += [
# _LiteDRAMBISTChecker -----------------------------------------------------------------------------
+
@ResetInserter()
class _LiteDRAMBISTChecker(Module, AutoCSR):
def __init__(self, dram_port):
ashift, awidth = get_ashift_awidth(dram_port)
- self.start = Signal()
- self.done = Signal()
- self.base = Signal(awidth)
- self.end = Signal(awidth)
- self.length = Signal(awidth)
+ self.start = Signal()
+ self.done = Signal()
+ self.base = Signal(awidth)
+ self.end = Signal(awidth)
+ self.length = Signal(awidth)
self.random_data = Signal()
self.random_addr = Signal()
- self.ticks = Signal(32)
- self.errors = Signal(32)
+ self.ticks = Signal(32)
+ self.errors = Signal(32)
- self.run_cascade_in = Signal(reset=1)
+ self.run_cascade_in = Signal(reset=1)
self.run_cascade_out = Signal()
# # #
# Data / Address generators ----------------------------------------------------------------
- data_gen = Generator(31, n_state=31, taps=[27, 30]) # PRBS31
+ data_gen = Generator(31, n_state=31, taps=[27, 30]) # PRBS31
addr_gen = Generator(31, n_state=31, taps=[27, 30])
self.submodules += data_gen, addr_gen
self.comb += data_gen.random_enable.eq(self.random_data)
cmd_fsm = FSM(reset_state="IDLE")
self.submodules += cmd_fsm
cmd_fsm.act("IDLE",
- If(self.start,
- NextValue(cmd_counter, 0),
- NextState("WAIT")
- )
- )
+ If(self.start,
+ NextValue(cmd_counter, 0),
+ NextState("WAIT")
+ )
+ )
cmd_fsm.act("WAIT",
- If(self.run_cascade_in,
- NextState("RUN")
- )
- )
+ If(self.run_cascade_in,
+ NextState("RUN")
+ )
+ )
cmd_fsm.act("RUN",
- dma.sink.valid.eq(1),
- If(dma.sink.ready,
- self.run_cascade_out.eq(1),
- addr_gen.ce.eq(1),
- NextValue(cmd_counter, cmd_counter + 1),
- If(cmd_counter == (self.length[ashift:] - 1),
- NextState("DONE")
- ).Elif(~self.run_cascade_in,
- NextState("WAIT")
- )
- )
- )
+ dma.sink.valid.eq(1),
+ If(dma.sink.ready,
+ self.run_cascade_out.eq(1),
+ addr_gen.ce.eq(1),
+ NextValue(cmd_counter, cmd_counter + 1),
+ If(cmd_counter == (self.length[ashift:] - 1),
+ NextState("DONE")
+ ).Elif(~self.run_cascade_in,
+ NextState("WAIT")
+ )
+ )
+ )
cmd_fsm.act("DONE")
- if isinstance(dram_port, LiteDRAMNativePort): # addressing in dwords
+ if isinstance(dram_port, LiteDRAMNativePort): # addressing in dwords
dma_sink_addr = dma.sink.address
elif isinstance(dram_port, LiteDRAMAXIPort): # addressing in bytes
dma_sink_addr = dma.sink.address[ashift:]
else:
raise NotImplementedError
- self.comb += dma_sink_addr.eq(self.base[ashift:] + (addr_gen.o & addr_mask))
+ self.comb += dma_sink_addr.eq(self.base[ashift:] +
+ (addr_gen.o & addr_mask))
# Data FSM ---------------------------------------------------------------------------------
data_counter = Signal(dram_port.address_width, reset_less=True)
data_fsm = FSM(reset_state="IDLE")
self.submodules += data_fsm
data_fsm.act("IDLE",
- If(self.start,
- NextValue(data_counter, 0),
- NextValue(self.errors, 0),
- NextState("RUN")
- ),
- NextValue(self.ticks, 0)
- )
+ If(self.start,
+ NextValue(data_counter, 0),
+ NextValue(self.errors, 0),
+ NextState("RUN")
+ ),
+ NextValue(self.ticks, 0)
+ )
data_fsm.act("RUN",
- dma.source.ready.eq(1),
- If(dma.source.valid,
- data_gen.ce.eq(1),
- NextValue(data_counter, data_counter + 1),
- If(dma.source.data != data_gen.o[:min(len(data_gen.o), dram_port.data_width)],
- NextValue(self.errors, self.errors + 1)
- ),
- If(data_counter == (self.length[ashift:] - 1),
- NextState("DONE")
- )
- ),
- NextValue(self.ticks, self.ticks + 1)
- )
+ dma.source.ready.eq(1),
+ If(dma.source.valid,
+ data_gen.ce.eq(1),
+ NextValue(data_counter, data_counter + 1),
+ If(dma.source.data != data_gen.o[:min(len(data_gen.o), dram_port.data_width)],
+ NextValue(self.errors, self.errors + 1)
+ ),
+ If(data_counter == (self.length[ashift:] - 1),
+ NextState("DONE")
+ )
+ ),
+ NextValue(self.ticks, self.ticks + 1)
+ )
data_fsm.act("DONE",
- self.done.eq(1)
- )
+ self.done.eq(1)
+ )
+
@ResetInserter()
class _LiteDRAMPatternChecker(Module, AutoCSR):
def __init__(self, dram_port, init=[]):
ashift, awidth = get_ashift_awidth(dram_port)
- self.start = Signal()
- self.done = Signal()
- self.ticks = Signal(32)
+ self.start = Signal()
+ self.done = Signal()
+ self.ticks = Signal(32)
self.errors = Signal(32)
- self.run_cascade_in = Signal(reset=1)
+ self.run_cascade_in = Signal(reset=1)
self.run_cascade_out = Signal()
# # #
# Data / Address pattern -------------------------------------------------------------------
addr_init, data_init = zip(*init)
- addr_mem = Memory(dram_port.address_width, len(addr_init), init=addr_init)
- data_mem = Memory(dram_port.data_width, len(data_init), init=data_init)
+ addr_mem = Memory(dram_port.address_width,
+ len(addr_init), init=addr_init)
+ data_mem = Memory(dram_port.data_width,
+ len(data_init), init=data_init)
addr_port = addr_mem.get_port(async_read=True)
data_port = data_mem.get_port(async_read=True)
self.specials += addr_mem, data_mem, addr_port, data_port
cmd_fsm = FSM(reset_state="IDLE")
self.submodules += cmd_fsm
cmd_fsm.act("IDLE",
- If(self.start,
- NextValue(cmd_counter, 0),
- If(self.run_cascade_in,
- NextState("RUN")
- ).Else(
- NextState("WAIT")
- )
- )
- )
+ If(self.start,
+ NextValue(cmd_counter, 0),
+ If(self.run_cascade_in,
+ NextState("RUN")
+ ).Else(
+ NextState("WAIT")
+ )
+ )
+ )
cmd_fsm.act("WAIT",
- If(self.run_cascade_in,
- NextState("RUN")
- ),
- NextValue(self.ticks, self.ticks + 1)
- )
+ If(self.run_cascade_in,
+ NextState("RUN")
+ ),
+ NextValue(self.ticks, self.ticks + 1)
+ )
cmd_fsm.act("RUN",
- dma.sink.valid.eq(1),
- If(dma.sink.ready,
- self.run_cascade_out.eq(1),
- NextValue(cmd_counter, cmd_counter + 1),
- If(cmd_counter == (len(init) - 1),
- NextState("DONE")
- ).Elif(~self.run_cascade_in,
- NextState("WAIT")
- )
- )
- )
+ dma.sink.valid.eq(1),
+ If(dma.sink.ready,
+ self.run_cascade_out.eq(1),
+ NextValue(cmd_counter, cmd_counter + 1),
+ If(cmd_counter == (len(init) - 1),
+ NextState("DONE")
+ ).Elif(~self.run_cascade_in,
+ NextState("WAIT")
+ )
+ )
+ )
cmd_fsm.act("DONE")
- if isinstance(dram_port, LiteDRAMNativePort): # addressing in dwords
+ if isinstance(dram_port, LiteDRAMNativePort): # addressing in dwords
dma_sink_addr = dma.sink.address
elif isinstance(dram_port, LiteDRAMAXIPort): # addressing in bytes
dma_sink_addr = dma.sink.address[ashift:]
data_fsm = FSM(reset_state="IDLE")
self.submodules += data_fsm
data_fsm.act("IDLE",
- If(self.start,
- NextValue(data_counter, 0),
- NextValue(self.errors, 0),
- NextState("RUN")
- ),
- NextValue(self.ticks, 0)
- )
+ If(self.start,
+ NextValue(data_counter, 0),
+ NextValue(self.errors, 0),
+ NextState("RUN")
+ ),
+ NextValue(self.ticks, 0)
+ )
data_fsm.act("RUN",
- dma.source.ready.eq(1),
- If(dma.source.valid,
- NextValue(data_counter, data_counter + 1),
- If(dma.source.data != expected_data,
- NextValue(self.errors, self.errors + 1)
- ),
- If(data_counter == (len(init) - 1),
- NextState("DONE")
- )
- ),
- NextValue(self.ticks, self.ticks + 1)
- )
+ dma.source.ready.eq(1),
+ If(dma.source.valid,
+ NextValue(data_counter, data_counter + 1),
+ If(dma.source.data != expected_data,
+ NextValue(self.errors, self.errors + 1)
+ ),
+ If(data_counter == (len(init) - 1),
+ NextState("DONE")
+ )
+ ),
+ NextValue(self.ticks, self.ticks + 1)
+ )
data_fsm.act("DONE",
- self.done.eq(1)
- )
+ self.done.eq(1)
+ )
# LiteDRAMBISTChecker ------------------------------------------------------------------------------
+
class LiteDRAMBISTChecker(Module, AutoCSR):
"""DRAM memory pattern checker.
errors : out
Number of DRAM words which don't match.
"""
+
def __init__(self, dram_port):
ashift, awidth = get_ashift_awidth(dram_port)
- self.reset = CSR()
- self.start = CSR()
- self.done = CSRStatus()
- self.base = CSRStorage(awidth)
- self.end = CSRStorage(awidth)
- self.length = CSRStorage(awidth)
- self.random = CSRStorage(fields=[
+ self.reset = CSR()
+ self.start = CSR()
+ self.done = CSRStatus()
+ self.base = CSRStorage(awidth)
+ self.end = CSRStorage(awidth)
+ self.length = CSRStorage(awidth)
+ self.random = CSRStorage(fields=[
CSRField("data", size=1),
CSRField("addr", size=1),
])
- self.ticks = CSRStatus(32)
- self.errors = CSRStatus(32)
+ self.ticks = CSRStatus(32)
+ self.errors = CSRStatus(32)
# # #
("errors", 32),
]
control_cdc = stream.AsyncFIFO(control_layout)
- control_cdc = ClockDomainsRenamer({"write" : "sys", "read": clock_domain})(control_cdc)
- status_cdc = stream.AsyncFIFO(status_layout)
- status_cdc = ClockDomainsRenamer({"write" : clock_domain, "read": "sys"})(status_cdc)
+ control_cdc = ClockDomainsRenamer(
+ {"write": "sys", "read": clock_domain})(control_cdc)
+ status_cdc = stream.AsyncFIFO(status_layout)
+ status_cdc = ClockDomainsRenamer(
+ {"write": clock_domain, "read": "sys"})(status_cdc)
self.submodules += control_cdc, status_cdc
# Control CDC In
self.comb += [
# Control CDC Out
self.comb += [
control_cdc.source.ready.eq(1),
- core.reset.eq(control_cdc.source.valid & control_cdc.source.reset),
- core.start.eq(control_cdc.source.valid & control_cdc.source.start),
+ core.reset.eq(control_cdc.source.valid &
+ control_cdc.source.reset),
+ core.start.eq(control_cdc.source.valid &
+ control_cdc.source.start),
]
self.sync += [
If(control_cdc.source.valid,
core.length.eq(control_cdc.source.length),
core.random_data.eq(control_cdc.source.random_data),
core.random_addr.eq(control_cdc.source.random_addr),
- )
+ )
]
# Status CDC In
self.comb += [
self.done.status.eq(status_cdc.source.done),
self.ticks.status.eq(status_cdc.source.ticks),
self.errors.status.eq(status_cdc.source.errors),
- )
+ )
]
else:
self.comb += [
# LiteDRAMDMAReader --------------------------------------------------------------------------------
+
class LiteDRAMDMAReader(Module, AutoCSR):
"""Read data from DRAM memory.
def __init__(self, port, fifo_depth=16, fifo_buffered=False):
assert isinstance(port, (LiteDRAMNativePort, LiteDRAMAXIPort))
- self.port = port
- self.sink = sink = stream.Endpoint([("address", port.address_width)])
+ self.port = port
+ self.sink = sink = stream.Endpoint([("address", port.address_width)])
self.source = source = stream.Endpoint([("data", port.data_width)])
# # #
# Native / AXI selection
is_native = isinstance(port, LiteDRAMNativePort)
- is_axi = isinstance(port, LiteDRAMAXIPort)
+ is_axi = isinstance(port, LiteDRAMAXIPort)
if is_native:
(cmd, rdata) = port.cmd, port.rdata
elif is_axi:
self.sync += [
If(request_issued,
If(~data_dequeued, rsv_level.eq(self.rsv_level + 1))
- ).Elif(data_dequeued,
- rsv_level.eq(rsv_level - 1)
- )
+ ).Elif(data_dequeued,
+ rsv_level.eq(rsv_level - 1)
+ )
]
self.comb += request_enable.eq(rsv_level != fifo_depth)
# FIFO -------------------------------------------------------------------------------------
- fifo = stream.SyncFIFO([("data", port.data_width)], fifo_depth, fifo_buffered)
+ fifo = stream.SyncFIFO(
+ [("data", port.data_width)], fifo_depth, fifo_buffered)
self.submodules += fifo
self.comb += [
]
def add_csr(self):
- self._base = CSRStorage(32)
+ self._base = CSRStorage(32)
self._length = CSRStorage(32)
- self._start = CSR()
- self._done = CSRStatus()
- self._loop = CSRStorage()
+ self._start = CSR()
+ self._done = CSRStatus()
+ self._loop = CSRStorage()
# # #
- shift = log2_int(self.port.data_width//8)
- base = Signal(self.port.address_width)
- offset = Signal(self.port.address_width)
- length = Signal(self.port.address_width)
+ shift = log2_int(self.port.data_width//8)
+ base = Signal(self.port.address_width)
+ offset = Signal(self.port.address_width)
+ length = Signal(self.port.address_width)
self.comb += [
base.eq(self._base.storage[shift:]),
length.eq(self._length.storage[shift:]),
self.submodules.fsm = fsm = FSM(reset_state="IDLE")
fsm.act("IDLE",
- self._done.status.eq(1),
- If(self._start.re,
- NextValue(offset, 0),
- NextState("RUN"),
- )
- )
+ self._done.status.eq(1),
+ If(self._start.re,
+ NextValue(offset, 0),
+ NextState("RUN"),
+ )
+ )
fsm.act("RUN",
- self.sink.valid.eq(1),
- self.sink.address.eq(base + offset),
- If(self.sink.ready,
- NextValue(offset, offset + 1),
- If(offset == (length - 1),
- If(self._loop.storage,
- NextValue(offset, 0)
- ).Else(
- NextState("IDLE")
- )
+ self.sink.valid.eq(1),
+ self.sink.address.eq(base + offset),
+ If(self.sink.ready,
+ NextValue(offset, offset + 1),
+ If(offset == (length - 1),
+ If(self._loop.storage,
+ NextValue(offset, 0)
+ ).Else(
+ NextState("IDLE")
+ )
+ )
+ )
)
- )
- )
# LiteDRAMDMAWriter --------------------------------------------------------------------------------
+
class LiteDRAMDMAWriter(Module, AutoCSR):
"""Write data to DRAM memory.
sink : Record("address", "data")
Sink for DRAM addresses and DRAM data word to be written too.
"""
+
def __init__(self, port, fifo_depth=16, fifo_buffered=False):
assert isinstance(port, (LiteDRAMNativePort, LiteDRAMAXIPort))
self.port = port
# Native / AXI selection -------------------------------------------------------------------
is_native = isinstance(port, LiteDRAMNativePort)
- is_axi = isinstance(port, LiteDRAMAXIPort)
+ is_axi = isinstance(port, LiteDRAMAXIPort)
if is_native:
(cmd, wdata) = port.cmd, port.wdata
elif is_axi:
raise NotImplementedError
# FIFO -------------------------------------------------------------------------------------
- fifo = stream.SyncFIFO([("data", port.data_width)], fifo_depth, fifo_buffered)
+ fifo = stream.SyncFIFO(
+ [("data", port.data_width)], fifo_depth, fifo_buffered)
self.submodules += fifo
if is_native:
def add_csr(self):
self._sink = self.sink
- self.sink = stream.Endpoint([("data", self.port.data_width)])
+ self.sink = stream.Endpoint([("data", self.port.data_width)])
- self._base = CSRStorage(32)
+ self._base = CSRStorage(32)
self._length = CSRStorage(32)
- self._start = CSR()
- self._done = CSRStatus()
- self._loop = CSRStorage()
+ self._start = CSR()
+ self._done = CSRStatus()
+ self._loop = CSRStorage()
# # #
- shift = log2_int(self.port.data_width//8)
- base = Signal(self.port.address_width)
- offset = Signal(self.port.address_width)
- length = Signal(self.port.address_width)
+ shift = log2_int(self.port.data_width//8)
+ base = Signal(self.port.address_width)
+ offset = Signal(self.port.address_width)
+ length = Signal(self.port.address_width)
self.comb += [
base.eq(self._base.storage[shift:]),
length.eq(self._length.storage[shift:]),
self.submodules.fsm = fsm = FSM(reset_state="IDLE")
fsm.act("IDLE",
- self._done.status.eq(1),
- If(self._start.re,
- NextValue(offset, 0),
- NextState("RUN"),
- )
- )
+ self._done.status.eq(1),
+ If(self._start.re,
+ NextValue(offset, 0),
+ NextState("RUN"),
+ )
+ )
fsm.act("RUN",
- self._sink.valid.eq(self.sink.valid),
- self._sink.data.eq(self.sink.data),
- self._sink.address.eq(base + offset),
- self.sink.ready.eq(self._sink.ready),
- If(self.sink.valid & self.sink.ready,
- NextValue(offset, offset + 1),
- If(offset == (length - 1),
- If(self._loop.storage,
- NextValue(offset, 0)
- ).Else(
- NextState("IDLE")
- )
+ self._sink.valid.eq(self.sink.valid),
+ self._sink.data.eq(self.sink.data),
+ self._sink.address.eq(base + offset),
+ self.sink.ready.eq(self._sink.ready),
+ If(self.sink.valid & self.sink.ready,
+ NextValue(offset, offset + 1),
+ If(offset == (length - 1),
+ If(self._loop.storage,
+ NextValue(offset, 0)
+ ).Else(
+ NextState("IDLE")
+ )
+ )
+ )
)
- )
- )
class LiteDRAMNativePortECCW(Module):
def __init__(self, data_width_from, data_width_to):
- self.sink = sink = Endpoint(wdata_description(data_width_from))
+ self.sink = sink = Endpoint(wdata_description(data_width_from))
self.source = source = Endpoint(wdata_description(data_width_to))
# # #
self.submodules += encoder
self.comb += [
sink.connect(source, omit={"data", "we"}),
- encoder.i.eq(sink.data[i*data_width_from//8:(i+1)*data_width_from//8]),
- source.data[i*data_width_to//8:(i+1)*data_width_to//8].eq(encoder.o)
+ encoder.i.eq(sink.data[i*data_width_from //
+ 8:(i+1)*data_width_from//8]),
+ source.data[i*data_width_to //
+ 8:(i+1)*data_width_to//8].eq(encoder.o)
]
self.comb += source.we.eq(2**len(source.we)-1)
# LiteDRAMNativePortECCR ---------------------------------------------------------------------------
+
class LiteDRAMNativePortECCR(Module):
def __init__(self, data_width_from, data_width_to):
- self.sink = sink = Endpoint(rdata_description(data_width_to))
+ self.sink = sink = Endpoint(rdata_description(data_width_to))
self.source = source = Endpoint(rdata_description(data_width_from))
self.enable = Signal()
- self.sec = Signal(8)
- self.ded = Signal(8)
+ self.sec = Signal(8)
+ self.ded = Signal(8)
# # #
- self.comb += sink.connect(source, omit={"data"})
+ self.comb += sink.connect(source, omit={"data"})
for i in range(8):
decoder = ECCDecoder(data_width_from//8)
self.submodules += decoder
self.comb += [
decoder.enable.eq(self.enable),
- decoder.i.eq(sink.data[i*data_width_to//8:(i+1)*data_width_to//8]),
- source.data[i*data_width_from//8:(i+1)*data_width_from//8].eq(decoder.o),
+ decoder.i.eq(sink.data[i*data_width_to //
+ 8:(i+1)*data_width_to//8]),
+ source.data[i*data_width_from //
+ 8:(i+1)*data_width_from//8].eq(decoder.o),
If(source.valid,
self.sec[i].eq(decoder.sec),
self.ded[i].eq(decoder.ded)
- )
+ )
]
# LiteDRAMNativePortECC ----------------------------------------------------------------------------
+
class LiteDRAMNativePortECC(Module, AutoCSR):
def __init__(self, port_from, port_to, with_error_injection=False):
- _ , n = compute_m_n(port_from.data_width//8)
+ _, n = compute_m_n(port_from.data_width//8)
assert port_to.data_width >= (n + 1)*8
- self.enable = CSRStorage(reset=1)
- self.clear = CSR()
+ self.enable = CSRStorage(reset=1)
+ self.clear = CSR()
self.sec_errors = CSRStatus(32)
self.ded_errors = CSRStatus(32)
self.sec_detected = sec_detected = Signal()
self.comb += port_from.cmd.connect(port_to.cmd)
# Wdata (ecc encoding) ---------------------------------------------------------------------
- ecc_wdata = LiteDRAMNativePortECCW(port_from.data_width, port_to.data_width)
+ ecc_wdata = LiteDRAMNativePortECCW(
+ port_from.data_width, port_to.data_width)
ecc_wdata = BufferizeEndpoints({"source": DIR_SOURCE})(ecc_wdata)
self.submodules += ecc_wdata
self.comb += [
ecc_wdata.source.connect(port_to.wdata)
]
if with_error_injection:
- self.comb += port_to.wdata.data[:8].eq(self.flip.storage ^ ecc_wdata.source.data[:8])
+ self.comb += port_to.wdata.data[:8].eq(
+ self.flip.storage ^ ecc_wdata.source.data[:8])
# Rdata (ecc decoding) ---------------------------------------------------------------------
sec = Signal()
ded = Signal()
- ecc_rdata = LiteDRAMNativePortECCR(port_from.data_width, port_to.data_width)
+ ecc_rdata = LiteDRAMNativePortECCR(
+ port_from.data_width, port_to.data_width)
ecc_rdata = BufferizeEndpoints({"source": DIR_SOURCE})(ecc_rdata)
self.submodules += ecc_rdata
self.comb += [
ded_errors.eq(0),
sec_detected.eq(0),
ded_detected.eq(0),
- ).Else(
+ ).Else(
If(sec_errors != (2**len(sec_errors) - 1),
If(ecc_rdata.sec != 0,
sec_detected.eq(1),
sec_errors.eq(sec_errors + 1)
- )
- ),
+ )
+ ),
If(ded_errors != (2**len(ded_errors) - 1),
If(ecc_rdata.ded != 0,
ded_detected.eq(1),
ded_errors.eq(ded_errors + 1)
- )
- )
+ )
+ )
)
]
return signal.eq(signal + 1)
else:
return If(signal == (modulo - 1),
- signal.eq(0)
- ).Else(
+ signal.eq(0)
+ ).Else(
signal.eq(signal + 1)
)
class _LiteDRAMFIFOCtrl(Module):
def __init__(self, base, depth, read_threshold, write_threshold):
- self.base = base
+ self.base = base
self.depth = depth
self.level = Signal(max=depth+1)
# # #
- self.submodules.writer = writer = dma.LiteDRAMDMAWriter(port, fifo_depth=32)
+ self.submodules.writer = writer = dma.LiteDRAMDMAWriter(
+ port, fifo_depth=32)
self.comb += [
writer.sink.valid.eq(sink.valid & ctrl.writable),
writer.sink.address.eq(ctrl.base + ctrl.write_address),
If(writer.sink.valid & writer.sink.ready,
ctrl.write.eq(1),
sink.ready.eq(1)
- )
+ )
]
# # #
- self.submodules.reader = reader = dma.LiteDRAMDMAReader(port, fifo_depth=32)
+ self.submodules.reader = reader = dma.LiteDRAMDMAReader(
+ port, fifo_depth=32)
self.comb += [
reader.sink.valid.eq(ctrl.readable),
reader.sink.address.eq(ctrl.base + ctrl.read_address),
If(reader.sink.valid & reader.sink.ready,
ctrl.read.eq(1)
- )
+ )
]
self.comb += reader.source.connect(source)
class LiteDRAMFIFO(Module):
def __init__(self, data_width, base, depth, write_port, read_port,
- read_threshold=None, write_threshold=None):
- self.sink = stream.Endpoint([("data", data_width)])
+ read_threshold=None, write_threshold=None):
+ self.sink = stream.Endpoint([("data", data_width)])
self.source = stream.Endpoint([("data", data_width)])
# # #
if write_threshold is None:
write_threshold = depth
- self.submodules.ctrl = _LiteDRAMFIFOCtrl(base, depth, read_threshold, write_threshold)
- self.submodules.writer = _LiteDRAMFIFOWriter(data_width, write_port, self.ctrl)
- self.submodules.reader = _LiteDRAMFIFOReader(data_width, read_port, self.ctrl)
+ self.submodules.ctrl = _LiteDRAMFIFOCtrl(
+ base, depth, read_threshold, write_threshold)
+ self.submodules.writer = _LiteDRAMFIFOWriter(
+ data_width, write_port, self.ctrl)
+ self.submodules.reader = _LiteDRAMFIFOReader(
+ data_width, read_port, self.ctrl)
self.comb += [
self.sink.connect(self.writer.sink),
self.reader.source.connect(self.source)
--- /dev/null
+# This file is Copyright (c) 2016-2020 Florent Kermarrec <florent@enjoy-digital.fr>
+# License: BSD
+
+"""Wishbone frontend for LiteDRAM"""
+
+from math import log2
+
+from nmigen import *
+
+import gram.stream as stream
+
+
+# LiteDRAMWishbone2Native --------------------------------------------------------------------------
+
+class LiteDRAMWishbone2Native(Module):
+    """Bridge a Wishbone slave interface onto a LiteDRAM native port.
+
+    A single Wishbone access of width len(wishbone.dat_w) is split into
+    ``ratio`` consecutive native-port commands (ratio = wishbone width /
+    port width), with StrideConverters adapting the write and read data
+    paths between the two widths.
+
+    NOTE(review): this file imports nmigen (``from nmigen import *``) but the
+    class body uses migen-style APIs (``self.submodules``, ``self.comb``,
+    ``FSM``, ``If``/``NextValue``/``NextState``) — confirm a compatibility
+    shim provides these, otherwise this new file cannot elaborate as-is.
+    """
+
+    def __init__(self, wishbone, port, base_address=0x00000000):
+        # wishbone : Wishbone bus record (dat_w/dat_r/adr/cyc/stb/we/sel/ack).
+        # port     : LiteDRAM native port (cmd/wdata/rdata endpoints).
+        # base_address : byte address subtracted (after word scaling) from
+        #                incoming Wishbone addresses.
+        wishbone_data_width = len(wishbone.dat_w)
+        # Round the port data width down to the nearest power of 2.
+        port_data_width = 2**int(log2(len(port.wdata.data)))
+        assert wishbone_data_width >= port_data_width
+
+        # # #
+
+        # Convert base_address from bytes to port words.
+        # NOTE(review): ``log2_int`` is not imported by this file (only
+        # ``math.log2`` is) — confirm it is provided by the nmigen wildcard
+        # import. Also note the shift uses ``port.data_width``, not the
+        # rounded ``port_data_width`` computed above — verify intentional.
+        adr_offset = base_address >> log2_int(port.data_width//8)
+
+        # Write Datapath ---------------------------------------------------------------------------
+        # Down-convert one wide Wishbone write beat into ``ratio`` narrower
+        # port wdata beats (data and byte-enable lanes together).
+        wdata_converter = stream.StrideConverter(
+            [("data", wishbone_data_width), ("we", wishbone_data_width//8)],
+            [("data", port_data_width), ("we", port_data_width//8)],
+        )
+        self.submodules += wdata_converter
+        self.comb += [
+            wdata_converter.sink.valid.eq(
+                wishbone.cyc & wishbone.stb & wishbone.we),
+            wdata_converter.sink.data.eq(wishbone.dat_w),
+            wdata_converter.sink.we.eq(wishbone.sel),
+            wdata_converter.source.connect(port.wdata)
+        ]
+
+        # Read Datapath ----------------------------------------------------------------------------
+        # Up-convert ``ratio`` narrow port rdata beats into one wide
+        # Wishbone read beat; the source is always drained (ready=1).
+        rdata_converter = stream.StrideConverter(
+            [("data", port_data_width)],
+            [("data", wishbone_data_width)],
+        )
+        self.submodules += rdata_converter
+        self.comb += [
+            port.rdata.connect(rdata_converter.sink),
+            rdata_converter.source.ready.eq(1),
+            wishbone.dat_r.eq(rdata_converter.source.data),
+        ]
+
+        # Control ----------------------------------------------------------------------------------
+        # Issue ``ratio`` port commands per Wishbone access, then wait for
+        # the datapath to complete before acking.
+        ratio = wishbone_data_width//port_data_width
+        # max(ratio, 2) keeps ``count`` at least 1 bit wide when ratio == 1.
+        count = Signal(max=max(ratio, 2))
+        self.submodules.fsm = fsm = FSM(reset_state="CMD")
+        fsm.act("CMD",
+            port.cmd.valid.eq(wishbone.cyc & wishbone.stb),
+            port.cmd.we.eq(wishbone.we),
+            # Scale the Wishbone word address to port words and add the
+            # per-access beat index; subtract the (pre-scaled) base offset.
+            port.cmd.addr.eq(wishbone.adr*ratio + count - adr_offset),
+            If(port.cmd.valid & port.cmd.ready,
+                NextValue(count, count + 1),
+                If(count == (ratio - 1),
+                    NextValue(count, 0),
+                    If(wishbone.we,
+                        NextState("WAIT-WRITE")
+                    ).Else(
+                        NextState("WAIT-READ")
+                    )
+                )
+            )
+        )
+        # Ack the write once the converter has accepted all write data.
+        fsm.act("WAIT-WRITE",
+            If(wdata_converter.sink.ready,
+                wishbone.ack.eq(1),
+                NextState("CMD")
+            )
+        )
+        # Ack the read once the full-width read word is available.
+        fsm.act("WAIT-READ",
+            If(rdata_converter.source.valid,
+                wishbone.ack.eq(1),
+                NextState("CMD")
+            )
+        )
from nmigen_soc.memory import MemoryMap
from lambdasoc.periph import Peripheral
+
class gramWishbone(Peripheral, Elaboratable):
def __init__(self, core):
super().__init__()
granularity = 8
self.bus = wishbone.Interface(addr_width=dram_addr_width,
- data_width=32, granularity=granularity)
+ data_width=32, granularity=granularity)
map = MemoryMap(addr_width=dram_addr_width, data_width=granularity)
map.add_resource(self, size=dram_size)
]
with m.If(port.cmd.valid & port.cmd.ready):
+ # with m.State("Write"):
+ # ...
- # with m.State("Write"):
- # ...
-
- # with m.State("Read"):
- # ...
+ # with m.State("Read"):
+ # ...
return m
# IOs/Interfaces -----------------------------------------------------------------------------------
+
def get_common_ios():
return [
# clk / rst
("serial", 0,
Subsignal("tx", Pins(1)),
Subsignal("rx", Pins(1))
- ),
+ ),
# crg status
("pll_locked", 0, Pins(1)),
("user_rst", 0, Pins(1))
]
+
def get_dram_ios(core_config):
sdram_module = core_config["sdram_module"]
return [
("ddram", 0,
- Subsignal("a", Pins(log2_int(core_config["sdram_module"].nrows))),
- Subsignal("ba", Pins(log2_int(core_config["sdram_module"].nbanks))),
+ Subsignal("a", Pins(
+ log2_int(core_config["sdram_module"].nrows))),
+ Subsignal("ba", Pins(
+ log2_int(core_config["sdram_module"].nbanks))),
Subsignal("ras_n", Pins(1)),
Subsignal("cas_n", Pins(1)),
Subsignal("we_n", Pins(1)),
Subsignal("cke", Pins(core_config["sdram_rank_nb"])),
Subsignal("odt", Pins(core_config["sdram_rank_nb"])),
Subsignal("reset_n", Pins(1))
- ),
+ ),
]
+
def get_native_user_port_ios(_id, aw, dw):
return [
("user_port_{}".format(_id), 0,
Subsignal("rdata_valid", Pins(1)),
Subsignal("rdata_ready", Pins(1)),
Subsignal("rdata_data", Pins(dw))
- ),
+ ),
]
+
def get_wishbone_user_port_ios(_id, aw, dw):
return [
("user_port_{}".format(_id), 0,
Subsignal("ack", Pins(1)),
Subsignal("we", Pins(1)),
Subsignal("err", Pins(1)),
- ),
+ ),
]
+
def get_axi_user_port_ios(_id, aw, dw, iw):
return [
("user_port_{}".format(_id), 0,
Subsignal("rresp", Pins(2)),
Subsignal("rdata", Pins(dw)),
Subsignal("rid", Pins(iw))
- ),
+ ),
]
+
def get_fifo_user_port_ios(_id, dw):
return [
("user_fifo_{}".format(_id), 0,
Subsignal("out_valid", Pins(1)),
Subsignal("out_ready", Pins(1)),
Subsignal("out_data", Pins(dw)),
- ),
+ ),
]
# CRG ----------------------------------------------------------------------------------------------
+
class LiteDRAMECP5DDRPHYCRG(Module):
def __init__(self, platform, core_config):
- self.clock_domains.cd_init = ClockDomain()
- self.clock_domains.cd_por = ClockDomain(reset_less=True)
- self.clock_domains.cd_sys = ClockDomain()
- self.clock_domains.cd_sys2x = ClockDomain()
+ self.clock_domains.cd_init = ClockDomain()
+ self.clock_domains.cd_por = ClockDomain(reset_less=True)
+ self.clock_domains.cd_sys = ClockDomain()
+ self.clock_domains.cd_sys2x = ClockDomain()
self.clock_domains.cd_sys2x_i = ClockDomain(reset_less=True)
# # #
# power on reset
por_count = Signal(16, reset=2**16-1)
- por_done = Signal()
+ por_done = Signal()
self.comb += self.cd_por.clk.eq(ClockSignal())
self.comb += por_done.eq(por_count == 0)
self.sync.por += If(~por_done, por_count.eq(por_count - 1))
pll.create_clkout(self.cd_init, core_config["init_clk_freq"])
self.specials += [
Instance("ECLKSYNCB",
- i_ECLKI = self.cd_sys2x_i.clk,
- i_STOP = self.stop,
- o_ECLKO = self.cd_sys2x.clk),
+ i_ECLKI=self.cd_sys2x_i.clk,
+ i_STOP=self.stop,
+ o_ECLKO=self.cd_sys2x.clk),
Instance("CLKDIVF",
- p_DIV = "2.0",
- i_ALIGNWD = 0,
- i_CLKI = self.cd_sys2x.clk,
- i_RST = self.cd_sys2x.rst,
- o_CDIVX = self.cd_sys.clk),
- AsyncResetSynchronizer(self.cd_init, ~por_done | ~pll.locked | rst),
- AsyncResetSynchronizer(self.cd_sys, ~por_done | ~pll.locked | rst),
+ p_DIV="2.0",
+ i_ALIGNWD=0,
+ i_CLKI=self.cd_sys2x.clk,
+ i_RST=self.cd_sys2x.rst,
+ o_CDIVX=self.cd_sys.clk),
+ AsyncResetSynchronizer(
+ self.cd_init, ~por_done | ~pll.locked | rst),
+ AsyncResetSynchronizer(
+ self.cd_sys, ~por_done | ~pll.locked | rst),
]
+
class LiteDRAMS7DDRPHYCRG(Module):
def __init__(self, platform, core_config):
self.clock_domains.cd_sys = ClockDomain()
if core_config["memtype"] == "DDR3":
- self.clock_domains.cd_sys4x = ClockDomain(reset_less=True)
+ self.clock_domains.cd_sys4x = ClockDomain(reset_less=True)
self.clock_domains.cd_sys4x_dqs = ClockDomain(reset_less=True)
else:
- self.clock_domains.cd_sys2x = ClockDomain(reset_less=True)
+ self.clock_domains.cd_sys2x = ClockDomain(reset_less=True)
self.clock_domains.cd_sys2x_dqs = ClockDomain(reset_less=True)
self.clock_domains.cd_iodelay = ClockDomain()
clk = platform.request("clk")
rst = platform.request("rst")
- self.submodules.sys_pll = sys_pll = S7PLL(speedgrade=core_config["speedgrade"])
+ self.submodules.sys_pll = sys_pll = S7PLL(
+ speedgrade=core_config["speedgrade"])
self.comb += sys_pll.reset.eq(rst)
sys_pll.register_clkin(clk, core_config["input_clk_freq"])
sys_pll.create_clkout(self.cd_sys, core_config["sys_clk_freq"])
if core_config["memtype"] == "DDR3":
sys_pll.create_clkout(self.cd_sys4x, 4*core_config["sys_clk_freq"])
- sys_pll.create_clkout(self.cd_sys4x_dqs, 4*core_config["sys_clk_freq"], phase=90)
+ sys_pll.create_clkout(self.cd_sys4x_dqs, 4 *
+ core_config["sys_clk_freq"], phase=90)
else:
sys_pll.create_clkout(self.cd_sys2x, 2*core_config["sys_clk_freq"])
- sys_pll.create_clkout(self.cd_sys2x_dqs, 2*core_config["sys_clk_freq"], phase=90)
+ sys_pll.create_clkout(self.cd_sys2x_dqs, 2 *
+ core_config["sys_clk_freq"], phase=90)
self.comb += platform.request("pll_locked").eq(sys_pll.locked)
- self.submodules.iodelay_pll = iodelay_pll = S7PLL(speedgrade=core_config["speedgrade"])
+ self.submodules.iodelay_pll = iodelay_pll = S7PLL(
+ speedgrade=core_config["speedgrade"])
self.comb += iodelay_pll.reset.eq(rst)
iodelay_pll.register_clkin(clk, core_config["input_clk_freq"])
- iodelay_pll.create_clkout(self.cd_iodelay, core_config["iodelay_clk_freq"])
+ iodelay_pll.create_clkout(
+ self.cd_iodelay, core_config["iodelay_clk_freq"])
self.submodules.idelayctrl = S7IDELAYCTRL(self.cd_iodelay)
# LiteDRAMCoreControl ------------------------------------------------------------------------------
+
class LiteDRAMCoreControl(Module, AutoCSR):
def __init__(self):
- self.init_done = CSRStorage()
+ self.init_done = CSRStorage()
self.init_error = CSRStorage()
# LiteDRAMCore -------------------------------------------------------------------------------------
+
class LiteDRAMCore(SoCCore):
def __init__(self, platform, core_config, **kwargs):
platform.add_extension(get_common_ios())
# Parameters -------------------------------------------------------------------------------
- sys_clk_freq = core_config["sys_clk_freq"]
- cpu_type = core_config["cpu"]
- cpu_variant = core_config.get("cpu_variant", "standard")
+ sys_clk_freq = core_config["sys_clk_freq"]
+ cpu_type = core_config["cpu"]
+ cpu_variant = core_config.get("cpu_variant", "standard")
csr_alignment = core_config.get("csr_alignment", 32)
if cpu_type is None:
- kwargs["integrated_rom_size"] = 0
+ kwargs["integrated_rom_size"] = 0
kwargs["integrated_sram_size"] = 0
- kwargs["with_uart"] = False
- kwargs["with_timer"] = False
- kwargs["with_ctrl"] = False
+ kwargs["with_uart"] = False
+ kwargs["with_timer"] = False
+ kwargs["with_ctrl"] = False
# SoCCore ----------------------------------------------------------------------------------
SoCCore.__init__(self, platform, sys_clk_freq,
- cpu_type = cpu_type,
- cpu_variant = cpu_variant,
- csr_alignment = csr_alignment,
- **kwargs)
+ cpu_type=cpu_type,
+ cpu_variant=cpu_variant,
+ csr_alignment=csr_alignment,
+ **kwargs)
# CRG --------------------------------------------------------------------------------------
if core_config["sdram_phy"] in [litedram_phys.ECP5DDRPHY]:
- self.submodules.crg = crg = LiteDRAMECP5DDRPHYCRG(platform, core_config)
+ self.submodules.crg = crg = LiteDRAMECP5DDRPHYCRG(
+ platform, core_config)
if core_config["sdram_phy"] in [litedram_phys.A7DDRPHY, litedram_phys.K7DDRPHY, litedram_phys.V7DDRPHY]:
self.submodules.crg = LiteDRAMS7DDRPHYCRG(platform, core_config)
# DRAM -------------------------------------------------------------------------------------
platform.add_extension(get_dram_ios(core_config))
# ECP5DDRPHY
- if core_config["sdram_phy"] in [litedram_phys.ECP5DDRPHY]:
+ if core_config["sdram_phy"] in [litedram_phys.ECP5DDRPHY]:
assert core_config["memtype"] in ["DDR3"]
self.submodules.ddrphy = core_config["sdram_phy"](
- pads = platform.request("ddram"),
- sys_clk_freq = sys_clk_freq)
+ pads=platform.request("ddram"),
+ sys_clk_freq=sys_clk_freq)
self.comb += crg.stop.eq(self.ddrphy.init.stop)
self.add_constant("ECP5DDRPHY")
sdram_module = core_config["sdram_module"](sys_clk_freq, "1:2")
if core_config["sdram_phy"] in [litedram_phys.A7DDRPHY, litedram_phys.K7DDRPHY, litedram_phys.V7DDRPHY]:
assert core_config["memtype"] in ["DDR2", "DDR3"]
self.submodules.ddrphy = core_config["sdram_phy"](
- pads = platform.request("ddram"),
- memtype = core_config["memtype"],
- nphases = 4 if core_config["memtype"] == "DDR3" else 2,
- sys_clk_freq = sys_clk_freq,
- iodelay_clk_freq = core_config["iodelay_clk_freq"],
- cmd_latency = core_config["cmd_latency"])
+ pads=platform.request("ddram"),
+ memtype=core_config["memtype"],
+ nphases=4 if core_config["memtype"] == "DDR3" else 2,
+ sys_clk_freq=sys_clk_freq,
+ iodelay_clk_freq=core_config["iodelay_clk_freq"],
+ cmd_latency=core_config["cmd_latency"])
self.add_constant("CMD_DELAY", core_config["cmd_delay"])
if core_config["memtype"] == "DDR3":
self.ddrphy.settings.add_electrical_settings(
- rtt_nom = core_config["rtt_nom"],
- rtt_wr = core_config["rtt_wr"],
- ron = core_config["ron"])
+ rtt_nom=core_config["rtt_nom"],
+ rtt_wr=core_config["rtt_wr"],
+ ron=core_config["ron"])
self.add_csr("ddrphy")
sdram_module = core_config["sdram_module"](sys_clk_freq,
- "1:4" if core_config["memtype"] == "DDR3" else "1:2")
+ "1:4" if core_config["memtype"] == "DDR3" else "1:2")
controller_settings = controller_settings = ControllerSettings(
cmd_buffer_depth=core_config["cmd_buffer_depth"])
self.add_sdram("sdram",
- phy = self.ddrphy,
- module = sdram_module,
- origin = self.mem_map["main_ram"],
- size = 0x01000000, # Only expose 16MB to the CPU, enough for Init/Calib.
- with_soc_interconnect = cpu_type is not None,
- l2_cache_size = 0,
- l2_cache_min_data_width = 0,
- controller_settings = controller_settings,
- )
+ phy=self.ddrphy,
+ module=sdram_module,
+ origin=self.mem_map["main_ram"],
+ # Only expose 16MB to the CPU, enough for Init/Calib.
+ size=0x01000000,
+ with_soc_interconnect=cpu_type is not None,
+ l2_cache_size=0,
+ l2_cache_min_data_width=0,
+ controller_settings=controller_settings,
+ )
# DRAM Control/Status ----------------------------------------------------------------------
# Expose calibration status to user.
self.submodules.ddrctrl = LiteDRAMCoreControl()
self.add_csr("ddrctrl")
- self.comb += platform.request("init_done").eq(self.ddrctrl.init_done.storage)
- self.comb += platform.request("init_error").eq(self.ddrctrl.init_error.storage)
+ self.comb += platform.request("init_done").eq(
+ self.ddrctrl.init_done.storage)
+ self.comb += platform.request("init_error").eq(
+ self.ddrctrl.init_error.storage)
# If no CPU, expose a bus control interface to user.
if cpu_type is None:
wb_bus = wishbone.Interface()
if port["type"] == "native":
user_port = self.sdram.crossbar.get_port()
platform.add_extension(get_native_user_port_ios(name,
- user_port.address_width,
- user_port.data_width))
+ user_port.address_width,
+ user_port.data_width))
_user_port_io = platform.request("user_port_{}".format(name))
self.comb += [
# cmd
wishbone2native = LiteDRAMWishbone2Native(wb_port, user_port)
self.submodules += wishbone2native
platform.add_extension(get_wishbone_user_port_ios(name,
- len(wb_port.adr),
- len(wb_port.dat_w)))
+ len(wb_port.adr),
+ len(wb_port.dat_w)))
_wb_port_io = platform.request("user_port_{}".format(name))
self.comb += [
wb_port.adr.eq(_wb_port_io.adr),
# AXI ----------------------------------------------------------------------------------
elif port["type"] == "axi":
user_port = self.sdram.crossbar.get_port()
- axi_port = LiteDRAMAXIPort(
+ axi_port = LiteDRAMAXIPort(
user_port.data_width,
- user_port.address_width + log2_int(user_port.data_width//8),
+ user_port.address_width +
+ log2_int(user_port.data_width//8),
port["id_width"])
axi2native = LiteDRAMAXI2Native(axi_port, user_port)
self.submodules += axi2native
platform.add_extension(get_axi_user_port_ios(name,
- axi_port.address_width,
- axi_port.data_width,
- port["id_width"]))
+ axi_port.address_width,
+ axi_port.data_width,
+ port["id_width"]))
_axi_port_io = platform.request("user_port_{}".format(name))
self.comb += [
# aw
]
# FIFO ---------------------------------------------------------------------------------
elif port["type"] == "fifo":
- platform.add_extension(get_fifo_user_port_ios(name, user_port.data_width))
+ platform.add_extension(
+ get_fifo_user_port_ios(name, user_port.data_width))
_user_fifo_io = platform.request("user_fifo_{}".format(name))
fifo = LiteDRAMFIFO(
- data_width = user_port.data_width,
- base = port["base"],
- depth = port["depth"],
- write_port = self.sdram.crossbar.get_port("write"),
- write_threshold = port["depth"] - 32, # FIXME
- read_port = self.sdram.crossbar.get_port("read"),
- read_threshold = 32 # FIXME
+ data_width=user_port.data_width,
+ base=port["base"],
+ depth=port["depth"],
+ write_port=self.sdram.crossbar.get_port("write"),
+ write_threshold=port["depth"] - 32, # FIXME
+ read_port=self.sdram.crossbar.get_port("read"),
+ read_threshold=32 # FIXME
)
self.submodules += fifo
self.comb += [
_user_fifo_io.out_data.eq(fifo.source.data),
]
else:
- raise ValueError("Unsupported port type: {}".format(port["type"]))
+ raise ValueError(
+ "Unsupported port type: {}".format(port["type"]))
# Build --------------------------------------------------------------------------------------------
+
def main():
- parser = argparse.ArgumentParser(description="LiteDRAM standalone core generator")
+ parser = argparse.ArgumentParser(
+ description="LiteDRAM standalone core generator")
builder_args(parser)
parser.set_defaults(output_dir="build")
parser.add_argument("config", help="YAML config file")
# Generate core --------------------------------------------------------------------------------
if core_config["sdram_phy"] in [litedram_phys.ECP5DDRPHY]:
- platform = LatticePlatform("LFE5UM5G-45F-8BG381C", io=[], toolchain="trellis") # FIXME: allow other devices.
+ # FIXME: allow other devices.
+ platform = LatticePlatform(
+ "LFE5UM5G-45F-8BG381C", io=[], toolchain="trellis")
elif core_config["sdram_phy"] in [litedram_phys.A7DDRPHY, litedram_phys.K7DDRPHY, litedram_phys.V7DDRPHY]:
platform = XilinxPlatform("", io=[], toolchain="vivado")
else:
- raise ValueError("Unsupported SDRAM PHY: {}".format(core_config["sdram_phy"]))
+ raise ValueError("Unsupported SDRAM PHY: {}".format(
+ core_config["sdram_phy"]))
builder_arguments = builder_argdict(args)
builder_arguments["compile_gateware"] = False
- soc = LiteDRAMCore(platform, core_config, integrated_rom_size=0x6000)
- builder = Builder(soc, **builder_arguments)
- vns = builder.build(build_name="litedram_core", regular_comb=False)
+ soc = LiteDRAMCore(platform, core_config, integrated_rom_size=0x6000)
+ builder = Builder(soc, **builder_arguments)
+ vns = builder.build(build_name="litedram_core", regular_comb=False)
if soc.cpu_type is not None:
init_filename = "mem.init"
os.path.join(builder.gateware_dir, init_filename),
os.path.join(builder.gateware_dir, "litedram_core.init"),
))
- replace_in_file(os.path.join(builder.gateware_dir, "litedram_core.v"), init_filename, "litedram_core.init")
+ replace_in_file(os.path.join(builder.gateware_dir,
+ "litedram_core.v"), init_filename, "litedram_core.init")
+
if __name__ == "__main__":
main()
# SDR ----------------------------------------------------------------------------------------------
+
def get_sdr_phy_init_sequence(phy_settings, timing_settings):
cl = phy_settings.cl
bl = 1
init_sequence = [
("Bring CKE high", 0x0000, 0, cmds["CKE"], 20000),
("Precharge All", 0x0400, 0, cmds["PRECHARGE_ALL"], 0),
- ("Load Mode Register / Reset DLL, CL={0:d}, BL={1:d}".format(cl, bl), mr + reset_dll, 0, cmds["MODE_REGISTER"], 200),
+ ("Load Mode Register / Reset DLL, CL={0:d}, BL={1:d}".format(
+ cl, bl), mr + reset_dll, 0, cmds["MODE_REGISTER"], 200),
("Precharge All", 0x0400, 0, cmds["PRECHARGE_ALL"], 0),
("Auto Refresh", 0x0, 0, cmds["AUTO_REFRESH"], 4),
("Auto Refresh", 0x0, 0, cmds["AUTO_REFRESH"], 4),
- ("Load Mode Register / CL={0:d}, BL={1:d}".format(cl, bl), mr, 0, cmds["MODE_REGISTER"], 200)
+ ("Load Mode Register / CL={0:d}, BL={1:d}".format(cl,
+ bl), mr, 0, cmds["MODE_REGISTER"], 200)
]
return init_sequence, None
# DDR ----------------------------------------------------------------------------------------------
+
def get_ddr_phy_init_sequence(phy_settings, timing_settings):
- cl = phy_settings.cl
- bl = 4
- mr = log2_int(bl) + (cl << 4)
+ cl = phy_settings.cl
+ bl = 4
+ mr = log2_int(bl) + (cl << 4)
emr = 0
reset_dll = 1 << 8
("Bring CKE high", 0x0000, 0, cmds["CKE"], 20000),
("Precharge All", 0x0400, 0, cmds["PRECHARGE_ALL"], 0),
("Load Extended Mode Register", emr, 1, cmds["MODE_REGISTER"], 0),
- ("Load Mode Register / Reset DLL, CL={0:d}, BL={1:d}".format(cl, bl), mr + reset_dll, 0, cmds["MODE_REGISTER"], 200),
+ ("Load Mode Register / Reset DLL, CL={0:d}, BL={1:d}".format(
+ cl, bl), mr + reset_dll, 0, cmds["MODE_REGISTER"], 200),
("Precharge All", 0x0400, 0, cmds["PRECHARGE_ALL"], 0),
("Auto Refresh", 0x0, 0, cmds["AUTO_REFRESH"], 4),
("Auto Refresh", 0x0, 0, cmds["AUTO_REFRESH"], 4),
- ("Load Mode Register / CL={0:d}, BL={1:d}".format(cl, bl), mr, 0, cmds["MODE_REGISTER"], 200)
+ ("Load Mode Register / CL={0:d}, BL={1:d}".format(cl,
+ bl), mr, 0, cmds["MODE_REGISTER"], 200)
]
return init_sequence, None
# LPDDR --------------------------------------------------------------------------------------------
+
def get_lpddr_phy_init_sequence(phy_settings, timing_settings):
- cl = phy_settings.cl
- bl = 4
- mr = log2_int(bl) + (cl << 4)
+ cl = phy_settings.cl
+ bl = 4
+ mr = log2_int(bl) + (cl << 4)
emr = 0
reset_dll = 1 << 8
("Bring CKE high", 0x0000, 0, cmds["CKE"], 20000),
("Precharge All", 0x0400, 0, cmds["PRECHARGE_ALL"], 0),
("Load Extended Mode Register", emr, 2, cmds["MODE_REGISTER"], 0),
- ("Load Mode Register / Reset DLL, CL={0:d}, BL={1:d}".format(cl, bl), mr + reset_dll, 0, cmds["MODE_REGISTER"], 200),
+ ("Load Mode Register / Reset DLL, CL={0:d}, BL={1:d}".format(
+ cl, bl), mr + reset_dll, 0, cmds["MODE_REGISTER"], 200),
("Precharge All", 0x0400, 0, cmds["PRECHARGE_ALL"], 0),
("Auto Refresh", 0x0, 0, cmds["AUTO_REFRESH"], 4),
("Auto Refresh", 0x0, 0, cmds["AUTO_REFRESH"], 4),
- ("Load Mode Register / CL={0:d}, BL={1:d}".format(cl, bl), mr, 0, cmds["MODE_REGISTER"], 200)
+ ("Load Mode Register / CL={0:d}, BL={1:d}".format(cl,
+ bl), mr, 0, cmds["MODE_REGISTER"], 200)
]
return init_sequence, None
# DDR2 ---------------------------------------------------------------------------------------------
+
def get_ddr2_phy_init_sequence(phy_settings, timing_settings):
- cl = phy_settings.cl
- bl = 4
- wr = 2
- mr = log2_int(bl) + (cl << 4) + (wr << 9)
- emr = 0
+ cl = phy_settings.cl
+ bl = 4
+ wr = 2
+ mr = log2_int(bl) + (cl << 4) + (wr << 9)
+ emr = 0
emr2 = 0
emr3 = 0
- ocd = 7 << 7
+ ocd = 7 << 7
reset_dll = 1 << 8
init_sequence = [
("Load Extended Mode Register 3", emr3, 3, cmds["MODE_REGISTER"], 0),
("Load Extended Mode Register 2", emr2, 2, cmds["MODE_REGISTER"], 0),
("Load Extended Mode Register", emr, 1, cmds["MODE_REGISTER"], 0),
- ("Load Mode Register / Reset DLL, CL={0:d}, BL={1:d}".format(cl, bl), mr + reset_dll, 0, cmds["MODE_REGISTER"], 200),
+ ("Load Mode Register / Reset DLL, CL={0:d}, BL={1:d}".format(
+ cl, bl), mr + reset_dll, 0, cmds["MODE_REGISTER"], 200),
("Precharge All", 0x0400, 0, cmds["PRECHARGE_ALL"], 0),
("Auto Refresh", 0x0, 0, cmds["AUTO_REFRESH"], 4),
("Auto Refresh", 0x0, 0, cmds["AUTO_REFRESH"], 4),
- ("Load Mode Register / CL={0:d}, BL={1:d}".format(cl, bl), mr, 0, cmds["MODE_REGISTER"], 200),
- ("Load Extended Mode Register / OCD Default", emr+ocd, 1, cmds["MODE_REGISTER"], 0),
- ("Load Extended Mode Register / OCD Exit", emr, 1, cmds["MODE_REGISTER"], 0),
+ ("Load Mode Register / CL={0:d}, BL={1:d}".format(cl,
+ bl), mr, 0, cmds["MODE_REGISTER"], 200),
+ ("Load Extended Mode Register / OCD Default",
+ emr+ocd, 1, cmds["MODE_REGISTER"], 0),
+ ("Load Extended Mode Register / OCD Exit",
+ emr, 1, cmds["MODE_REGISTER"], 0),
]
return init_sequence, None
# DDR3 ---------------------------------------------------------------------------------------------
+
def get_ddr3_phy_init_sequence(phy_settings, timing_settings):
- cl = phy_settings.cl
- bl = 8
+ cl = phy_settings.cl
+ bl = 8
cwl = phy_settings.cwl
def format_mr0(bl, cl, wr, dll_reset):
8: 0b00
}
cl_to_mr0 = {
- 5: 0b0010,
- 6: 0b0100,
- 7: 0b0110,
- 8: 0b1000,
- 9: 0b1010,
+ 5: 0b0010,
+ 6: 0b0100,
+ 7: 0b0110,
+ 8: 0b1000,
+ 9: 0b1010,
10: 0b1100,
11: 0b1110,
12: 0b0001,
}
wr_to_mr0 = {
16: 0b000,
- 5: 0b001,
- 6: 0b010,
- 7: 0b011,
- 8: 0b100,
+ 5: 0b001,
+ 6: 0b010,
+ 7: 0b011,
+ 8: 0b100,
10: 0b101,
12: 0b110,
14: 0b111
return mr2
z_to_rtt_nom = {
- "disabled" : 0,
- "60ohm" : 1,
- "120ohm" : 2,
- "40ohm" : 3,
- "20ohm" : 4,
- "30ohm" : 5
+ "disabled": 0,
+ "60ohm": 1,
+ "120ohm": 2,
+ "40ohm": 3,
+ "20ohm": 4,
+ "30ohm": 5
}
z_to_rtt_wr = {
- "disabled" : 0,
- "60ohm" : 1,
- "120ohm" : 2,
+ "disabled": 0,
+ "60ohm": 1,
+ "120ohm": 2,
}
z_to_ron = {
- "40ohm" : 0,
- "34ohm" : 1,
+ "40ohm": 0,
+ "34ohm": 1,
}
# default electrical settings (point to point)
rtt_nom = "60ohm"
- rtt_wr = "60ohm"
- ron = "34ohm"
+ rtt_wr = "60ohm"
+ ron = "34ohm"
# override electrical settings if specified
if hasattr(phy_settings, "rtt_nom"):
if hasattr(phy_settings, "ron"):
ron = phy_settings.ron
- wr = max(timing_settings.tWTR*phy_settings.nphases, 5) # >= ceiling(tWR/tCK)
+ # >= ceiling(tWR/tCK)
+ wr = max(timing_settings.tWTR*phy_settings.nphases, 5)
mr0 = format_mr0(bl, cl, wr, 1)
mr1 = format_mr1(z_to_ron[ron], z_to_rtt_nom[rtt_nom])
mr2 = format_mr2(cwl, z_to_rtt_wr[rtt_wr])
init_sequence = [
("Release reset", 0x0000, 0, cmds["UNRESET"], 50000),
("Bring CKE high", 0x0000, 0, cmds["CKE"], 10000),
- ("Load Mode Register 2, CWL={0:d}".format(cwl), mr2, 2, cmds["MODE_REGISTER"], 0),
+ ("Load Mode Register 2, CWL={0:d}".format(
+ cwl), mr2, 2, cmds["MODE_REGISTER"], 0),
("Load Mode Register 3", mr3, 3, cmds["MODE_REGISTER"], 0),
("Load Mode Register 1", mr1, 1, cmds["MODE_REGISTER"], 0),
- ("Load Mode Register 0, CL={0:d}, BL={1:d}".format(cl, bl), mr0, 0, cmds["MODE_REGISTER"], 200),
+ ("Load Mode Register 0, CL={0:d}, BL={1:d}".format(
+ cl, bl), mr0, 0, cmds["MODE_REGISTER"], 200),
("ZQ Calibration", 0x0400, 0, "DFII_COMMAND_WE|DFII_COMMAND_CS", 200),
]
# DDR4 ---------------------------------------------------------------------------------------------
+
def get_ddr4_phy_init_sequence(phy_settings, timing_settings):
- cl = phy_settings.cl
- bl = 8
+ cl = phy_settings.cl
+ bl = 8
cwl = phy_settings.cwl
def format_mr0(bl, cl, wr, dll_reset):
8: 0b00
}
cl_to_mr0 = {
- 9: 0b00000,
+ 9: 0b00000,
10: 0b00001,
11: 0b00010,
12: 0b00011,
def format_mr2(cwl, rtt_wr):
cwl_to_mr2 = {
- 9: 0b000,
+ 9: 0b000,
10: 0b001,
11: 0b010,
12: 0b011,
return mr6
z_to_rtt_nom = {
- "disabled" : 0b000,
- "60ohm" : 0b001,
- "120ohm" : 0b010,
- "40ohm" : 0b011,
- "240ohm" : 0b100,
- "48ohm" : 0b101,
- "80ohm" : 0b110,
- "34ohm" : 0b111
+ "disabled": 0b000,
+ "60ohm": 0b001,
+ "120ohm": 0b010,
+ "40ohm": 0b011,
+ "240ohm": 0b100,
+ "48ohm": 0b101,
+ "80ohm": 0b110,
+ "34ohm": 0b111
}
z_to_rtt_wr = {
- "disabled" : 0b000,
- "120ohm" : 0b001,
- "240ohm" : 0b010,
- "high-z" : 0b011,
- "80ohm" : 0b100,
+ "disabled": 0b000,
+ "120ohm": 0b001,
+ "240ohm": 0b010,
+ "high-z": 0b011,
+ "80ohm": 0b100,
}
z_to_ron = {
- "34ohm" : 0b00,
- "48ohm" : 0b01,
+ "34ohm": 0b00,
+ "48ohm": 0b01,
}
# default electrical settings (point to point)
rtt_nom = "40ohm"
- rtt_wr = "120ohm"
- ron = "34ohm"
+ rtt_wr = "120ohm"
+ ron = "34ohm"
# override electrical settings if specified
if hasattr(phy_settings, "rtt_nom"):
if hasattr(phy_settings, "ron"):
ron = phy_settings.ron
- wr = max(timing_settings.tWTR*phy_settings.nphases, 10) # >= ceiling(tWR/tCK)
+ # >= ceiling(tWR/tCK)
+ wr = max(timing_settings.tWTR*phy_settings.nphases, 10)
mr0 = format_mr0(bl, cl, wr, 1)
mr1 = format_mr1(1, z_to_ron[ron], z_to_rtt_nom[rtt_nom])
mr2 = format_mr2(cwl, z_to_rtt_wr[rtt_wr])
mr3 = format_mr3(timing_settings.fine_refresh_mode)
mr4 = 0
mr5 = 0
- mr6 = format_mr6(4) # FIXME: tCCD
+ mr6 = format_mr6(4) # FIXME: tCCD
rdimm_init = []
if phy_settings.is_rdimm:
return 7
else:
for f, speed in f_to_coarse_speed.items():
- if tck >= 2/f:
- return speed
+ if tck >= 2/f:
+ return speed
raise ValueError
+
def get_fine_speed(tck):
# JESD82-31A page 83
freq = 2/tck
fine_speed = min(fine_speed, 0b1100001)
return fine_speed
- coarse_speed = get_coarse_speed(phy_settings.tck, phy_settings.rcd_pll_bypass)
+ coarse_speed = get_coarse_speed(
+ phy_settings.tck, phy_settings.rcd_pll_bypass)
fine_speed = get_fine_speed(phy_settings.tck)
- rcd_reset = 0x060 | 0x0 # F0RC06: command space control; 0: reset RCD
+ # F0RC06: command space control; 0: reset RCD
+ rcd_reset = 0x060 | 0x0
f0rc0f = 0x0F0 | 0x4 # F0RC05: 0 nCK latency adder
f0rc04 = 0x040 | phy_settings.rcd_odt_cke_drive # F0RC04: ODT/CKE drive strength
f0rc05 = 0x050 | phy_settings.rcd_clk_drive # F0RC04: ODT/CKE drive strength
- f0rc0a = 0x0A0 | coarse_speed # F0RC0A: coarse speed selection and PLL bypass
+ # F0RC0A: coarse speed selection and PLL bypass
+ f0rc0a = 0x0A0 | coarse_speed
f0rc3x = 0x300 | fine_speed # F0RC3x: fine speed selection
rdimm_init = [
("Load Mode Register 6", mr6, 6, cmds["MODE_REGISTER"], 0),
("Load Mode Register 5", mr5, 5, cmds["MODE_REGISTER"], 0),
("Load Mode Register 4", mr4, 4, cmds["MODE_REGISTER"], 0),
- ("Load Mode Register 2, CWL={0:d}".format(cwl), mr2, 2, cmds["MODE_REGISTER"], 0),
+ ("Load Mode Register 2, CWL={0:d}".format(
+ cwl), mr2, 2, cmds["MODE_REGISTER"], 0),
("Load Mode Register 1", mr1, 1, cmds["MODE_REGISTER"], 0),
- ("Load Mode Register 0, CL={0:d}, BL={1:d}".format(cl, bl), mr0, 0, cmds["MODE_REGISTER"], 200),
+ ("Load Mode Register 0, CL={0:d}, BL={1:d}".format(
+ cl, bl), mr0, 0, cmds["MODE_REGISTER"], 200),
("ZQ Calibration", 0x0400, 0, "DFII_COMMAND_WE|DFII_COMMAND_CS", 200),
]
# Init Sequence ------------------------------------------------------------------------------------
+
def get_sdram_phy_init_sequence(phy_settings, timing_settings):
    """Dispatch to the init-sequence generator for the configured memtype.

    Raises KeyError for an unsupported memtype.
    """
    generators = {
        "SDR":   get_sdr_phy_init_sequence,
        "DDR":   get_ddr_phy_init_sequence,
        "LPDDR": get_lpddr_phy_init_sequence,
        "DDR2":  get_ddr2_phy_init_sequence,
        "DDR3":  get_ddr3_phy_init_sequence,
        "DDR4":  get_ddr4_phy_init_sequence,
    }
    return generators[phy_settings.memtype](phy_settings, timing_settings)
# C Header -----------------------------------------------------------------------------------------
+
def get_sdram_phy_c_header(phy_settings, timing_settings):
r = "#ifndef __GENERATED_SDRAM_PHY_H\n#define __GENERATED_SDRAM_PHY_H\n"
r += "#include <hw/common.h>\n"
r += "#define DFII_PIX_DATA_SIZE CSR_SDRAM_DFII_PI0_WRDATA_SIZE\n"
sdram_dfii_pix_wrdata_addr = []
for n in range(nphases):
- sdram_dfii_pix_wrdata_addr.append("CSR_SDRAM_DFII_PI{n}_WRDATA_ADDR".format(n=n))
+ sdram_dfii_pix_wrdata_addr.append(
+ "CSR_SDRAM_DFII_PI{n}_WRDATA_ADDR".format(n=n))
r += """
const unsigned long sdram_dfii_pix_wrdata_addr[SDRAM_PHY_PHASES] = {{
\t{sdram_dfii_pix_wrdata_addr}
sdram_dfii_pix_rddata_addr = []
for n in range(nphases):
- sdram_dfii_pix_rddata_addr.append("CSR_SDRAM_DFII_PI{n}_RDDATA_ADDR".format(n=n))
+ sdram_dfii_pix_rddata_addr.append(
+ "CSR_SDRAM_DFII_PI{n}_RDDATA_ADDR".format(n=n))
r += """
const unsigned long sdram_dfii_pix_rddata_addr[SDRAM_PHY_PHASES] = {{
\t{sdram_dfii_pix_rddata_addr}
""".format(sdram_dfii_pix_rddata_addr=",\n\t".join(sdram_dfii_pix_rddata_addr))
r += "\n"
- init_sequence, mr1 = get_sdram_phy_init_sequence(phy_settings, timing_settings)
+ init_sequence, mr1 = get_sdram_phy_init_sequence(
+ phy_settings, timing_settings)
if phy_settings.memtype in ["DDR3", "DDR4"]:
# the value of MR1 needs to be modified during write leveling
for a_inv, ba_inv in invert_masks:
r += "\t/* {0} */\n".format(comment)
r += "\tsdram_dfii_pi0_address_write({0:#x});\n".format(a ^ a_inv)
- r += "\tsdram_dfii_pi0_baddress_write({0:d});\n".format(ba ^ ba_inv)
+ r += "\tsdram_dfii_pi0_baddress_write({0:d});\n".format(
+ ba ^ ba_inv)
if cmd[:12] == "DFII_CONTROL":
r += "\tsdram_dfii_control_write({0});\n".format(cmd)
else:
# Python Header ------------------------------------------------------------------------------------
+
def get_sdram_phy_py_header(phy_settings, timing_settings):
r = ""
r += "dfii_control_sel = 0x01\n"
r += "dfii_command_rddata = 0x20\n"
r += "\n"
- init_sequence, mr1 = get_sdram_phy_init_sequence(phy_settings, timing_settings)
+ init_sequence, mr1 = get_sdram_phy_init_sequence(
+ phy_settings, timing_settings)
if mr1 is not None:
r += "ddrx_mr1 = 0x{:x}\n".format(mr1)
_technology_timings = ["tREFI", "tWTR", "tCCD", "tRRD", "tZQCS"]
+
class _TechnologyTimings(Settings):
    """Technology (speed-bin independent) timings; field names match _technology_timings."""
    def __init__(self, tREFI, tWTR, tCCD, tRRD, tZQCS=None):
        # set_attributes presumably stores each local as an attribute
        # (filtering out 'self') -- TODO confirm against Settings.
        self.set_attributes(locals())
_speedgrade_timings = ["tRP", "tRCD", "tWR", "tRFC", "tFAW", "tRAS"]
+
class _SpeedgradeTimings(Settings):
    """Speedgrade-specific timings; field names match _speedgrade_timings."""
    def __init__(self, tRP, tRCD, tWR, tRFC, tFAW, tRAS):
        # set_attributes presumably stores each local as an attribute
        # (filtering out 'self') -- TODO confirm against Settings.
        self.set_attributes(locals())
# SPD ----------------------------------------------------------------------------------------------
+
def _read_field(byte, nbits, shift):
mask = 2**nbits - 1
return (byte & (mask << shift)) >> shift
+
def _twos_complement(value, nbits):
if value & (1 << (nbits - 1)):
value -= (1 << nbits)
return value
+
def _word(msb, lsb):
return (msb << 8) | lsb
return _read_field(byte, nbits=4, shift=0)
b = spd_data
- tck_min = self.txx_ns(mtb=b[12], ftb=b[34])
- taa_min = self.txx_ns(mtb=b[16], ftb=b[35])
- twr_min = self.txx_ns(mtb=b[17])
+ tck_min = self.txx_ns(mtb=b[12], ftb=b[34])
+ taa_min = self.txx_ns(mtb=b[16], ftb=b[35])
+ twr_min = self.txx_ns(mtb=b[17])
trcd_min = self.txx_ns(mtb=b[18], ftb=b[36])
trrd_min = self.txx_ns(mtb=b[19])
- trp_min = self.txx_ns(mtb=b[20], ftb=b[37])
+ trp_min = self.txx_ns(mtb=b[20], ftb=b[37])
tras_min = self.txx_ns(mtb=_word(lsn(b[21]), b[22]))
- trc_min = self.txx_ns(mtb=_word(msn(b[21]), b[23]), ftb=b[38])
+ trc_min = self.txx_ns(mtb=_word(msn(b[21]), b[23]), ftb=b[38])
trfc_min = self.txx_ns(mtb=_word(b[25], b[24]))
twtr_min = self.txx_ns(mtb=b[26])
trtp_min = self.txx_ns(mtb=b[27])
tfaw_min = self.txx_ns(mtb=_word(lsn(b[28]), b[29]))
technology_timings = _TechnologyTimings(
- tREFI = 64e6/8192, # 64ms/8192ops
- tWTR = (4, twtr_min), # min 4 cycles
- tCCD = (4, None), # min 4 cycles
- tRRD = (4, trrd_min), # min 4 cycles
- tZQCS = (64, 80),
+ tREFI=64e6/8192, # 64ms/8192ops
+ tWTR=(4, twtr_min), # min 4 cycles
+ tCCD=(4, None), # min 4 cycles
+ tRRD=(4, trrd_min), # min 4 cycles
+ tZQCS=(64, 80),
)
speedgrade_timings = _SpeedgradeTimings(
- tRP = trp_min,
- tRCD = trcd_min,
- tWR = twr_min,
- tRFC = (None, trfc_min),
- tFAW = (None, tfaw_min),
- tRAS = tras_min,
+ tRP=trp_min,
+ tRCD=trcd_min,
+ tWR=twr_min,
+ tRFC=(None, trfc_min),
+ tFAW=(None, tfaw_min),
+ tRAS=tras_min,
)
self.speedgrade = str(self.speedgrade_freq(tck_min))
# All the DDR3 timings are defined in the units of "timebase", which
# consists of medium timebase (nanosec) and fine timebase (picosec).
fine_timebase_dividend = _read_field(data[9], nbits=4, shift=4)
- fine_timebase_divisor = _read_field(data[9], nbits=4, shift=0)
+ fine_timebase_divisor = _read_field(data[9], nbits=4, shift=0)
fine_timebase_ps = fine_timebase_dividend / fine_timebase_divisor
self.fine_timebase_ns = fine_timebase_ps * 1e-3
medium_timebase_dividend = data[10]
- medium_timebase_divisor = data[11]
+ medium_timebase_divisor = data[11]
self.medium_timebase_ns = medium_timebase_dividend / medium_timebase_divisor
def txx_ns(self, mtb, ftb=0):
raise ValueError("Transfer rate = {:.2f} does not correspond to any DDR3 speedgrade"
.format(freq_mhz))
+
def parse_spd_hexdump(filename):
"""Parse data dumped using the `spdread` command in LiteX BIOS
# SDRAMModule --------------------------------------------------------------------------------------
+
class SDRAMModule:
"""SDRAM module geometry and timings.
various speedgrades.
"""
registered = False
+
def __init__(self, clk_freq, rate, speedgrade=None, fine_refresh_mode=None):
- self.clk_freq = clk_freq
- self.rate = rate
- self.speedgrade = speedgrade
+ self.clk_freq = clk_freq
+ self.rate = rate
+ self.speedgrade = speedgrade
self.geom_settings = GeomSettings(
- bankbits = log2_int(self.nbanks),
- rowbits = log2_int(self.nrows),
- colbits = log2_int(self.ncols),
+ bankbits=log2_int(self.nbanks),
+ rowbits=log2_int(self.nrows),
+ colbits=log2_int(self.ncols),
)
assert not (self.memtype != "DDR4" and fine_refresh_mode != None)
assert fine_refresh_mode in [None, "1x", "2x", "4x"]
if (fine_refresh_mode is None) and (self.memtype == "DDR4"):
fine_refresh_mode = "1x"
self.timing_settings = TimingSettings(
- tRP = self.ns_to_cycles(self.get("tRP")),
- tRCD = self.ns_to_cycles(self.get("tRCD")),
- tWR = self.ns_to_cycles(self.get("tWR")),
- tREFI = self.ns_to_cycles(self.get("tREFI", fine_refresh_mode), False),
- tRFC = self.ck_ns_to_cycles(*self.get("tRFC", fine_refresh_mode)),
- tWTR = self.ck_ns_to_cycles(*self.get("tWTR")),
- tFAW = None if self.get("tFAW") is None else self.ck_ns_to_cycles(*self.get("tFAW")),
- tCCD = None if self.get("tCCD") is None else self.ck_ns_to_cycles(*self.get("tCCD")),
- tRRD = None if self.get("tRRD") is None else self.ck_ns_to_cycles(*self.get("tRRD")),
- tRC = None if self.get("tRAS") is None else self.ns_to_cycles(self.get("tRP") + self.get("tRAS")),
- tRAS = None if self.get("tRAS") is None else self.ns_to_cycles(self.get("tRAS")),
- tZQCS = None if self.get("tZQCS") is None else self.ck_ns_to_cycles(*self.get("tZQCS"))
+ tRP=self.ns_to_cycles(self.get("tRP")),
+ tRCD=self.ns_to_cycles(self.get("tRCD")),
+ tWR=self.ns_to_cycles(self.get("tWR")),
+ tREFI=self.ns_to_cycles(
+ self.get("tREFI", fine_refresh_mode), False),
+ tRFC=self.ck_ns_to_cycles(*self.get("tRFC", fine_refresh_mode)),
+ tWTR=self.ck_ns_to_cycles(*self.get("tWTR")),
+ tFAW=None if self.get("tFAW") is None else self.ck_ns_to_cycles(
+ *self.get("tFAW")),
+ tCCD=None if self.get("tCCD") is None else self.ck_ns_to_cycles(
+ *self.get("tCCD")),
+ tRRD=None if self.get("tRRD") is None else self.ck_ns_to_cycles(
+ *self.get("tRRD")),
+ tRC=None if self.get("tRAS") is None else self.ns_to_cycles(
+ self.get("tRP") + self.get("tRAS")),
+ tRAS=None if self.get(
+ "tRAS") is None else self.ns_to_cycles(self.get("tRAS")),
+ tZQCS=None if self.get(
+ "tZQCS") is None else self.ck_ns_to_cycles(*self.get("tZQCS"))
)
self.timing_settings.fine_refresh_mode = fine_refresh_mode
clk_period_ns = 1e9/self.clk_freq
if margin:
margins = {
- "1:1" : 0,
- "1:2" : clk_period_ns/2,
- "1:4" : 3*clk_period_ns/4
+ "1:1": 0,
+ "1:2": clk_period_ns/2,
+ "1:4": 3*clk_period_ns/4
}
t += margins[self.rate]
return ceil(t/clk_period_ns)
def ck_to_cycles(self, c):
d = {
- "1:1" : 1,
- "1:2" : 2,
- "1:4" : 4
+ "1:1": 1,
+ "1:2": 2,
+ "1:4": 4
}
return ceil(c/d[self.rate])
rate = "1:{}".format(nphases)
return _SDRAMModule(clk_freq,
- rate = rate,
- speedgrade = spd.speedgrade,
- fine_refresh_mode = fine_refresh_mode)
+ rate=rate,
+ speedgrade=spd.speedgrade,
+ fine_refresh_mode=fine_refresh_mode)
+
class SDRAMRegisteredModule(SDRAMModule):
    # Marker subclass for registered (buffered) modules.
    registered = True
# SDR ----------------------------------------------------------------------------------------------
# Base classes for SDR modules (unbuffered and registered variants).
class SDRModule(SDRAMModule):
    memtype = "SDR"


class SDRRegisteredModule(SDRAMRegisteredModule):
    memtype = "SDR"
+
class IS42S16160(SDRModule):
    # geometry
    nbanks = 4
    nrows  = 8192
    ncols  = 512
    # timings
    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(2, None), tCCD=(1, None), tRRD=None)
    speedgrade_timings = {"default": _SpeedgradeTimings(tRP=20, tRCD=20, tWR=20, tRFC=(None, 70), tFAW=None, tRAS=None)}
+
class IS42S16320(SDRModule):
    # geometry
    nbanks = 4
    nrows  = 8192
    ncols  = 1024
    # timings
    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(2, None), tCCD=(1, None), tRRD=None)
    speedgrade_timings = {"default": _SpeedgradeTimings(tRP=20, tRCD=20, tWR=20, tRFC=(None, 70), tFAW=None, tRAS=None)}
+
class MT48LC4M16(SDRModule):
    # geometry
    nbanks = 4
    nrows  = 4096
    ncols  = 256
    # timings
    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(2, None), tCCD=(1, None), tRRD=None)
    speedgrade_timings = {"default": _SpeedgradeTimings(tRP=15, tRCD=15, tWR=14, tRFC=(None, 66), tFAW=None, tRAS=None)}
+
class MT48LC16M16(SDRModule):
    # geometry
    nbanks = 4
    nrows  = 8192
    ncols  = 512
    # timings
    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(2, None), tCCD=(1, None), tRRD=(None, 15))
    speedgrade_timings = {"default": _SpeedgradeTimings(tRP=20, tRCD=20, tWR=15, tRFC=(None, 66), tFAW=None, tRAS=44)}
+
class AS4C16M16(SDRModule):
    # geometry
    nbanks = 4
    nrows  = 8192
    ncols  = 512
    # timings
    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(2, None), tCCD=(1, None), tRRD=None)
    speedgrade_timings = {"default": _SpeedgradeTimings(tRP=18, tRCD=18, tWR=12, tRFC=(None, 60), tFAW=None, tRAS=None)}
+
class AS4C32M16(SDRModule):
    # geometry
    nbanks = 4
    nrows  = 8192
    ncols  = 1024
    # timings
    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(2, None), tCCD=(1, None), tRRD=None)
    speedgrade_timings = {"default": _SpeedgradeTimings(tRP=18, tRCD=18, tWR=12, tRFC=(None, 60), tFAW=None, tRAS=None)}
+
class AS4C32M8(SDRModule):
    # geometry
    nbanks = 4
    nrows  = 8192
    ncols  = 1024
    # timings
    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(2, None), tCCD=(1, None), tRRD=(None, 15))
    speedgrade_timings = {"default": _SpeedgradeTimings(tRP=20, tRCD=20, tWR=15, tRFC=(None, 66), tFAW=None, tRAS=44)}
+
class M12L64322A(SDRModule):
    # geometry
    nbanks = 4
    nrows  = 2048
    ncols  = 256
    # timings (note: tREFI uses 4096 refresh ops, unlike most modules here)
    technology_timings = _TechnologyTimings(tREFI=64e6/4096, tWTR=(2, None), tCCD=(1, None), tRRD=(None, 10))
    speedgrade_timings = {"default": _SpeedgradeTimings(tRP=15, tRCD=15, tWR=15, tRFC=(None, 55), tFAW=None, tRAS=40)}
+
class M12L16161A(SDRModule):
    # geometry
    nbanks = 2
    nrows  = 2048
    ncols  = 256
    # timings (note: tREFI uses 4096 refresh ops, unlike most modules here)
    technology_timings = _TechnologyTimings(tREFI=64e6/4096, tWTR=(2, None), tCCD=(1, None), tRRD=(None, 10))
    speedgrade_timings = {"default": _SpeedgradeTimings(tRP=15, tRCD=15, tWR=15, tRFC=(None, 55), tFAW=None, tRAS=40)}
# DDR ----------------------------------------------------------------------------------------------
# Base classes for DDR modules (unbuffered and registered variants).
class DDRModule(SDRAMModule):
    memtype = "DDR"


class DDRRegisteredModule(SDRAMRegisteredModule):
    memtype = "DDR"
+
class MT46V32M16(DDRModule):
    # Inherit from DDRModule for consistency with the other per-memtype
    # module classes (was SDRAMModule + explicit memtype = "DDR", which
    # is behaviorally identical).
    # geometry
    nbanks = 4
    nrows  = 8192
    ncols  = 1024
    # timings
    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(2, None), tCCD=(1, None), tRRD=None)
    speedgrade_timings = {"default": _SpeedgradeTimings(tRP=15, tRCD=15, tWR=15, tRFC=(None, 70), tFAW=None, tRAS=None)}
# LPDDR --------------------------------------------------------------------------------------------
# Base classes for LPDDR modules (unbuffered and registered variants).
class LPDDRModule(SDRAMModule):
    memtype = "LPDDR"


class LPDDRRegisteredModule(SDRAMRegisteredModule):
    memtype = "LPDDR"
+
class MT46H32M16(LPDDRModule):
    # geometry
    nbanks = 4
    nrows  = 8192
    ncols  = 1024
    # timings
    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(2, None), tCCD=(1, None), tRRD=None)
    speedgrade_timings = {"default": _SpeedgradeTimings(tRP=15, tRCD=15, tWR=15, tRFC=(None, 72), tFAW=None, tRAS=None)}
+
class MT46H32M32(LPDDRModule):
    # geometry
    nbanks = 4
    nrows  = 8192
    ncols  = 1024
    # timings
    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(2, None), tCCD=(1, None), tRRD=None)
    speedgrade_timings = {"default": _SpeedgradeTimings(tRP=15, tRCD=15, tWR=15, tRFC=(None, 72), tFAW=None, tRAS=None)}
# DDR2 ---------------------------------------------------------------------------------------------
# Base classes for DDR2 modules (unbuffered and registered variants).
class DDR2Module(SDRAMModule):
    memtype = "DDR2"


class DDR2RegisteredModule(SDRAMRegisteredModule):
    memtype = "DDR2"
+
class MT47H128M8(DDR2Module):
    # Redundant "memtype = DDR2" override removed: DDR2Module already
    # sets the identical value.
    # geometry
    nbanks = 8
    nrows  = 16384
    ncols  = 1024
    # timings
    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(None, 7.5), tCCD=(2, None), tRRD=None)
    speedgrade_timings = {"default": _SpeedgradeTimings(tRP=15, tRCD=15, tWR=15, tRFC=(None, 127.5), tFAW=None, tRAS=None)}
+
class MT47H32M16(DDR2Module):
    # Redundant "memtype = DDR2" override removed: DDR2Module already
    # sets the identical value.
    # geometry
    nbanks = 4
    nrows  = 8192
    ncols  = 1024
    # timings
    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(None, 7.5), tCCD=(2, None), tRRD=None)
    speedgrade_timings = {"default": _SpeedgradeTimings(tRP=15, tRCD=15, tWR=15, tRFC=(None, 127.5), tFAW=None, tRAS=None)}
+
class MT47H64M16(DDR2Module):
    # Redundant "memtype = DDR2" override removed: DDR2Module already
    # sets the identical value.
    # geometry
    nbanks = 8
    nrows  = 8192
    ncols  = 1024
    # timings
    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(None, 7.5), tCCD=(2, None), tRRD=None)
    speedgrade_timings = {"default": _SpeedgradeTimings(tRP=15, tRCD=15, tWR=15, tRFC=(None, 127.5), tFAW=None, tRAS=None)}
+
class P3R1GE4JGF(DDR2Module):
    # Redundant "memtype = DDR2" override removed: DDR2Module already
    # sets the identical value.
    # geometry
    nbanks = 8
    nrows  = 8192
    ncols  = 1024
    # timings
    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(None, 7.5), tCCD=(2, None), tRRD=None)
    speedgrade_timings = {"default": _SpeedgradeTimings(tRP=12.5, tRCD=12.5, tWR=15, tRFC=(None, 127.5), tFAW=None, tRAS=None)}
# DDR3 (Chips) -------------------------------------------------------------------------------------
# Base classes for DDR3 modules (unbuffered and registered variants).
class DDR3Module(SDRAMModule):
    memtype = "DDR3"


class DDR3RegisteredModule(SDRAMRegisteredModule):
    memtype = "DDR3"
+
class MT41K64M16(DDR3Module):
memtype = "DDR3"
# geometry
nbanks = 8
- nrows = 8192
- ncols = 1024
+ nrows = 8192
+ ncols = 1024
# timings
- technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 10), tZQCS=(64, 80))
+ technology_timings = _TechnologyTimings(
+ tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 10), tZQCS=(64, 80))
speedgrade_timings = {
"800": _SpeedgradeTimings(tRP=13.1, tRCD=13.1, tWR=13.1, tRFC=(64, None), tFAW=(None, 50), tRAS=37.5),
"1066": _SpeedgradeTimings(tRP=13.1, tRCD=13.1, tWR=13.1, tRFC=(86, None), tFAW=(None, 50), tRAS=37.5),
}
speedgrade_timings["default"] = speedgrade_timings["1600"]
+
class MT41J128M16(DDR3Module):
memtype = "DDR3"
# geometry
nbanks = 8
- nrows = 16384
- ncols = 1024
+ nrows = 16384
+ ncols = 1024
# timings
- technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 10), tZQCS=(64, 80))
+ technology_timings = _TechnologyTimings(
+ tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 10), tZQCS=(64, 80))
speedgrade_timings = {
"800": _SpeedgradeTimings(tRP=13.1, tRCD=13.1, tWR=13.1, tRFC=(64, None), tFAW=(None, 50), tRAS=37.5),
"1066": _SpeedgradeTimings(tRP=13.1, tRCD=13.1, tWR=13.1, tRFC=(86, None), tFAW=(None, 50), tRAS=37.5),
}
speedgrade_timings["default"] = speedgrade_timings["1600"]
class MT41K128M16(MT41J128M16):
    """Same geometry/timings as MT41J128M16."""
+
class MT41J256M16(DDR3Module):
# geometry
nbanks = 8
- nrows = 32768
- ncols = 1024
+ nrows = 32768
+ ncols = 1024
# timings
- technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 10), tZQCS=(64, 80))
+ technology_timings = _TechnologyTimings(
+ tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 10), tZQCS=(64, 80))
speedgrade_timings = {
"800": _SpeedgradeTimings(tRP=13.1, tRCD=13.1, tWR=13.1, tRFC=(139, None), tFAW=(None, 50), tRAS=37.5),
"1066": _SpeedgradeTimings(tRP=13.1, tRCD=13.1, tWR=13.1, tRFC=(138, None), tFAW=(None, 50), tRAS=37.5),
}
speedgrade_timings["default"] = speedgrade_timings["1600"]
class MT41K256M16(MT41J256M16):
    """Same geometry/timings as MT41J256M16."""
+
class MT41J512M16(DDR3Module):
    # geometry
    nbanks = 8
    nrows  = 65536
    ncols  = 1024
    # timings
    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 10), tZQCS=(64, 80))
    speedgrade_timings = {
        "1600": _SpeedgradeTimings(tRP=13.75, tRCD=13.75, tWR=13.75, tRFC=(280, None), tFAW=(None, 40), tRAS=39),
    }
    speedgrade_timings["default"] = speedgrade_timings["1600"]
class MT41K512M16(MT41J512M16):
    """Same geometry/timings as MT41J512M16."""
+
class K4B1G0446F(DDR3Module):
# geometry
nbanks = 8
- nrows = 16384
- ncols = 1024
+ nrows = 16384
+ ncols = 1024
# timings
- technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 10), tZQCS=(64, 80))
+ technology_timings = _TechnologyTimings(
+ tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 10), tZQCS=(64, 80))
speedgrade_timings = {
"800": _SpeedgradeTimings(tRP=15, tRCD=15, tWR=15, tRFC=(120, None), tFAW=(None, 50), tRAS=37.5),
"1066": _SpeedgradeTimings(tRP=13.125, tRCD=13.125, tWR=15, tRFC=(160, None), tFAW=(None, 50), tRAS=37.5),
}
speedgrade_timings["default"] = speedgrade_timings["1600"]
+
class K4B2G1646F(DDR3Module):
# geometry
nbanks = 8
- nrows = 16384
- ncols = 1024
+ nrows = 16384
+ ncols = 1024
# timings
- technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 10), tZQCS=(64, 80))
+ technology_timings = _TechnologyTimings(
+ tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 10), tZQCS=(64, 80))
speedgrade_timings = {
"800": _SpeedgradeTimings(tRP=15, tRCD=15, tWR=15, tRFC=(104, None), tFAW=(None, 50), tRAS=37.5),
"1066": _SpeedgradeTimings(tRP=13.125, tRCD=13.125, tWR=15, tRFC=(139, None), tFAW=(None, 50), tRAS=37.5),
}
speedgrade_timings["default"] = speedgrade_timings["1600"]
+
class H5TC4G63CFR(DDR3Module):
    # geometry
    nbanks = 8
    nrows  = 16384
    ncols  = 1024
    # timings
    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 7.5), tZQCS=(64, 80))
    speedgrade_timings = {
        "800": _SpeedgradeTimings(tRP=15, tRCD=15, tWR=15, tRFC=(260, None), tFAW=(None, 40), tRAS=37.5),
    }
    speedgrade_timings["default"] = speedgrade_timings["800"]
+
class IS43TR16128B(DDR3Module):
# geometry
nbanks = 8
- nrows = 16384
- ncols = 1024
+ nrows = 16384
+ ncols = 1024
# timings
- technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 6), tZQCS=(64, 80))
+ technology_timings = _TechnologyTimings(
+ tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 6), tZQCS=(64, 80))
speedgrade_timings = {
"1600": _SpeedgradeTimings(tRP=13.75, tRCD=13.75, tWR=15, tRFC=(None, 160), tFAW=(None, 40), tRAS=35),
}
# base chip: MT41J128M8
# geometry
nbanks = 8
- nrows = 16384
- ncols = 1024
+ nrows = 16384
+ ncols = 1024
# timings
- technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 6), tZQCS=(64, 80))
+ technology_timings = _TechnologyTimings(
+ tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 6), tZQCS=(64, 80))
speedgrade_timings = {
"1066": _SpeedgradeTimings(tRP=15, tRCD=15, tWR=15, tRFC=(None, 110), tFAW=(None, 37.5), tRAS=37.5),
"1333": _SpeedgradeTimings(tRP=13.125, tRCD=13.125, tWR=15, tRFC=(None, 110), tFAW=(None, 30), tRAS=36),
}
speedgrade_timings["default"] = speedgrade_timings["1333"]
+
class MT8KTF51264(DDR3Module):
# base chip: MT41K512M8
# geometry
nbanks = 8
- nrows = 65536
- ncols = 1024
+ nrows = 65536
+ ncols = 1024
# timings
- technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 6), tZQCS=(64, 80))
+ technology_timings = _TechnologyTimings(
+ tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 6), tZQCS=(64, 80))
speedgrade_timings = {
- "800" : _SpeedgradeTimings(tRP=15, tRCD=15, tWR=15, tRFC=(None, 260), tFAW=(None, 40), tRAS=37.5),
+ "800": _SpeedgradeTimings(tRP=15, tRCD=15, tWR=15, tRFC=(None, 260), tFAW=(None, 40), tRAS=37.5),
"1066": _SpeedgradeTimings(tRP=15, tRCD=15, tWR=15, tRFC=(None, 260), tFAW=(None, 40), tRAS=37.5),
"1333": _SpeedgradeTimings(tRP=13.125, tRCD=13.125, tWR=15, tRFC=(None, 260), tFAW=(None, 30), tRAS=36),
"1600": _SpeedgradeTimings(tRP=13.125, tRCD=13.125, tWR=15, tRFC=(None, 260), tFAW=(None, 30), tRAS=35),
}
speedgrade_timings["default"] = speedgrade_timings["1866"]
+
class MT18KSF1G72HZ(DDR3Module):
# base chip: MT41K512M8
# geometry
nbanks = 8
- nrows = 65536
- ncols = 1024
+ nrows = 65536
+ ncols = 1024
# timings
- technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 6), tZQCS=(64, 80))
+ technology_timings = _TechnologyTimings(
+ tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 6), tZQCS=(64, 80))
speedgrade_timings = {
"1066": _SpeedgradeTimings(tRP=15, tRCD=15, tWR=15, tRFC=(None, 260), tFAW=(None, 40), tRAS=37.5),
"1333": _SpeedgradeTimings(tRP=13.125, tRCD=13.125, tWR=15, tRFC=(None, 260), tFAW=(None, 30), tRAS=36),
}
speedgrade_timings["default"] = speedgrade_timings["1600"]
+
class AS4C256M16D3A(DDR3Module):
# geometry
nbanks = 8
- nrows = 32768
- ncols = 1024
+ nrows = 32768
+ ncols = 1024
# timings
- technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 7.5), tZQCS=(64, 80))
+ technology_timings = _TechnologyTimings(
+ tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 7.5), tZQCS=(64, 80))
speedgrade_timings = {
"1600": _SpeedgradeTimings(tRP=13.75, tRCD=13.75, tWR=15, tRFC=(None, 260), tFAW=(None, 40), tRAS=35),
}
speedgrade_timings["default"] = speedgrade_timings["1600"]
+
class MT16KTF1G64HZ(DDR3Module):
# base chip: MT41K512M8
# geometry
nbanks = 8
- nrows = 65536
- ncols = 1024
+ nrows = 65536
+ ncols = 1024
# timings
- technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 6), tZQCS=(64, 80))
+ technology_timings = _TechnologyTimings(
+ tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 6), tZQCS=(64, 80))
speedgrade_timings = {
- "800" : _SpeedgradeTimings(tRP=15, tRCD=15, tWR=15, tRFC=(None, 260), tFAW=(None, 40), tRAS=37.5),
+ "800": _SpeedgradeTimings(tRP=15, tRCD=15, tWR=15, tRFC=(None, 260), tFAW=(None, 40), tRAS=37.5),
"1066": _SpeedgradeTimings(tRP=15, tRCD=15, tWR=15, tRFC=(None, 260), tFAW=(None, 40), tRAS=37.5),
"1333": _SpeedgradeTimings(tRP=15, tRCD=15, tWR=15, tRFC=(None, 260), tFAW=(None, 30), tRAS=36),
"1600": _SpeedgradeTimings(tRP=13.125, tRCD=13.125, tWR=15, tRFC=(None, 260), tFAW=(None, 30), tRAS=35),
# DDR4 (Chips) -------------------------------------------------------------------------------------
-class DDR4Module(SDRAMModule): memtype = "DDR4"
-class DDR4RegisteredModule(SDRAMRegisteredModule): memtype = "DDR4"
+class DDR4Module(SDRAMModule):
+ memtype = "DDR4"
+
+
+class DDR4RegisteredModule(SDRAMRegisteredModule):
+ memtype = "DDR4"
+
class EDY4016A(DDR4Module):
# geometry
ngroupbanks = 4
- ngroups = 2
- nbanks = ngroups * ngroupbanks
- nrows = 32768
- ncols = 1024
+ ngroups = 2
+ nbanks = ngroups * ngroupbanks
+ nrows = 32768
+ ncols = 1024
# timings
trefi = {"1x": 64e6/8192, "2x": (64e6/8192)/2, "4x": (64e6/8192)/4}
- trfc = {"1x": (None, 260), "2x": (None, 160), "4x": (None, 110)}
- technology_timings = _TechnologyTimings(tREFI=trefi, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 4.9), tZQCS=(128, 80))
+ trfc = {"1x": (None, 260), "2x": (None, 160), "4x": (None, 110)}
+ technology_timings = _TechnologyTimings(tREFI=trefi, tWTR=(
+ 4, 7.5), tCCD=(4, None), tRRD=(4, 4.9), tZQCS=(128, 80))
speedgrade_timings = {
"2400": _SpeedgradeTimings(tRP=13.32, tRCD=13.32, tWR=15, tRFC=trfc, tFAW=(28, 30), tRAS=32),
}
speedgrade_timings["default"] = speedgrade_timings["2400"]
+
class MT40A1G8(DDR4Module):
# geometry
ngroupbanks = 4
- ngroups = 4
- nbanks = ngroups * ngroupbanks
- nrows = 65536
- ncols = 1024
+ ngroups = 4
+ nbanks = ngroups * ngroupbanks
+ nrows = 65536
+ ncols = 1024
# timings
trefi = {"1x": 64e6/8192, "2x": (64e6/8192)/2, "4x": (64e6/8192)/4}
- trfc = {"1x": (None, 350), "2x": (None, 260), "4x": (None, 160)}
- technology_timings = _TechnologyTimings(tREFI=trefi, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 6.4), tZQCS=(128, 80))
+ trfc = {"1x": (None, 350), "2x": (None, 260), "4x": (None, 160)}
+ technology_timings = _TechnologyTimings(tREFI=trefi, tWTR=(
+ 4, 7.5), tCCD=(4, None), tRRD=(4, 6.4), tZQCS=(128, 80))
speedgrade_timings = {
"2400": _SpeedgradeTimings(tRP=13.32, tRCD=13.32, tWR=15, tRFC=trfc, tFAW=(20, 25), tRAS=32),
"2666": _SpeedgradeTimings(tRP=13.50, tRCD=13.50, tWR=15, tRFC=trfc, tFAW=(20, 21), tRAS=32),
}
speedgrade_timings["default"] = speedgrade_timings["2400"]
+
class MT40A256M16(DDR4Module):
# geometry
ngroupbanks = 4
- ngroups = 2
- nbanks = ngroups * ngroupbanks
- nrows = 32768
- ncols = 1024
+ ngroups = 2
+ nbanks = ngroups * ngroupbanks
+ nrows = 32768
+ ncols = 1024
# timings
trefi = {"1x": 64e6/8192, "2x": (64e6/8192)/2, "4x": (64e6/8192)/4}
- trfc = {"1x": (None, 260), "2x": (None, 160), "4x": (None, 110)}
- technology_timings = _TechnologyTimings(tREFI=trefi, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 4.9), tZQCS=(128, 80))
+ trfc = {"1x": (None, 260), "2x": (None, 160), "4x": (None, 110)}
+ technology_timings = _TechnologyTimings(tREFI=trefi, tWTR=(
+ 4, 7.5), tCCD=(4, None), tRRD=(4, 4.9), tZQCS=(128, 80))
speedgrade_timings = {
"2400": _SpeedgradeTimings(tRP=13.32, tRCD=13.32, tWR=15, tRFC=trfc, tFAW=(28, 35), tRAS=32),
}
speedgrade_timings["default"] = speedgrade_timings["2400"]
+
class MT40A512M8(DDR4Module):
# geometry
ngroupbanks = 4
- ngroups = 4
- nbanks = ngroups * ngroupbanks
- nrows = 32768
- ncols = 1024
+ ngroups = 4
+ nbanks = ngroups * ngroupbanks
+ nrows = 32768
+ ncols = 1024
# timings
trefi = {"1x": 64e6/8192, "2x": (64e6/8192)/2, "4x": (64e6/8192)/4}
- trfc = {"1x": (None, 350), "2x": (None, 260), "4x": (None, 160)}
- technology_timings = _TechnologyTimings(tREFI=trefi, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 4.9), tZQCS=(128, 80))
+ trfc = {"1x": (None, 350), "2x": (None, 260), "4x": (None, 160)}
+ technology_timings = _TechnologyTimings(tREFI=trefi, tWTR=(
+ 4, 7.5), tCCD=(4, None), tRRD=(4, 4.9), tZQCS=(128, 80))
speedgrade_timings = {
"2400": _SpeedgradeTimings(tRP=13.32, tRCD=13.32, tWR=15, tRFC=trfc, tFAW=(20, 25), tRAS=32),
"2666": _SpeedgradeTimings(tRP=13.50, tRCD=13.50, tWR=15, tRFC=trfc, tFAW=(20, 21), tRAS=32),
}
speedgrade_timings["default"] = speedgrade_timings["2400"]
+
class MT40A512M16(DDR4Module):
# geometry
ngroupbanks = 4
- ngroups = 2
- nbanks = ngroups * ngroupbanks
- nrows = 65536
- ncols = 1024
+ ngroups = 2
+ nbanks = ngroups * ngroupbanks
+ nrows = 65536
+ ncols = 1024
# timings
trefi = {"1x": 64e6/8192, "2x": (64e6/8192)/2, "4x": (64e6/8192)/4}
- trfc = {"1x": (None, 350), "2x": (None, 260), "4x": (None, 160)}
- technology_timings = _TechnologyTimings(tREFI=trefi, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 4.9), tZQCS=(128, 80))
+ trfc = {"1x": (None, 350), "2x": (None, 260), "4x": (None, 160)}
+ technology_timings = _TechnologyTimings(tREFI=trefi, tWTR=(
+ 4, 7.5), tCCD=(4, None), tRRD=(4, 4.9), tZQCS=(128, 80))
speedgrade_timings = {
"2400": _SpeedgradeTimings(tRP=13.32, tRCD=13.32, tWR=15, tRFC=trfc, tFAW=(20, 25), tRAS=32),
}
# DDR4 (SO-DIMM) -----------------------------------------------------------------------------------
+
class KVR21SE15S84(DDR4Module):
# geometry
ngroupbanks = 4
- ngroups = 4
- nbanks = ngroups * ngroupbanks
- nrows = 32768
- ncols = 1024
+ ngroups = 4
+ nbanks = ngroups * ngroupbanks
+ nrows = 32768
+ ncols = 1024
# timings
trefi = {"1x": 64e6/8192, "2x": (64e6/8192)/2, "4x": (64e6/8192)/4}
- trfc = {"1x": (None, 350), "2x": (None, 260), "4x": (None, 160)}
- technology_timings = _TechnologyTimings(tREFI=trefi, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 4.9), tZQCS=(128, 80))
+ trfc = {"1x": (None, 350), "2x": (None, 260), "4x": (None, 160)}
+ technology_timings = _TechnologyTimings(tREFI=trefi, tWTR=(
+ 4, 7.5), tCCD=(4, None), tRRD=(4, 4.9), tZQCS=(128, 80))
speedgrade_timings = {
"2133": _SpeedgradeTimings(tRP=13.5, tRCD=13.5, tWR=15, tRFC=trfc, tFAW=(20, 25), tRAS=33),
}
speedgrade_timings["default"] = speedgrade_timings["2133"]
+
class MTA4ATF51264HZ(DDR4Module):
# geometry
ngroupbanks = 4
- ngroups = 2
- nbanks = ngroups * ngroupbanks
- nrows = 65536
- ncols = 1024
+ ngroups = 2
+ nbanks = ngroups * ngroupbanks
+ nrows = 65536
+ ncols = 1024
# timings
trefi = {"1x": 64e6/8192, "2x": (64e6/8192)/2, "4x": (64e6/8192)/4}
- trfc = {"1x": (None, 350), "2x": (None, 260), "4x": (None, 160)}
- technology_timings = _TechnologyTimings(tREFI=trefi, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 4.9), tZQCS=(128, 80))
+ trfc = {"1x": (None, 350), "2x": (None, 260), "4x": (None, 160)}
+ technology_timings = _TechnologyTimings(tREFI=trefi, tWTR=(
+ 4, 7.5), tCCD=(4, None), tRRD=(4, 4.9), tZQCS=(128, 80))
speedgrade_timings = {
"2133": _SpeedgradeTimings(tRP=13.5, tRCD=13.5, tWR=15, tRFC=trfc, tFAW=(20, 25), tRAS=33),
}
# DDR4 (RDIMM) -------------------------------------------------------------------------------------
+
class MTA18ASF2G72PZ(DDR4RegisteredModule):
# geometry
ngroupbanks = 4
- ngroups = 4
- nbanks = ngroups * ngroupbanks
- nrows = 131072
- ncols = 1024
+ ngroups = 4
+ nbanks = ngroups * ngroupbanks
+ nrows = 131072
+ ncols = 1024
# timings
trefi = {"1x": 64e6/8192, "2x": (64e6/8192)/2, "4x": (64e6/8192)/4}
- trfc = {"1x": (None, 350), "2x": (None, 260), "4x": (None, 160)}
- technology_timings = _TechnologyTimings(tREFI=trefi, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 4.9), tZQCS=(128, 80))
+ trfc = {"1x": (None, 350), "2x": (None, 260), "4x": (None, 160)}
+ technology_timings = _TechnologyTimings(tREFI=trefi, tWTR=(
+ 4, 7.5), tCCD=(4, None), tRRD=(4, 4.9), tZQCS=(128, 80))
speedgrade_timings = {
"2400": _SpeedgradeTimings(tRP=13.32, tRCD=13.32, tWR=15, tRFC=trfc, tFAW=(20, 25), tRAS=32),
}
__ALL__ = ["Interface"]
+
def phase_description(addressbits, bankbits, nranks, databits):
return [
# cmd description
def __init__(self, addressbits, bankbits, nranks, databits, nphases=1):
self.phases = []
for p in range(nphases):
- p = Record(phase_description(addressbits, bankbits, nranks, databits))
+ p = Record(phase_description(
+ addressbits, bankbits, nranks, databits))
self.phases += [p]
p.cas_n.reset = 1
p.cs_n.reset = (2**nranks-1)
def connect(self, target):
if not isinstance(target, Interface):
raise TypeError("Target must be an instance of Interface, not {!r}"
- .format(target))
+ .format(target))
ret = []
for i in range(min(len(self.phases), len(target.phases))):
# Lattice ECP5 DDR PHY Initialization --------------------------------------------------------------
+
class ECP5DDRPHYInit(Elaboratable):
def __init__(self, eclk_cd):
self.pause = Signal()
- self.stop = Signal()
+ self.stop = Signal()
self.delay = Signal()
self._eclk_cd = eclk_cd
m = Module()
new_lock = Signal()
- update = Signal()
- stop = Signal()
- freeze = Signal()
- pause = Signal()
- reset = Signal()
+ update = Signal()
+ stop = Signal()
+ freeze = Signal()
+ pause = Signal()
+ reset = Signal()
# DDRDLLA instance -------------------------------------------------------------------------
_lock = Signal()
delay = Signal()
m.submodules += Instance("DDRDLLA",
- i_CLK = ClockSignal("sys2x"),
- i_RST = ResetSignal(),
- i_UDDCNTLN = ~update,
- i_FREEZE = freeze,
- o_DDRDEL = delay,
- o_LOCK = _lock
- )
- lock = Signal()
+ i_CLK=ClockSignal("sys2x"),
+ i_RST=ResetSignal(),
+ i_UDDCNTLN=~update,
+ i_FREEZE=freeze,
+ o_DDRDEL=delay,
+ o_LOCK=_lock
+ )
+ lock = Signal()
lock_d = Signal()
m.submodules += FFSynchronizer(_lock, lock)
m.d.sync += lock_d.eq(lock)
m.d.sync += new_lock.eq(lock & ~lock_d)
# DDRDLLA/DDQBUFM/ECLK initialization sequence ---------------------------------------------
- t = 8 # in cycles
+ t = 8 # in cycles
tl = Timeline([
- (1*t, [freeze.eq(1)]), # Freeze DDRDLLA
+ (1*t, [freeze.eq(1)]), # Freeze DDRDLLA
(2*t, [stop.eq(1)]), # Stop ECLK domain
(3*t, [reset.eq(1)]), # Reset ECLK domain
(4*t, [reset.eq(0)]), # Release ECLK domain reset
(5*t, [stop.eq(0)]), # Release ECLK domain stop
- (6*t, [freeze.eq(0)]), # Release DDRDLLA freeze
+ (6*t, [freeze.eq(0)]), # Release DDRDLLA freeze
(7*t, [pause.eq(1)]), # Pause DQSBUFM
- (8*t, [update.eq(1)]), # Update DDRDLLA
- (9*t, [update.eq(0)]), # Release DDRDMMA update
+ (8*t, [update.eq(1)]), # Update DDRDLLA
+ (9*t, [update.eq(0)]), # Release DDRDLLA update
(10*t, [pause.eq(0)]), # Release DQSBUFM pause
])
m.submodules += tl
# Lattice ECP5 DDR PHY -----------------------------------------------------------------------------
+
class ECP5DDRPHY(Peripheral, Elaboratable):
def __init__(self, pads, sys_clk_freq=100e6):
super().__init__()
self._sys_clk_freq = sys_clk_freq
databits = len(self.pads.dq.o)
- assert databits%8 == 0
+ assert databits % 8 == 0
# CSR
bank = self.csr_bank()
-
+
self._dly_sel = bank.csr(databits//8, "rw")
-
+
self._rdly_dq_rst = bank.csr(1, "rw")
self._rdly_dq_inc = bank.csr(1, "rw")
self._rdly_dq_bitslip_rst = bank.csr(1, "rw")
self._zero_ev = self.event(mode="rise")
- self._bridge = self.bridge(data_width=32, granularity=8, alignment=2)
+ self._bridge = self.bridge(data_width=32, granularity=8, alignment=2)
self.bus = self._bridge.bus
self.irq = self._bridge.irq
nranks = 1 if not hasattr(self.pads, "cs_n") else len(self.pads.cs_n)
addressbits = len(self.pads.a.o)
bankbits = len(self.pads.ba.o)
- cl, cwl = get_cl_cw("DDR3", tck)
- cl_sys_latency = get_sys_latency(nphases, cl)
+ cl, cwl = get_cl_cw("DDR3", tck)
+ cl_sys_latency = get_sys_latency(nphases, cl)
cwl_sys_latency = get_sys_latency(nphases, cwl)
rdcmdphase, rdphase = get_sys_phases(nphases, cl_sys_latency, cl)
wrcmdphase, wrphase = get_sys_phases(nphases, cwl_sys_latency, cwl)
self.settings = PhySettings(
- phytype = "ECP5DDRPHY",
- memtype = "DDR3",
- databits = databits,
- dfi_databits = 4*databits,
- nranks = nranks,
- nphases = nphases,
- rdphase = rdphase,
- wrphase = wrphase,
- rdcmdphase = rdcmdphase,
- wrcmdphase = wrcmdphase,
- cl = cl,
- cwl = cwl,
- read_latency = 2 + cl_sys_latency + 2 + log2_int(4//nphases) + 4,
- write_latency = cwl_sys_latency
+ phytype="ECP5DDRPHY",
+ memtype="DDR3",
+ databits=databits,
+ dfi_databits=4*databits,
+ nranks=nranks,
+ nphases=nphases,
+ rdphase=rdphase,
+ wrphase=wrphase,
+ rdcmdphase=rdcmdphase,
+ wrcmdphase=wrcmdphase,
+ cl=cl,
+ cwl=cwl,
+ read_latency=2 + cl_sys_latency + 2 + log2_int(4//nphases) + 4,
+ write_latency=cwl_sys_latency
)
def elaborate(self, platform):
m.submodules.init = DomainRenamer("init")(ECP5DDRPHYInit("sys2x"))
# Parameters -------------------------------------------------------------------------------
- cl, cwl = get_cl_cw("DDR3", tck)
- cl_sys_latency = get_sys_latency(nphases, cl)
+ cl, cwl = get_cl_cw("DDR3", tck)
+ cl_sys_latency = get_sys_latency(nphases, cl)
cwl_sys_latency = get_sys_latency(nphases, cwl)
# Observation
# DFI Interface ----------------------------------------------------------------------------
dfi = self.dfi
- bl8_chunk = Signal()
+ bl8_chunk = Signal()
rddata_en = Signal(self.settings.read_latency)
# Clock --------------------------------------------------------------------------------
for i in range(len(self.pads.clk.o)):
sd_clk_se = Signal()
m.submodules += Instance("ODDRX2F",
- i_RST = ResetSignal("sys2x"),
- i_ECLK = ClockSignal("sys2x"),
- i_SCLK = ClockSignal(),
- i_D0 = 0,
- i_D1 = 1,
- i_D2 = 0,
- i_D3 = 1,
- o_Q = self.pads.clk.o[i]
- )
-
+ i_RST=ResetSignal("sys2x"),
+ i_ECLK=ClockSignal("sys2x"),
+ i_SCLK=ClockSignal(),
+ i_D0=0,
+ i_D1=1,
+ i_D2=0,
+ i_D3=1,
+ o_Q=self.pads.clk.o[i]
+ )
# Addresses and Commands ---------------------------------------------------------------
for i in range(addressbits):
m.submodules += Instance("ODDRX2F",
- i_RST = ResetSignal("sys2x"),
- i_ECLK = ClockSignal("sys2x"),
- i_SCLK = ClockSignal(),
- i_D0 = dfi.phases[0].address[i],
- i_D1 = dfi.phases[0].address[i],
- i_D2 = dfi.phases[1].address[i],
- i_D3 = dfi.phases[1].address[i],
- o_Q = self.pads.a.o[i]
- )
+ i_RST=ResetSignal("sys2x"),
+ i_ECLK=ClockSignal("sys2x"),
+ i_SCLK=ClockSignal(),
+ i_D0=dfi.phases[0].address[i],
+ i_D1=dfi.phases[0].address[i],
+ i_D2=dfi.phases[1].address[i],
+ i_D3=dfi.phases[1].address[i],
+ o_Q=self.pads.a.o[i]
+ )
for i in range(bankbits):
m.submodules += Instance("ODDRX2F",
- i_RST = ResetSignal("sys2x"),
- i_ECLK = ClockSignal("sys2x"),
- i_SCLK = ClockSignal(),
- i_D0 = dfi.phases[0].bank[i],
- i_D1 = dfi.phases[0].bank[i],
- i_D2 = dfi.phases[1].bank[i],
- i_D3 = dfi.phases[1].bank[i],
- o_Q = self.pads.ba.o[i]
- )
+ i_RST=ResetSignal("sys2x"),
+ i_ECLK=ClockSignal("sys2x"),
+ i_SCLK=ClockSignal(),
+ i_D0=dfi.phases[0].bank[i],
+ i_D1=dfi.phases[0].bank[i],
+ i_D2=dfi.phases[1].bank[i],
+ i_D3=dfi.phases[1].bank[i],
+ o_Q=self.pads.ba.o[i]
+ )
controls = ["ras_n", "cas_n", "we_n", "cke", "odt"]
if hasattr(self.pads, "reset_n"):
controls.append("reset_n")
for name in controls:
for i in range(len(getattr(self.pads, name))):
m.submodules += Instance("ODDRX2F",
- i_RST = ResetSignal("sys2x"),
- i_ECLK = ClockSignal("sys2x"),
- i_SCLK = ClockSignal(),
- i_D0 = getattr(dfi.phases[0], name)[i],
- i_D1 = getattr(dfi.phases[0], name)[i],
- i_D2 = getattr(dfi.phases[1], name)[i],
- i_D3 = getattr(dfi.phases[1], name)[i],
- o_Q = getattr(self.pads, name)[i]
- )
+ i_RST=ResetSignal("sys2x"),
+ i_ECLK=ClockSignal("sys2x"),
+ i_SCLK=ClockSignal(),
+ i_D0=getattr(dfi.phases[0], name)[i],
+ i_D1=getattr(dfi.phases[0], name)[i],
+ i_D2=getattr(dfi.phases[1], name)[i],
+ i_D3=getattr(dfi.phases[1], name)[i],
+ o_Q=getattr(self.pads, name)[i]
+ )
# DQ ---------------------------------------------------------------------------------------
- dq_oe = Signal()
- dqs_oe = Signal()
+ dq_oe = Signal()
+ dqs_oe = Signal()
dqs_pattern = DQSPattern()
m.submodules += dqs_pattern
for i in range(databits//8):
# DQSBUFM
- dqs_i = Signal()
- dqsr90 = Signal()
+ dqs_i = Signal()
+ dqsr90 = Signal()
dqsw270 = Signal()
- dqsw = Signal()
- rdpntr = Signal(3)
- wrpntr = Signal(3)
- rdly = Signal(7)
+ dqsw = Signal()
+ rdpntr = Signal(3)
+ wrpntr = Signal(3)
+ rdly = Signal(7)
with m.If(self._dly_sel.storage[i]):
with m.If(self._rdly_dq_rst.re):
m.d.sync += rdly.eq(0)
with m.Elif(self._rdly_dq_inc.re):
m.d.sync += rdly.eq(rdly + 1)
- datavalid = Signal()
- burstdet = Signal()
- dqs_read = Signal()
+ datavalid = Signal()
+ burstdet = Signal()
+ dqs_read = Signal()
dqs_bitslip = Signal(2)
with m.If(self._dly_sel.storage[i]):
with m.If(self._rdly_dq_bitslip_rst.re):
m.d.sync += dqs_bitslip.eq(dqs_bitslip + 1)
dqs_cases = {}
for j, b in enumerate(range(-2, 2)):
- dqs_cases[j] = dqs_read.eq(rddata_en[cl_sys_latency + b:cl_sys_latency + b + 2] != 0)
+ dqs_cases[j] = dqs_read.eq(
+ rddata_en[cl_sys_latency + b:cl_sys_latency + b + 2] != 0)
m.d.sync += Case(dqs_bitslip, dqs_cases)
m.submodules += Instance("DQSBUFM",
- p_DQS_LI_DEL_ADJ = "MINUS",
- p_DQS_LI_DEL_VAL = 1,
- p_DQS_LO_DEL_ADJ = "MINUS",
- p_DQS_LO_DEL_VAL = 4,
- # Clocks / Reset
- i_SCLK = ClockSignal("sys"),
- i_ECLK = ClockSignal("sys2x"),
- i_RST = ResetSignal("sys2x"),
- i_DDRDEL = self.init.delay,
- i_PAUSE = self.init.pause | self._dly_sel.storage[i],
-
- # Control
- # Assert LOADNs to use DDRDEL control
- i_RDLOADN = 0,
- i_RDMOVE = 0,
- i_RDDIRECTION = 1,
- i_WRLOADN = 0,
- i_WRMOVE = 0,
- i_WRDIRECTION = 1,
-
- # Reads (generate shifted DQS clock for reads)
- i_READ0 = dqs_read,
- i_READ1 = dqs_read,
- i_READCLKSEL0 = rdly[0],
- i_READCLKSEL1 = rdly[1],
- i_READCLKSEL2 = rdly[2],
- i_DQSI = dqs_i,
- o_DQSR90 = dqsr90,
- o_RDPNTR0 = rdpntr[0],
- o_RDPNTR1 = rdpntr[1],
- o_RDPNTR2 = rdpntr[2],
- o_WRPNTR0 = wrpntr[0],
- o_WRPNTR1 = wrpntr[1],
- o_WRPNTR2 = wrpntr[2],
- o_DATAVALID = self.datavalid[i],
- o_BURSTDET = burstdet,
-
- # Writes (generate shifted ECLK clock for writes)
- o_DQSW270 = dqsw270,
- o_DQSW = dqsw
- )
+ p_DQS_LI_DEL_ADJ="MINUS",
+ p_DQS_LI_DEL_VAL=1,
+ p_DQS_LO_DEL_ADJ="MINUS",
+ p_DQS_LO_DEL_VAL=4,
+ # Clocks / Reset
+ i_SCLK=ClockSignal("sys"),
+ i_ECLK=ClockSignal("sys2x"),
+ i_RST=ResetSignal("sys2x"),
+ i_DDRDEL=self.init.delay,
+ i_PAUSE=self.init.pause | self._dly_sel.storage[i],
+
+ # Control
+ # Assert LOADNs to use DDRDEL control
+ i_RDLOADN=0,
+ i_RDMOVE=0,
+ i_RDDIRECTION=1,
+ i_WRLOADN=0,
+ i_WRMOVE=0,
+ i_WRDIRECTION=1,
+
+ # Reads (generate shifted DQS clock for reads)
+ i_READ0=dqs_read,
+ i_READ1=dqs_read,
+ i_READCLKSEL0=rdly[0],
+ i_READCLKSEL1=rdly[1],
+ i_READCLKSEL2=rdly[2],
+ i_DQSI=dqs_i,
+ o_DQSR90=dqsr90,
+ o_RDPNTR0=rdpntr[0],
+ o_RDPNTR1=rdpntr[1],
+ o_RDPNTR2=rdpntr[2],
+ o_WRPNTR0=wrpntr[0],
+ o_WRPNTR1=wrpntr[1],
+ o_WRPNTR2=wrpntr[2],
+ o_DATAVALID=self.datavalid[i],
+ o_BURSTDET=burstdet,
+
+ # Writes (generate shifted ECLK clock for writes)
+ o_DQSW270=dqsw270,
+ o_DQSW=dqsw
+ )
burstdet_d = Signal()
m.d.sync += burstdet_d.eq(burstdet)
with m.If(self._burstdet_clr.re):
m.d.sync += self._burstdet_seen.status[i].eq(1)
# DQS and DM ---------------------------------------------------------------------------
- dm_o_data = Signal(8)
- dm_o_data_d = Signal(8)
- dm_o_data_muxed = Signal(4)
+ dm_o_data = Signal(8)
+ dm_o_data_d = Signal(8)
+ dm_o_data_muxed = Signal(4)
m.d.comb += dm_o_data.eq(Cat(
dfi.phases[0].wrdata_mask[0*databits//8+i],
dfi.phases[0].wrdata_mask[1*databits//8+i],
dm_bl8_cases = {}
dm_bl8_cases[0] = dm_o_data_muxed.eq(dm_o_data[:4])
dm_bl8_cases[1] = dm_o_data_muxed.eq(dm_o_data_d[4:])
- m.d.sync += Case(bl8_chunk, dm_bl8_cases) # FIXME: use self.comb?
+ m.d.sync += Case(bl8_chunk, dm_bl8_cases) # FIXME: use self.comb?
m.submodules += Instance("ODDRX2DQA",
- i_RST = ResetSignal("sys2x"),
- i_ECLK = ClockSignal("sys2x"),
- i_SCLK = ClockSignal(),
- i_DQSW270 = dqsw270,
- i_D0 = dm_o_data_muxed[0],
- i_D1 = dm_o_data_muxed[1],
- i_D2 = dm_o_data_muxed[2],
- i_D3 = dm_o_data_muxed[3],
- o_Q = pads.dm[i]
- )
-
- dqs = Signal()
+ i_RST=ResetSignal("sys2x"),
+ i_ECLK=ClockSignal("sys2x"),
+ i_SCLK=ClockSignal(),
+ i_DQSW270=dqsw270,
+ i_D0=dm_o_data_muxed[0],
+ i_D1=dm_o_data_muxed[1],
+ i_D2=dm_o_data_muxed[2],
+ i_D3=dm_o_data_muxed[3],
+ o_Q=pads.dm[i]
+ )
+
+ dqs = Signal()
dqs_oe_n = Signal()
m.submodules += [
Instance("ODDRX2DQSB",
- i_RST = ResetSignal("sys2x"),
- i_ECLK = ClockSignal("sys2x"),
- i_SCLK = ClockSignal(),
- i_DQSW = dqsw,
- i_D0 = 0, # FIXME: dqs_pattern.o[3],
- i_D1 = 1, # FIXME: dqs_pattern.o[2],
- i_D2 = 0, # FIXME: dqs_pattern.o[1],
- i_D3 = 1, # FIXME: dqs_pattern.o[0],
- o_Q = dqs
- ),
+ i_RST=ResetSignal("sys2x"),
+ i_ECLK=ClockSignal("sys2x"),
+ i_SCLK=ClockSignal(),
+ i_DQSW=dqsw,
+ i_D0=0, # FIXME: dqs_pattern.o[3],
+ i_D1=1, # FIXME: dqs_pattern.o[2],
+ i_D2=0, # FIXME: dqs_pattern.o[1],
+ i_D3=1, # FIXME: dqs_pattern.o[0],
+ o_Q=dqs
+ ),
Instance("TSHX2DQSA",
- i_RST = ResetSignal("sys2x"),
- i_ECLK = ClockSignal("sys2x"),
- i_SCLK = ClockSignal(),
- i_DQSW = dqsw,
- i_T0 = ~(dqs_pattern.preamble | dqs_oe | dqs_pattern.postamble),
- i_T1 = ~(dqs_pattern.preamble | dqs_oe | dqs_pattern.postamble),
- o_Q = dqs_oe_n
- ),
+ i_RST=ResetSignal("sys2x"),
+ i_ECLK=ClockSignal("sys2x"),
+ i_SCLK=ClockSignal(),
+ i_DQSW=dqsw,
+ i_T0=~(dqs_pattern.preamble | dqs_oe |
+ dqs_pattern.postamble),
+ i_T1=~(dqs_pattern.preamble | dqs_oe |
+ dqs_pattern.postamble),
+ o_Q=dqs_oe_n
+ ),
Tristate(pads.dqs_p[i], dqs, ~dqs_oe_n, dqs_i)
]
for j in range(8*i, 8*(i+1)):
- dq_o = Signal()
- dq_i = Signal()
- dq_oe_n = Signal()
- dq_i_delayed = Signal()
- dq_i_data = Signal(8)
- dq_o_data = Signal(8)
- dq_o_data_d = Signal(8)
+ dq_o = Signal()
+ dq_i = Signal()
+ dq_oe_n = Signal()
+ dq_i_delayed = Signal()
+ dq_i_data = Signal(8)
+ dq_o_data = Signal(8)
+ dq_o_data_d = Signal(8)
dq_o_data_muxed = Signal(4)
m.d.comb += dq_o_data.eq(Cat(
dfi.phases[0].wrdata[0*databits+j],
dq_bl8_cases = {}
dq_bl8_cases[0] = dq_o_data_muxed.eq(dq_o_data[:4])
dq_bl8_cases[1] = dq_o_data_muxed.eq(dq_o_data_d[4:])
- m.d.sync += Case(bl8_chunk, dq_bl8_cases) # FIXME: use self.comb?
+ # FIXME: use self.comb?
+ m.d.sync += Case(bl8_chunk, dq_bl8_cases)
_dq_i_data = Signal(4)
m.submodules += [
Instance("ODDRX2DQA",
- i_RST = ResetSignal("sys2x"),
- i_ECLK = ClockSignal("sys2x"),
- i_SCLK = ClockSignal(),
- i_DQSW270 = dqsw270,
- i_D0 = dq_o_data_muxed[0],
- i_D1 = dq_o_data_muxed[1],
- i_D2 = dq_o_data_muxed[2],
- i_D3 = dq_o_data_muxed[3],
- o_Q = dq_o
- ),
+ i_RST=ResetSignal("sys2x"),
+ i_ECLK=ClockSignal("sys2x"),
+ i_SCLK=ClockSignal(),
+ i_DQSW270=dqsw270,
+ i_D0=dq_o_data_muxed[0],
+ i_D1=dq_o_data_muxed[1],
+ i_D2=dq_o_data_muxed[2],
+ i_D3=dq_o_data_muxed[3],
+ o_Q=dq_o
+ ),
Instance("DELAYF",
- p_DEL_MODE = "DQS_ALIGNED_X2",
- i_LOADN = 1,
- i_MOVE = 0,
- i_DIRECTION = 0,
- i_A = dq_i,
- o_Z = dq_i_delayed
- ),
+ p_DEL_MODE="DQS_ALIGNED_X2",
+ i_LOADN=1,
+ i_MOVE=0,
+ i_DIRECTION=0,
+ i_A=dq_i,
+ o_Z=dq_i_delayed
+ ),
Instance("IDDRX2DQA",
- i_RST = ResetSignal("sys2x"),
- i_ECLK = ClockSignal("sys2x"),
- i_SCLK = ClockSignal(),
- i_DQSR90 = dqsr90,
- i_RDPNTR0 = rdpntr[0],
- i_RDPNTR1 = rdpntr[1],
- i_RDPNTR2 = rdpntr[2],
- i_WRPNTR0 = wrpntr[0],
- i_WRPNTR1 = wrpntr[1],
- i_WRPNTR2 = wrpntr[2],
- i_D = dq_i_delayed,
- o_Q0 = _dq_i_data[0],
- o_Q1 = _dq_i_data[1],
- o_Q2 = _dq_i_data[2],
- o_Q3 = _dq_i_data[3],
- )
+ i_RST=ResetSignal("sys2x"),
+ i_ECLK=ClockSignal("sys2x"),
+ i_SCLK=ClockSignal(),
+ i_DQSR90=dqsr90,
+ i_RDPNTR0=rdpntr[0],
+ i_RDPNTR1=rdpntr[1],
+ i_RDPNTR2=rdpntr[2],
+ i_WRPNTR0=wrpntr[0],
+ i_WRPNTR1=wrpntr[1],
+ i_WRPNTR2=wrpntr[2],
+ i_D=dq_i_delayed,
+ o_Q0=_dq_i_data[0],
+ o_Q1=_dq_i_data[1],
+ o_Q2=_dq_i_data[2],
+ o_Q3=_dq_i_data[3],
+ )
]
m.d.sync += dq_i_data[:4].eq(dq_i_data[4:])
m.d.sync += dq_i_data[4:].eq(_dq_i_data)
]
m.submodules += [
Instance("TSHX2DQA",
- i_RST = ResetSignal("sys2x"),
- i_ECLK = ClockSignal("sys2x"),
- i_SCLK = ClockSignal(),
- i_DQSW270 = dqsw270,
- i_T0 = ~(dqs_pattern.preamble | dq_oe | dqs_pattern.postamble),
- i_T1 = ~(dqs_pattern.preamble | dq_oe | dqs_pattern.postamble),
- o_Q = dq_oe_n,
- ),
+ i_RST=ResetSignal("sys2x"),
+ i_ECLK=ClockSignal("sys2x"),
+ i_SCLK=ClockSignal(),
+ i_DQSW270=dqsw270,
+ i_T0=~(dqs_pattern.preamble | dq_oe |
+ dqs_pattern.postamble),
+ i_T1=~(dqs_pattern.preamble | dq_oe |
+ dqs_pattern.postamble),
+ o_Q=dq_oe_n,
+ ),
Tristate(pads.dq[j], dq_o, ~dq_oe_n, dq_i)
]
# The read data valid is asserted for 1 sys_clk cycle when the data is available on the DFI
# interface, the latency is the sum of the ODDRX2DQA, CAS, IDDRX2DQA latencies.
rddata_en_last = Signal.like(rddata_en)
- m.d.comb += rddata_en.eq(Cat(dfi.phases[self.settings.rdphase].rddata_en, rddata_en_last))
+ m.d.comb += rddata_en.eq(
+ Cat(dfi.phases[self.settings.rdphase].rddata_en, rddata_en_last))
m.d.sync += rddata_en_last.eq(rddata_en)
- m.d.sync += [phase.rddata_valid.eq(rddata_en[-1]) for phase in dfi.phases]
+ m.d.sync += [phase.rddata_valid.eq(rddata_en[-1])
+ for phase in dfi.phases]
# Write Control Path -----------------------------------------------------------------------
# Creates a shift register of write commands coming from the DFI interface. This shift register
# FIXME: understand +2
wrdata_en = Signal(cwl_sys_latency + 5)
wrdata_en_last = Signal.like(wrdata_en)
- m.d.comb += wrdata_en.eq(Cat(dfi.phases[self.settings.wrphase].wrdata_en, wrdata_en_last))
+ m.d.comb += wrdata_en.eq(
+ Cat(dfi.phases[self.settings.wrphase].wrdata_en, wrdata_en_last))
m.d.sync += wrdata_en_last.eq(wrdata_en)
- m.d.comb += dq_oe.eq(wrdata_en[cwl_sys_latency + 2] | wrdata_en[cwl_sys_latency + 3])
+ m.d.comb += dq_oe.eq(wrdata_en[cwl_sys_latency + 2]
+ | wrdata_en[cwl_sys_latency + 3])
m.d.comb += bl8_chunk.eq(wrdata_en[cwl_sys_latency + 1])
m.d.comb += dqs_oe.eq(dq_oe)
# Generates DQS Preamble 1 cycle before the first write and Postamble 1 cycle after the last
# write. During writes, DQS tristate is configured as output for at least 4 sys_clk cycles:
# 1 for Preamble, 2 for the Write and 1 for the Postamble.
- m.d.comb += dqs_pattern.preamble.eq( wrdata_en[cwl_sys_latency + 1] & ~wrdata_en[cwl_sys_latency + 2])
- m.d.comb += dqs_pattern.postamble.eq(wrdata_en[cwl_sys_latency + 4] & ~wrdata_en[cwl_sys_latency + 3])
+ m.d.comb += dqs_pattern.preamble.eq(
+ wrdata_en[cwl_sys_latency + 1] & ~wrdata_en[cwl_sys_latency + 2])
+ m.d.comb += dqs_pattern.postamble.eq(
+ wrdata_en[cwl_sys_latency + 4] & ~wrdata_en[cwl_sys_latency + 3])
return m
attributed = set()
for f in self.payload_layout:
if f[0] in attributed:
- raise ValueError(f[0] + " already attributed in payload layout")
+ raise ValueError(
+ f[0] + " already attributed in payload layout")
if f[0] in reserved:
raise ValueError(f[0] + " cannot be used in endpoint layout")
attributed.add(f[0])
class _FIFOWrapper:
def __init__(self, payload_layout):
- self.sink = Endpoint(payload_layout)
+ self.sink = Endpoint(payload_layout)
self.source = Endpoint(payload_layout)
self.layout = Layout([
def __init__(self, layout, depth, fwft=True, buffered=False):
super().__init__(layout)
if buffered:
- self.fifo = fifo.SyncFIFOBuffered(width=len(Record(self.layout)), depth=depth, fwft=fwft)
+ self.fifo = fifo.SyncFIFOBuffered(
+ width=len(Record(self.layout)), depth=depth, fwft=fwft)
else:
- self.fifo = fifo.SyncFIFO(width=len(Record(self.layout)), depth=depth, fwft=fwft)
+ self.fifo = fifo.SyncFIFO(
+ width=len(Record(self.layout)), depth=depth, fwft=fwft)
self.depth = self.fifo.depth
self.level = self.fifo.level
r_domain=r_domain, w_domain=w_domain)
self.depth = self.fifo.depth
+
class PipeValid(Elaboratable):
"""Pipe valid/payload to cut timing path"""
+
def __init__(self, layout):
self.sink = Endpoint(layout)
self.source = Endpoint(layout)
self.source.first.eq(self.sink.first),
self.source.last.eq(self.sink.last),
self.source.payload.eq(self.sink.payload),
- #self.source.param.eq(self.sink.param), # TODO ensure this can be commented
+ # self.source.param.eq(self.sink.param), # TODO ensure this can be commented
]
m.d.comb += self.sink.ready.eq(~self.source.valid | self.source.ready)
return m
-class Buffer(PipeValid): pass # FIXME: Replace Buffer with PipeValid in codebase?
+
+class Buffer(PipeValid):
+ pass # FIXME: Replace Buffer with PipeValid in codebase?