From: Florent Kermarrec Date: Mon, 2 Mar 2015 10:35:53 +0000 (+0100) Subject: sdram: create core dir and move lasmicon/minicon in it X-Git-Tag: 24jan2021_ls180~2532 X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=97331153e02d4355d07f8031ca9ef1e96b259aae;p=litex.git sdram: create core dir and move lasmicon/minicon in it --- diff --git a/misoclib/mem/sdram/core/__init__.py b/misoclib/mem/sdram/core/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/misoclib/mem/sdram/core/lasmicon/__init__.py b/misoclib/mem/sdram/core/lasmicon/__init__.py new file mode 100644 index 00000000..5fdd38cd --- /dev/null +++ b/misoclib/mem/sdram/core/lasmicon/__init__.py @@ -0,0 +1,41 @@ +from migen.fhdl.std import * + +from misoclib.mem.sdram.bus import dfi, lasmibus +from misoclib.mem.sdram.core.lasmicon.refresher import * +from misoclib.mem.sdram.core.lasmicon.bankmachine import * +from misoclib.mem.sdram.core.lasmicon.multiplexer import * + +class LASMIcon(Module): + def __init__(self, phy, geom_settings, timing_settings): + if phy.settings.memtype in ["SDR"]: + burst_length = phy.settings.nphases*1 # command multiplication*SDR + elif phy.settings.memtype in ["DDR", "LPDDR", "DDR2", "DDR3"]: + burst_length = phy.settings.nphases*2 # command multiplication*DDR + address_align = log2_int(burst_length) + + self.dfi = dfi.Interface(geom_settings.mux_a, + geom_settings.bank_a, + phy.settings.dfi_d, + phy.settings.nphases) + self.lasmic = lasmibus.Interface( + aw=geom_settings.row_a + geom_settings.col_a - address_align, + dw=phy.settings.dfi_d*phy.settings.nphases, + nbanks=2**geom_settings.bank_a, + req_queue_size=timing_settings.req_queue_size, + read_latency=phy.settings.read_latency+1, + write_latency=phy.settings.write_latency+1) + self.nrowbits = geom_settings.col_a - address_align + + ### + + self.submodules.refresher = Refresher(geom_settings.mux_a, geom_settings.bank_a, + timing_settings.tRP, timing_settings.tREFI, timing_settings.tRFC) + self.submodules.bank_machines = [BankMachine(geom_settings, timing_settings, address_align, i, + getattr(self.lasmic, "bank"+str(i))) + for i in range(2**geom_settings.bank_a)] + self.submodules.multiplexer = Multiplexer(phy, geom_settings, timing_settings, + self.bank_machines, self.refresher, + self.dfi, self.lasmic) + + def get_csrs(self): + return self.multiplexer.get_csrs() diff --git a/misoclib/mem/sdram/core/lasmicon/bankmachine.py b/misoclib/mem/sdram/core/lasmicon/bankmachine.py new file mode 100644 index 00000000..3397bb36 --- /dev/null +++ b/misoclib/mem/sdram/core/lasmicon/bankmachine.py @@ -0,0 +1,145 @@ +from migen.fhdl.std import * +from migen.genlib.roundrobin import * +from migen.genlib.fsm import FSM, NextState +from migen.genlib.misc import optree +from migen.genlib.fifo import SyncFIFO + +from misoclib.mem.sdram.core.lasmicon.multiplexer import * + +class _AddressSlicer: + def __init__(self, col_a, address_align): + self.col_a = col_a + self.address_align = address_align + + def row(self, address): + split = self.col_a - self.address_align + if isinstance(address, int): + return address >> split + else: + return address[split:] + + def col(self, address): + split = self.col_a - self.address_align + if isinstance(address, int): + return (address & (2**split - 1)) << self.address_align + else: + return Cat(Replicate(0, self.address_align), address[:split]) + +class BankMachine(Module): + def __init__(self, geom_settings, timing_settings, address_align, bankn, req): + self.refresh_req = Signal() + self.refresh_gnt = Signal() 
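+        # refresh_req is driven by the multiplexer while a refresh is pending;
+        # refresh_gnt is raised back once this bank can safely be precharged for it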
+ self.cmd = CommandRequestRW(geom_settings.mux_a, geom_settings.bank_a) + + ### + + # Request FIFO + self.submodules.req_fifo = SyncFIFO([("we", 1), ("adr", flen(req.adr))], timing_settings.req_queue_size) + self.comb += [ + self.req_fifo.din.we.eq(req.we), + self.req_fifo.din.adr.eq(req.adr), + self.req_fifo.we.eq(req.stb), + req.req_ack.eq(self.req_fifo.writable), + + self.req_fifo.re.eq(req.dat_w_ack | req.dat_r_ack), + req.lock.eq(self.req_fifo.readable) + ] + reqf = self.req_fifo.dout + + slicer = _AddressSlicer(geom_settings.col_a, address_align) + + # Row tracking + has_openrow = Signal() + openrow = Signal(geom_settings.row_a) + hit = Signal() + self.comb += hit.eq(openrow == slicer.row(reqf.adr)) + track_open = Signal() + track_close = Signal() + self.sync += [ + If(track_open, + has_openrow.eq(1), + openrow.eq(slicer.row(reqf.adr)) + ), + If(track_close, + has_openrow.eq(0) + ) + ] + + # Address generation + s_row_adr = Signal() + self.comb += [ + self.cmd.ba.eq(bankn), + If(s_row_adr, + self.cmd.a.eq(slicer.row(reqf.adr)) + ).Else( + self.cmd.a.eq(slicer.col(reqf.adr)) + ) + ] + + # Respect write-to-precharge specification + precharge_ok = Signal() + t_unsafe_precharge = 2 + timing_settings.tWR - 1 + unsafe_precharge_count = Signal(max=t_unsafe_precharge+1) + self.comb += precharge_ok.eq(unsafe_precharge_count == 0) + self.sync += [ + If(self.cmd.stb & self.cmd.ack & self.cmd.is_write, + unsafe_precharge_count.eq(t_unsafe_precharge) + ).Elif(~precharge_ok, + unsafe_precharge_count.eq(unsafe_precharge_count-1) + ) + ] + + # Control and command generation FSM + fsm = FSM() + self.submodules += fsm + fsm.act("REGULAR", + If(self.refresh_req, + NextState("REFRESH") + ).Elif(self.req_fifo.readable, + If(has_openrow, + If(hit, + # NB: write-to-read specification is enforced by multiplexer + self.cmd.stb.eq(1), + req.dat_w_ack.eq(self.cmd.ack & reqf.we), + req.dat_r_ack.eq(self.cmd.ack & ~reqf.we), + self.cmd.is_read.eq(~reqf.we), + self.cmd.is_write.eq(reqf.we), + self.cmd.cas_n.eq(0), + self.cmd.we_n.eq(~reqf.we) + ).Else( + NextState("PRECHARGE") + ) + ).Else( + NextState("ACTIVATE") + ) + ) + ) + fsm.act("PRECHARGE", + # Notes: + # 1. we are presenting the column address, A10 is always low + # 2. since we always go to the ACTIVATE state, we do not need + # to assert track_close. 
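+            # 3. precharge_ok (write-to-precharge tracking above) gates the command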
+ If(precharge_ok, + self.cmd.stb.eq(1), + If(self.cmd.ack, NextState("TRP")), + self.cmd.ras_n.eq(0), + self.cmd.we_n.eq(0), + self.cmd.is_cmd.eq(1) + ) + ) + fsm.act("ACTIVATE", + s_row_adr.eq(1), + track_open.eq(1), + self.cmd.stb.eq(1), + self.cmd.is_cmd.eq(1), + If(self.cmd.ack, NextState("TRCD")), + self.cmd.ras_n.eq(0) + ) + fsm.act("REFRESH", + self.refresh_gnt.eq(precharge_ok), + track_close.eq(1), + self.cmd.is_cmd.eq(1), + If(~self.refresh_req, NextState("REGULAR")) + ) + fsm.delayed_enter("TRP", "ACTIVATE", timing_settings.tRP-1) + fsm.delayed_enter("TRCD", "REGULAR", timing_settings.tRCD-1) diff --git a/misoclib/mem/sdram/core/lasmicon/crossbar.py b/misoclib/mem/sdram/core/lasmicon/crossbar.py new file mode 100644 index 00000000..e4414900 --- /dev/null +++ b/misoclib/mem/sdram/core/lasmicon/crossbar.py @@ -0,0 +1,176 @@ +from migen.fhdl.std import * +from migen.genlib import roundrobin +from migen.genlib.record import * +from migen.genlib.misc import optree + +from misoclib.mem.sdram.bus.lasmibus import Interface + +def _getattr_all(l, attr): + it = iter(l) + r = getattr(next(it), attr) + for e in it: + if getattr(e, attr) != r: + raise ValueError + return r + +class Crossbar(Module): + def __init__(self, controllers, cba_shift): + self._controllers = controllers + self._cba_shift = cba_shift + + self._rca_bits = _getattr_all(controllers, "aw") + self._dw = _getattr_all(controllers, "dw") + self._nbanks = _getattr_all(controllers, "nbanks") + self._req_queue_size = _getattr_all(controllers, "req_queue_size") + self._read_latency = _getattr_all(controllers, "read_latency") + self._write_latency = _getattr_all(controllers, "write_latency") + + self._bank_bits = log2_int(self._nbanks, False) + self._controller_bits = log2_int(len(self._controllers), False) + + self._masters = [] + + def get_master(self): + if self.finalized: + raise FinalizeError + lasmi_master = Interface(self._rca_bits + self._bank_bits + self._controller_bits, + self._dw, 1, self._req_queue_size, self._read_latency, self._write_latency) + self._masters.append(lasmi_master) + return lasmi_master + + def do_finalize(self): + nmasters = len(self._masters) + + m_ca, m_ba, m_rca = self._split_master_addresses(self._controller_bits, + self._bank_bits, self._rca_bits, self._cba_shift) + + for nc, controller in enumerate(self._controllers): + if self._controller_bits: + controller_selected = [ca == nc for ca in m_ca] + else: + controller_selected = [1]*nmasters + master_req_acks = [0]*nmasters + master_dat_w_acks = [0]*nmasters + master_dat_r_acks = [0]*nmasters + + rrs = [roundrobin.RoundRobin(nmasters, roundrobin.SP_CE) for n in range(self._nbanks)] + self.submodules += rrs + for nb, rr in enumerate(rrs): + bank = getattr(controller, "bank"+str(nb)) + + # for each master, determine if another bank locks it + master_locked = [] + for nm, master in enumerate(self._masters): + locked = 0 + for other_nb, other_rr in enumerate(rrs): + if other_nb != nb: + other_bank = getattr(controller, "bank"+str(other_nb)) + locked = locked | (other_bank.lock & (other_rr.grant == nm)) + master_locked.append(locked) + + # arbitrate + bank_selected = [cs & (ba == nb) & ~locked for cs, ba, locked in zip(controller_selected, m_ba, master_locked)] + bank_requested = [bs & master.stb for bs, master in zip(bank_selected, self._masters)] + self.comb += [ + rr.request.eq(Cat(*bank_requested)), + rr.ce.eq(~bank.stb & ~bank.lock) + ] + + # route requests + self.comb += [ + bank.adr.eq(Array(m_rca)[rr.grant]), + 
bank.we.eq(Array(self._masters)[rr.grant].we), + bank.stb.eq(Array(bank_requested)[rr.grant]) + ] + master_req_acks = [master_req_ack | ((rr.grant == nm) & bank_selected[nm] & bank.req_ack) + for nm, master_req_ack in enumerate(master_req_acks)] + master_dat_w_acks = [master_dat_w_ack | ((rr.grant == nm) & bank.dat_w_ack) + for nm, master_dat_w_ack in enumerate(master_dat_w_acks)] + master_dat_r_acks = [master_dat_r_ack | ((rr.grant == nm) & bank.dat_r_ack) + for nm, master_dat_r_ack in enumerate(master_dat_r_acks)] + + for nm, master_dat_w_ack in enumerate(master_dat_w_acks): + for i in range(self._write_latency): + new_master_dat_w_ack = Signal() + self.sync += new_master_dat_w_ack.eq(master_dat_w_ack) + master_dat_w_ack = new_master_dat_w_ack + master_dat_w_acks[nm] = master_dat_w_ack + + for nm, master_dat_r_ack in enumerate(master_dat_r_acks): + for i in range(self._read_latency): + new_master_dat_r_ack = Signal() + self.sync += new_master_dat_r_ack.eq(master_dat_r_ack) + master_dat_r_ack = new_master_dat_r_ack + master_dat_r_acks[nm] = master_dat_r_ack + + self.comb += [master.req_ack.eq(master_req_ack) for master, master_req_ack in zip(self._masters, master_req_acks)] + self.comb += [master.dat_w_ack.eq(master_dat_w_ack) for master, master_dat_w_ack in zip(self._masters, master_dat_w_acks)] + self.comb += [master.dat_r_ack.eq(master_dat_r_ack) for master, master_dat_r_ack in zip(self._masters, master_dat_r_acks)] + + # route data writes + controller_selected_wl = controller_selected + for i in range(self._write_latency): + n_controller_selected_wl = [Signal() for i in range(nmasters)] + self.sync += [n.eq(o) for n, o in zip(n_controller_selected_wl, controller_selected_wl)] + controller_selected_wl = n_controller_selected_wl + dat_w_maskselect = [] + dat_we_maskselect = [] + for master, selected in zip(self._masters, controller_selected_wl): + o_dat_w = Signal(self._dw) + o_dat_we = Signal(self._dw//8) + self.comb += If(selected, + o_dat_w.eq(master.dat_w), + o_dat_we.eq(master.dat_we) + ) + dat_w_maskselect.append(o_dat_w) + dat_we_maskselect.append(o_dat_we) + self.comb += [ + controller.dat_w.eq(optree("|", dat_w_maskselect)), + controller.dat_we.eq(optree("|", dat_we_maskselect)) + ] + + # route data reads + if self._controller_bits: + for master in self._masters: + controller_sel = Signal(self._controller_bits) + for nc, controller in enumerate(self._controllers): + for nb in range(nbanks): + bank = getattr(controller, "bank"+str(nb)) + self.comb += If(bank.stb & bank.ack, controller_sel.eq(nc)) + for i in range(self._read_latency): + n_controller_sel = Signal(self._controller_bits) + self.sync += n_controller_sel.eq(controller_sel) + controller_sel = n_controller_sel + self.comb += master.dat_r.eq(Array(self._controllers)[controller_sel].dat_r) + else: + self.comb += [master.dat_r.eq(self._controllers[0].dat_r) for master in self._masters] + + def _split_master_addresses(self, controller_bits, bank_bits, rca_bits, cba_shift): + m_ca = [] # controller address + m_ba = [] # bank address + m_rca = [] # row and column address + for master in self._masters: + cba = Signal(self._controller_bits + self._bank_bits) + rca = Signal(self._rca_bits) + cba_upper = cba_shift + controller_bits + bank_bits + self.comb += cba.eq(master.adr[cba_shift:cba_upper]) + if cba_shift < self._rca_bits: + if cba_shift: + self.comb += rca.eq(Cat(master.adr[:cba_shift], master.adr[cba_upper:])) + else: + self.comb += rca.eq(master.adr[cba_upper:]) + else: + self.comb += rca.eq(master.adr[:cba_shift]) + + 
if self._controller_bits: + ca = Signal(self._controller_bits) + ba = Signal(self._bank_bits) + self.comb += Cat(ba, ca).eq(cba) + else: + ca = None + ba = cba + + m_ca.append(ca) + m_ba.append(ba) + m_rca.append(rca) + return m_ca, m_ba, m_rca diff --git a/misoclib/mem/sdram/core/lasmicon/multiplexer.py b/misoclib/mem/sdram/core/lasmicon/multiplexer.py new file mode 100644 index 00000000..c896480a --- /dev/null +++ b/misoclib/mem/sdram/core/lasmicon/multiplexer.py @@ -0,0 +1,214 @@ +from migen.fhdl.std import * +from migen.genlib.roundrobin import * +from migen.genlib.misc import optree +from migen.genlib.fsm import FSM, NextState +from migen.bank.description import AutoCSR + +from misoclib.mem.sdram.core.lasmicon.perf import Bandwidth + +class CommandRequest: + def __init__(self, a, ba): + self.a = Signal(a) + self.ba = Signal(ba) + self.cas_n = Signal(reset=1) + self.ras_n = Signal(reset=1) + self.we_n = Signal(reset=1) + +class CommandRequestRW(CommandRequest): + def __init__(self, a, ba): + CommandRequest.__init__(self, a, ba) + self.stb = Signal() + self.ack = Signal() + self.is_cmd = Signal() + self.is_read = Signal() + self.is_write = Signal() + +class _CommandChooser(Module): + def __init__(self, requests): + self.want_reads = Signal() + self.want_writes = Signal() + self.want_cmds = Signal() + # NB: cas_n/ras_n/we_n are 1 when stb is inactive + self.cmd = CommandRequestRW(flen(requests[0].a), flen(requests[0].ba)) + + ### + + rr = RoundRobin(len(requests), SP_CE) + self.submodules += rr + + self.comb += [rr.request[i].eq(req.stb & ((req.is_cmd & self.want_cmds) | ((req.is_read == self.want_reads) | (req.is_write == self.want_writes)))) + for i, req in enumerate(requests)] + + stb = Signal() + self.comb += stb.eq(Array(req.stb for req in requests)[rr.grant]) + for name in ["a", "ba", "is_read", "is_write", "is_cmd"]: + choices = Array(getattr(req, name) for req in requests) + self.comb += getattr(self.cmd, name).eq(choices[rr.grant]) + for name in ["cas_n", "ras_n", "we_n"]: + # we should only assert those signals when stb is 1 + choices = Array(getattr(req, name) for req in requests) + self.comb += If(self.cmd.stb, getattr(self.cmd, name).eq(choices[rr.grant])) + self.comb += self.cmd.stb.eq(stb \ + & ((self.cmd.is_cmd & self.want_cmds) | ((self.cmd.is_read == self.want_reads) \ + & (self.cmd.is_write == self.want_writes)))) + + self.comb += [If(self.cmd.stb & self.cmd.ack & (rr.grant == i), req.ack.eq(1)) + for i, req in enumerate(requests)] + self.comb += rr.ce.eq(self.cmd.ack) + +class _Steerer(Module): + def __init__(self, commands, dfi): + ncmd = len(commands) + nph = len(dfi.phases) + self.sel = [Signal(max=ncmd) for i in range(nph)] + + ### + + def stb_and(cmd, attr): + if not hasattr(cmd, "stb"): + return 0 + else: + return cmd.stb & getattr(cmd, attr) + for phase, sel in zip(dfi.phases, self.sel): + self.comb += [ + phase.cke.eq(1), + phase.cs_n.eq(0) + ] + if hasattr(phase, "odt"): + self.comb += phase.odt.eq(1) + if hasattr(phase, "reset_n"): + self.comb += phase.reset_n.eq(1) + self.sync += [ + phase.address.eq(Array(cmd.a for cmd in commands)[sel]), + phase.bank.eq(Array(cmd.ba for cmd in commands)[sel]), + phase.cas_n.eq(Array(cmd.cas_n for cmd in commands)[sel]), + phase.ras_n.eq(Array(cmd.ras_n for cmd in commands)[sel]), + phase.we_n.eq(Array(cmd.we_n for cmd in commands)[sel]), + phase.rddata_en.eq(Array(stb_and(cmd, "is_read") for cmd in commands)[sel]), + phase.wrdata_en.eq(Array(stb_and(cmd, "is_write") for cmd in commands)[sel]) + ] + +class 
Multiplexer(Module, AutoCSR): + def __init__(self, phy, geom_settings, timing_settings, bank_machines, refresher, dfi, lasmic): + assert(phy.settings.nphases == len(dfi.phases)) + + # Command choosing + requests = [bm.cmd for bm in bank_machines] + choose_cmd = _CommandChooser(requests) + choose_req = _CommandChooser(requests) + self.comb += [ + choose_cmd.want_reads.eq(0), + choose_cmd.want_writes.eq(0) + ] + if phy.settings.nphases == 1: + self.comb += [ + choose_cmd.want_cmds.eq(1), + choose_req.want_cmds.eq(1) + ] + self.submodules += choose_cmd, choose_req + + # Command steering + nop = CommandRequest(geom_settings.mux_a, geom_settings.bank_a) + commands = [nop, choose_cmd.cmd, choose_req.cmd, refresher.cmd] # nop must be 1st + (STEER_NOP, STEER_CMD, STEER_REQ, STEER_REFRESH) = range(4) + steerer = _Steerer(commands, dfi) + self.submodules += steerer + + # Read/write turnaround + read_available = Signal() + write_available = Signal() + self.comb += [ + read_available.eq(optree("|", [req.stb & req.is_read for req in requests])), + write_available.eq(optree("|", [req.stb & req.is_write for req in requests])) + ] + + def anti_starvation(timeout): + en = Signal() + max_time = Signal() + if timeout: + t = timeout - 1 + time = Signal(max=t+1) + self.comb += max_time.eq(time == 0) + self.sync += If(~en, + time.eq(t) + ).Elif(~max_time, + time.eq(time - 1) + ) + else: + self.comb += max_time.eq(0) + return en, max_time + read_time_en, max_read_time = anti_starvation(timing_settings.read_time) + write_time_en, max_write_time = anti_starvation(timing_settings.write_time) + + # Refresh + self.comb += [bm.refresh_req.eq(refresher.req) for bm in bank_machines] + go_to_refresh = Signal() + self.comb += go_to_refresh.eq(optree("&", [bm.refresh_gnt for bm in bank_machines])) + + # Datapath + all_rddata = [p.rddata for p in dfi.phases] + all_wrdata = [p.wrdata for p in dfi.phases] + all_wrdata_mask = [p.wrdata_mask for p in dfi.phases] + self.comb += [ + lasmic.dat_r.eq(Cat(*all_rddata)), + Cat(*all_wrdata).eq(lasmic.dat_w), + Cat(*all_wrdata_mask).eq(~lasmic.dat_we) + ] + + # Control FSM + fsm = FSM() + self.submodules += fsm + + def steerer_sel(steerer, phy, r_w_n): + r = [] + for i in range(phy.settings.nphases): + s = steerer.sel[i].eq(STEER_NOP) + if r_w_n == "read": + if i == phy.settings.rdphase: + s = steerer.sel[i].eq(STEER_REQ) + elif i == phy.settings.rdcmdphase: + s = steerer.sel[i].eq(STEER_CMD) + elif r_w_n == "write": + if i == phy.settings.wrphase: + s = steerer.sel[i].eq(STEER_REQ) + elif i == phy.settings.wrcmdphase: + s = steerer.sel[i].eq(STEER_CMD) + else: + raise ValueError + r.append(s) + return r + + fsm.act("READ", + read_time_en.eq(1), + choose_req.want_reads.eq(1), + choose_cmd.cmd.ack.eq(1), + choose_req.cmd.ack.eq(1), + steerer_sel(steerer, phy, "read"), + If(write_available, + # TODO: switch only after several cycles of ~read_available? 
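+                # switch to RTW once no read is pending or reads hit the
+                # anti-starvation limit (max_read_time)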
+ If(~read_available | max_read_time, NextState("RTW")) + ), + If(go_to_refresh, NextState("REFRESH")) + ) + fsm.act("WRITE", + write_time_en.eq(1), + choose_req.want_writes.eq(1), + choose_cmd.cmd.ack.eq(1), + choose_req.cmd.ack.eq(1), + steerer_sel(steerer, phy, "write"), + If(read_available, + If(~write_available | max_write_time, NextState("WTR")) + ), + If(go_to_refresh, NextState("REFRESH")) + ) + fsm.act("REFRESH", + steerer.sel[0].eq(STEER_REFRESH), + If(~refresher.req, NextState("READ")) + ) + fsm.delayed_enter("RTW", "WRITE", phy.settings.read_latency-1) # FIXME: reduce this, actual limit is around (cl+1)/nphases + fsm.delayed_enter("WTR", "READ", timing_settings.tWTR-1) + # FIXME: workaround for zero-delay loop simulation problem with Icarus Verilog + fsm.finalize() + self.comb += refresher.ack.eq(fsm.state == fsm.encoding["REFRESH"]) + + self.submodules.bandwidth = Bandwidth(choose_req.cmd) diff --git a/misoclib/mem/sdram/core/lasmicon/perf.py b/misoclib/mem/sdram/core/lasmicon/perf.py new file mode 100644 index 00000000..910d831a --- /dev/null +++ b/misoclib/mem/sdram/core/lasmicon/perf.py @@ -0,0 +1,44 @@ +from migen.fhdl.std import * +from migen.bank.description import * + +class Bandwidth(Module, AutoCSR): + def __init__(self, cmd, period_bits=24): + self._update = CSR() + self._nreads = CSRStatus(period_bits) + self._nwrites = CSRStatus(period_bits) + + ### + + cmd_stb = Signal() + cmd_ack = Signal() + cmd_is_read = Signal() + cmd_is_write = Signal() + self.sync += [ + cmd_stb.eq(cmd.stb), + cmd_ack.eq(cmd.ack), + cmd_is_read.eq(cmd.is_read), + cmd_is_write.eq(cmd.is_write) + ] + + counter = Signal(period_bits) + period = Signal() + nreads = Signal(period_bits) + nwrites = Signal(period_bits) + nreads_r = Signal(period_bits) + nwrites_r = Signal(period_bits) + self.sync += [ + Cat(counter, period).eq(counter + 1), + If(period, + nreads_r.eq(nreads), + nwrites_r.eq(nwrites), + nreads.eq(0), + nwrites.eq(0) + ).Elif(cmd_stb & cmd_ack, + If(cmd_is_read, nreads.eq(nreads + 1)), + If(cmd_is_write, nwrites.eq(nwrites + 1)), + ), + If(self._update.re, + self._nreads.status.eq(nreads_r), + self._nwrites.status.eq(nwrites_r) + ) + ] diff --git a/misoclib/mem/sdram/core/lasmicon/refresher.py b/misoclib/mem/sdram/core/lasmicon/refresher.py new file mode 100644 index 00000000..e9c9d13a --- /dev/null +++ b/misoclib/mem/sdram/core/lasmicon/refresher.py @@ -0,0 +1,68 @@ +from migen.fhdl.std import * +from migen.genlib.misc import timeline +from migen.genlib.fsm import FSM + +from misoclib.mem.sdram.core.lasmicon.multiplexer import * + +class Refresher(Module): + def __init__(self, a, ba, tRP, tREFI, tRFC): + self.req = Signal() + self.ack = Signal() # 1st command 1 cycle after assertion of ack + self.cmd = CommandRequest(a, ba) + + ### + + # Refresh sequence generator: + # PRECHARGE ALL --(tRP)--> AUTO REFRESH --(tRFC)--> done + seq_start = Signal() + seq_done = Signal() + self.sync += [ + self.cmd.a.eq(2**10), + self.cmd.ba.eq(0), + self.cmd.cas_n.eq(1), + self.cmd.ras_n.eq(1), + self.cmd.we_n.eq(1), + seq_done.eq(0) + ] + self.sync += timeline(seq_start, [ + (1, [ + self.cmd.ras_n.eq(0), + self.cmd.we_n.eq(0) + ]), + (1+tRP, [ + self.cmd.cas_n.eq(0), + self.cmd.ras_n.eq(0) + ]), + (1+tRP+tRFC, [ + seq_done.eq(1) + ]) + ]) + + # Periodic refresh counter + counter = Signal(max=tREFI) + start = Signal() + self.sync += [ + start.eq(0), + If(counter == 0, + start.eq(1), + counter.eq(tREFI - 1) + ).Else( + counter.eq(counter - 1) + ) + ] + + # Control FSM + fsm = FSM() + self.submodules += 
fsm + fsm.act("IDLE", If(start, NextState("WAIT_GRANT"))) + fsm.act("WAIT_GRANT", + self.req.eq(1), + If(self.ack, + seq_start.eq(1), + NextState("WAIT_SEQ") + ) + ) + fsm.act("WAIT_SEQ", + self.req.eq(1), + If(seq_done, NextState("IDLE")) + ) diff --git a/misoclib/mem/sdram/core/minicon/__init__.py b/misoclib/mem/sdram/core/minicon/__init__.py new file mode 100644 index 00000000..c03f93d4 --- /dev/null +++ b/misoclib/mem/sdram/core/minicon/__init__.py @@ -0,0 +1,193 @@ +from migen.fhdl.std import * +from migen.bus import wishbone +from migen.genlib.fsm import FSM, NextState + +from misoclib.mem.sdram.bus import dfi as dfibus + +class _AddressSlicer: + def __init__(self, col_a, bank_a, row_a, address_align): + self.col_a = col_a + self.bank_a = bank_a + self.row_a = row_a + self.max_a = col_a + row_a + bank_a + self.address_align = address_align + + def row(self, address): + split = self.bank_a + self.col_a + if isinstance(address, int): + return address >> split + else: + return address[split:self.max_a] + + def bank(self, address): + mask = 2**(self.bank_a + self.col_a) - 1 + shift = self.col_a + if isinstance(address, int): + return (address & mask) >> shift + else: + return address[self.col_a:self.col_a+self.bank_a] + + def col(self, address): + split = self.col_a + if isinstance(address, int): + return (address & (2**split - 1)) << self.address_align + else: + return Cat(Replicate(0, self.address_align), address[:split]) + +class Minicon(Module): + def __init__(self, phy, geom_settings, timing_settings): + if phy.settings.memtype in ["SDR"]: + burst_length = phy.settings.nphases*1 # command multiplication*SDR + elif phy.settings.memtype in ["DDR", "LPDDR", "DDR2", "DDR3"]: + burst_length = phy.settings.nphases*2 # command multiplication*DDR + address_align = log2_int(burst_length) + + nbanks = range(2**geom_settings.bank_a) + A10_ENABLED = 0 + COLUMN = 1 + ROW = 2 + rdphase = phy.settings.rdphase + wrphase = phy.settings.wrphase + + self.dfi = dfi = dfibus.Interface(geom_settings.mux_a, + geom_settings.bank_a, + phy.settings.dfi_d, + phy.settings.nphases) + + self.bus = bus = wishbone.Interface(data_width=phy.settings.nphases*flen(dfi.phases[rdphase].rddata)) + slicer = _AddressSlicer(geom_settings.col_a, geom_settings.bank_a, geom_settings.row_a, address_align) + refresh_req = Signal() + refresh_ack = Signal() + refresh_counter = Signal(max=timing_settings.tREFI+1) + hit = Signal() + row_open = Signal() + row_closeall = Signal() + addr_sel = Signal(max=3, reset=A10_ENABLED) + has_curbank_openrow = Signal() + + # Extra bit means row is active when asserted + self.openrow = openrow = Array(Signal(geom_settings.row_a + 1) for b in nbanks) + + self.comb += [ + hit.eq(openrow[slicer.bank(bus.adr)] == Cat(slicer.row(bus.adr), 1)), + has_curbank_openrow.eq(openrow[slicer.bank(bus.adr)][-1]), + bus.dat_r.eq(Cat(phase.rddata for phase in dfi.phases)), + Cat(phase.wrdata for phase in dfi.phases).eq(bus.dat_w), + Cat(phase.wrdata_mask for phase in dfi.phases).eq(~bus.sel), + ] + + for phase in dfi.phases: + self.comb += [ + phase.cke.eq(1), + phase.cs_n.eq(0), + phase.address.eq(Array([2**10, slicer.col(bus.adr), slicer.row(bus.adr)])[addr_sel]), + phase.bank.eq(slicer.bank(bus.adr)) + ] + + for b in nbanks: + self.sync += [ + If(row_open & (b == slicer.bank(bus.adr)), + openrow[b].eq(Cat(slicer.row(bus.adr), 1)), + ), + If(row_closeall, + openrow[b][-1].eq(0) + ) + ] + + self.sync += [ + If(refresh_ack, + refresh_req.eq(0) + ), + If(refresh_counter == 0, + 
refresh_counter.eq(timing_settings.tREFI), + refresh_req.eq(1) + ).Else( + refresh_counter.eq(refresh_counter - 1) + ) + ] + + fsm = FSM() + self.submodules += fsm + fsm.act("IDLE", + If(refresh_req, + NextState("PRECHARGEALL") + ).Elif(bus.stb & bus.cyc, + If(hit & bus.we, + NextState("WRITE") + ), + If(hit & ~bus.we, + NextState("READ") + ), + If(has_curbank_openrow & ~hit, + NextState("PRECHARGE") + ), + If(~has_curbank_openrow, + NextState("ACTIVATE") + ), + ) + ) + fsm.act("READ", + # We output Column bits at address pins so A10 is 0 + # to disable row Auto-Precharge + dfi.phases[rdphase].ras_n.eq(1), + dfi.phases[rdphase].cas_n.eq(0), + dfi.phases[rdphase].we_n.eq(1), + dfi.phases[rdphase].rddata_en.eq(1), + addr_sel.eq(COLUMN), + NextState("READ-WAIT-ACK"), + ) + fsm.act("READ-WAIT-ACK", + If(dfi.phases[rdphase].rddata_valid, + NextState("IDLE"), + bus.ack.eq(1) + ).Else( + NextState("READ-WAIT-ACK") + ) + ) + fsm.act("WRITE", + dfi.phases[wrphase].ras_n.eq(1), + dfi.phases[wrphase].cas_n.eq(0), + dfi.phases[wrphase].we_n.eq(0), + dfi.phases[wrphase].wrdata_en.eq(1), + addr_sel.eq(COLUMN), + bus.ack.eq(1), + NextState("IDLE") + ) + fsm.act("PRECHARGEALL", + row_closeall.eq(1), + dfi.phases[rdphase].ras_n.eq(0), + dfi.phases[rdphase].cas_n.eq(1), + dfi.phases[rdphase].we_n.eq(0), + addr_sel.eq(A10_ENABLED), + NextState("PRE-REFRESH") + ) + fsm.act("PRECHARGE", + # Notes: + # 1. we are presenting the column address so that A10 is low + # 2. since we always go to the ACTIVATE state, we do not need + # to assert row_close because it will be reopen right after. + NextState("TRP"), + addr_sel.eq(COLUMN), + dfi.phases[rdphase].ras_n.eq(0), + dfi.phases[rdphase].cas_n.eq(1), + dfi.phases[rdphase].we_n.eq(0) + ) + fsm.act("ACTIVATE", + row_open.eq(1), + NextState("TRCD"), + dfi.phases[rdphase].ras_n.eq(0), + dfi.phases[rdphase].cas_n.eq(1), + dfi.phases[rdphase].we_n.eq(1), + addr_sel.eq(ROW) + ) + fsm.act("REFRESH", + refresh_ack.eq(1), + dfi.phases[rdphase].ras_n.eq(0), + dfi.phases[rdphase].cas_n.eq(0), + dfi.phases[rdphase].we_n.eq(1), + NextState("POST-REFRESH") + ) + fsm.delayed_enter("TRP", "ACTIVATE", timing_settings.tRP-1) + fsm.delayed_enter("PRE-REFRESH", "REFRESH", timing_settings.tRP-1) + fsm.delayed_enter("TRCD", "IDLE", timing_settings.tRCD-1) + fsm.delayed_enter("POST-REFRESH", "IDLE", timing_settings.tRFC-1) diff --git a/misoclib/mem/sdram/lasmicon/__init__.py b/misoclib/mem/sdram/lasmicon/__init__.py deleted file mode 100644 index 94735d5b..00000000 --- a/misoclib/mem/sdram/lasmicon/__init__.py +++ /dev/null @@ -1,41 +0,0 @@ -from migen.fhdl.std import * - -from misoclib.mem.sdram.bus import dfi, lasmibus -from misoclib.mem.sdram.lasmicon.refresher import * -from misoclib.mem.sdram.lasmicon.bankmachine import * -from misoclib.mem.sdram.lasmicon.multiplexer import * - -class LASMIcon(Module): - def __init__(self, phy, geom_settings, timing_settings): - if phy.settings.memtype in ["SDR"]: - burst_length = phy.settings.nphases*1 # command multiplication*SDR - elif phy.settings.memtype in ["DDR", "LPDDR", "DDR2", "DDR3"]: - burst_length = phy.settings.nphases*2 # command multiplication*DDR - address_align = log2_int(burst_length) - - self.dfi = dfi.Interface(geom_settings.mux_a, - geom_settings.bank_a, - phy.settings.dfi_d, - phy.settings.nphases) - self.lasmic = lasmibus.Interface( - aw=geom_settings.row_a + geom_settings.col_a - address_align, - dw=phy.settings.dfi_d*phy.settings.nphases, - nbanks=2**geom_settings.bank_a, - req_queue_size=timing_settings.req_queue_size, - 
read_latency=phy.settings.read_latency+1, - write_latency=phy.settings.write_latency+1) - self.nrowbits = geom_settings.col_a - address_align - - ### - - self.submodules.refresher = Refresher(geom_settings.mux_a, geom_settings.bank_a, - timing_settings.tRP, timing_settings.tREFI, timing_settings.tRFC) - self.submodules.bank_machines = [BankMachine(geom_settings, timing_settings, address_align, i, - getattr(self.lasmic, "bank"+str(i))) - for i in range(2**geom_settings.bank_a)] - self.submodules.multiplexer = Multiplexer(phy, geom_settings, timing_settings, - self.bank_machines, self.refresher, - self.dfi, self.lasmic) - - def get_csrs(self): - return self.multiplexer.get_csrs() diff --git a/misoclib/mem/sdram/lasmicon/bankmachine.py b/misoclib/mem/sdram/lasmicon/bankmachine.py deleted file mode 100644 index 3ee6c083..00000000 --- a/misoclib/mem/sdram/lasmicon/bankmachine.py +++ /dev/null @@ -1,145 +0,0 @@ -from migen.fhdl.std import * -from migen.genlib.roundrobin import * -from migen.genlib.fsm import FSM, NextState -from migen.genlib.misc import optree -from migen.genlib.fifo import SyncFIFO - -from misoclib.mem.sdram.lasmicon.multiplexer import * - -class _AddressSlicer: - def __init__(self, col_a, address_align): - self.col_a = col_a - self.address_align = address_align - - def row(self, address): - split = self.col_a - self.address_align - if isinstance(address, int): - return address >> split - else: - return address[split:] - - def col(self, address): - split = self.col_a - self.address_align - if isinstance(address, int): - return (address & (2**split - 1)) << self.address_align - else: - return Cat(Replicate(0, self.address_align), address[:split]) - -class BankMachine(Module): - def __init__(self, geom_settings, timing_settings, address_align, bankn, req): - self.refresh_req = Signal() - self.refresh_gnt = Signal() - self.cmd = CommandRequestRW(geom_settings.mux_a, geom_settings.bank_a) - - ### - - # Request FIFO - self.submodules.req_fifo = SyncFIFO([("we", 1), ("adr", flen(req.adr))], timing_settings.req_queue_size) - self.comb += [ - self.req_fifo.din.we.eq(req.we), - self.req_fifo.din.adr.eq(req.adr), - self.req_fifo.we.eq(req.stb), - req.req_ack.eq(self.req_fifo.writable), - - self.req_fifo.re.eq(req.dat_w_ack | req.dat_r_ack), - req.lock.eq(self.req_fifo.readable) - ] - reqf = self.req_fifo.dout - - slicer = _AddressSlicer(geom_settings.col_a, address_align) - - # Row tracking - has_openrow = Signal() - openrow = Signal(geom_settings.row_a) - hit = Signal() - self.comb += hit.eq(openrow == slicer.row(reqf.adr)) - track_open = Signal() - track_close = Signal() - self.sync += [ - If(track_open, - has_openrow.eq(1), - openrow.eq(slicer.row(reqf.adr)) - ), - If(track_close, - has_openrow.eq(0) - ) - ] - - # Address generation - s_row_adr = Signal() - self.comb += [ - self.cmd.ba.eq(bankn), - If(s_row_adr, - self.cmd.a.eq(slicer.row(reqf.adr)) - ).Else( - self.cmd.a.eq(slicer.col(reqf.adr)) - ) - ] - - # Respect write-to-precharge specification - precharge_ok = Signal() - t_unsafe_precharge = 2 + timing_settings.tWR - 1 - unsafe_precharge_count = Signal(max=t_unsafe_precharge+1) - self.comb += precharge_ok.eq(unsafe_precharge_count == 0) - self.sync += [ - If(self.cmd.stb & self.cmd.ack & self.cmd.is_write, - unsafe_precharge_count.eq(t_unsafe_precharge) - ).Elif(~precharge_ok, - unsafe_precharge_count.eq(unsafe_precharge_count-1) - ) - ] - - # Control and command generation FSM - fsm = FSM() - self.submodules += fsm - fsm.act("REGULAR", - If(self.refresh_req, - 
NextState("REFRESH") - ).Elif(self.req_fifo.readable, - If(has_openrow, - If(hit, - # NB: write-to-read specification is enforced by multiplexer - self.cmd.stb.eq(1), - req.dat_w_ack.eq(self.cmd.ack & reqf.we), - req.dat_r_ack.eq(self.cmd.ack & ~reqf.we), - self.cmd.is_read.eq(~reqf.we), - self.cmd.is_write.eq(reqf.we), - self.cmd.cas_n.eq(0), - self.cmd.we_n.eq(~reqf.we) - ).Else( - NextState("PRECHARGE") - ) - ).Else( - NextState("ACTIVATE") - ) - ) - ) - fsm.act("PRECHARGE", - # Notes: - # 1. we are presenting the column address, A10 is always low - # 2. since we always go to the ACTIVATE state, we do not need - # to assert track_close. - If(precharge_ok, - self.cmd.stb.eq(1), - If(self.cmd.ack, NextState("TRP")), - self.cmd.ras_n.eq(0), - self.cmd.we_n.eq(0), - self.cmd.is_cmd.eq(1) - ) - ) - fsm.act("ACTIVATE", - s_row_adr.eq(1), - track_open.eq(1), - self.cmd.stb.eq(1), - self.cmd.is_cmd.eq(1), - If(self.cmd.ack, NextState("TRCD")), - self.cmd.ras_n.eq(0) - ) - fsm.act("REFRESH", - self.refresh_gnt.eq(precharge_ok), - track_close.eq(1), - self.cmd.is_cmd.eq(1), - If(~self.refresh_req, NextState("REGULAR")) - ) - fsm.delayed_enter("TRP", "ACTIVATE", timing_settings.tRP-1) - fsm.delayed_enter("TRCD", "REGULAR", timing_settings.tRCD-1) diff --git a/misoclib/mem/sdram/lasmicon/crossbar.py b/misoclib/mem/sdram/lasmicon/crossbar.py deleted file mode 100644 index e4414900..00000000 --- a/misoclib/mem/sdram/lasmicon/crossbar.py +++ /dev/null @@ -1,176 +0,0 @@ -from migen.fhdl.std import * -from migen.genlib import roundrobin -from migen.genlib.record import * -from migen.genlib.misc import optree - -from misoclib.mem.sdram.bus.lasmibus import Interface - -def _getattr_all(l, attr): - it = iter(l) - r = getattr(next(it), attr) - for e in it: - if getattr(e, attr) != r: - raise ValueError - return r - -class Crossbar(Module): - def __init__(self, controllers, cba_shift): - self._controllers = controllers - self._cba_shift = cba_shift - - self._rca_bits = _getattr_all(controllers, "aw") - self._dw = _getattr_all(controllers, "dw") - self._nbanks = _getattr_all(controllers, "nbanks") - self._req_queue_size = _getattr_all(controllers, "req_queue_size") - self._read_latency = _getattr_all(controllers, "read_latency") - self._write_latency = _getattr_all(controllers, "write_latency") - - self._bank_bits = log2_int(self._nbanks, False) - self._controller_bits = log2_int(len(self._controllers), False) - - self._masters = [] - - def get_master(self): - if self.finalized: - raise FinalizeError - lasmi_master = Interface(self._rca_bits + self._bank_bits + self._controller_bits, - self._dw, 1, self._req_queue_size, self._read_latency, self._write_latency) - self._masters.append(lasmi_master) - return lasmi_master - - def do_finalize(self): - nmasters = len(self._masters) - - m_ca, m_ba, m_rca = self._split_master_addresses(self._controller_bits, - self._bank_bits, self._rca_bits, self._cba_shift) - - for nc, controller in enumerate(self._controllers): - if self._controller_bits: - controller_selected = [ca == nc for ca in m_ca] - else: - controller_selected = [1]*nmasters - master_req_acks = [0]*nmasters - master_dat_w_acks = [0]*nmasters - master_dat_r_acks = [0]*nmasters - - rrs = [roundrobin.RoundRobin(nmasters, roundrobin.SP_CE) for n in range(self._nbanks)] - self.submodules += rrs - for nb, rr in enumerate(rrs): - bank = getattr(controller, "bank"+str(nb)) - - # for each master, determine if another bank locks it - master_locked = [] - for nm, master in enumerate(self._masters): - locked = 0 - for 
other_nb, other_rr in enumerate(rrs): - if other_nb != nb: - other_bank = getattr(controller, "bank"+str(other_nb)) - locked = locked | (other_bank.lock & (other_rr.grant == nm)) - master_locked.append(locked) - - # arbitrate - bank_selected = [cs & (ba == nb) & ~locked for cs, ba, locked in zip(controller_selected, m_ba, master_locked)] - bank_requested = [bs & master.stb for bs, master in zip(bank_selected, self._masters)] - self.comb += [ - rr.request.eq(Cat(*bank_requested)), - rr.ce.eq(~bank.stb & ~bank.lock) - ] - - # route requests - self.comb += [ - bank.adr.eq(Array(m_rca)[rr.grant]), - bank.we.eq(Array(self._masters)[rr.grant].we), - bank.stb.eq(Array(bank_requested)[rr.grant]) - ] - master_req_acks = [master_req_ack | ((rr.grant == nm) & bank_selected[nm] & bank.req_ack) - for nm, master_req_ack in enumerate(master_req_acks)] - master_dat_w_acks = [master_dat_w_ack | ((rr.grant == nm) & bank.dat_w_ack) - for nm, master_dat_w_ack in enumerate(master_dat_w_acks)] - master_dat_r_acks = [master_dat_r_ack | ((rr.grant == nm) & bank.dat_r_ack) - for nm, master_dat_r_ack in enumerate(master_dat_r_acks)] - - for nm, master_dat_w_ack in enumerate(master_dat_w_acks): - for i in range(self._write_latency): - new_master_dat_w_ack = Signal() - self.sync += new_master_dat_w_ack.eq(master_dat_w_ack) - master_dat_w_ack = new_master_dat_w_ack - master_dat_w_acks[nm] = master_dat_w_ack - - for nm, master_dat_r_ack in enumerate(master_dat_r_acks): - for i in range(self._read_latency): - new_master_dat_r_ack = Signal() - self.sync += new_master_dat_r_ack.eq(master_dat_r_ack) - master_dat_r_ack = new_master_dat_r_ack - master_dat_r_acks[nm] = master_dat_r_ack - - self.comb += [master.req_ack.eq(master_req_ack) for master, master_req_ack in zip(self._masters, master_req_acks)] - self.comb += [master.dat_w_ack.eq(master_dat_w_ack) for master, master_dat_w_ack in zip(self._masters, master_dat_w_acks)] - self.comb += [master.dat_r_ack.eq(master_dat_r_ack) for master, master_dat_r_ack in zip(self._masters, master_dat_r_acks)] - - # route data writes - controller_selected_wl = controller_selected - for i in range(self._write_latency): - n_controller_selected_wl = [Signal() for i in range(nmasters)] - self.sync += [n.eq(o) for n, o in zip(n_controller_selected_wl, controller_selected_wl)] - controller_selected_wl = n_controller_selected_wl - dat_w_maskselect = [] - dat_we_maskselect = [] - for master, selected in zip(self._masters, controller_selected_wl): - o_dat_w = Signal(self._dw) - o_dat_we = Signal(self._dw//8) - self.comb += If(selected, - o_dat_w.eq(master.dat_w), - o_dat_we.eq(master.dat_we) - ) - dat_w_maskselect.append(o_dat_w) - dat_we_maskselect.append(o_dat_we) - self.comb += [ - controller.dat_w.eq(optree("|", dat_w_maskselect)), - controller.dat_we.eq(optree("|", dat_we_maskselect)) - ] - - # route data reads - if self._controller_bits: - for master in self._masters: - controller_sel = Signal(self._controller_bits) - for nc, controller in enumerate(self._controllers): - for nb in range(nbanks): - bank = getattr(controller, "bank"+str(nb)) - self.comb += If(bank.stb & bank.ack, controller_sel.eq(nc)) - for i in range(self._read_latency): - n_controller_sel = Signal(self._controller_bits) - self.sync += n_controller_sel.eq(controller_sel) - controller_sel = n_controller_sel - self.comb += master.dat_r.eq(Array(self._controllers)[controller_sel].dat_r) - else: - self.comb += [master.dat_r.eq(self._controllers[0].dat_r) for master in self._masters] - - def _split_master_addresses(self, 
controller_bits, bank_bits, rca_bits, cba_shift): - m_ca = [] # controller address - m_ba = [] # bank address - m_rca = [] # row and column address - for master in self._masters: - cba = Signal(self._controller_bits + self._bank_bits) - rca = Signal(self._rca_bits) - cba_upper = cba_shift + controller_bits + bank_bits - self.comb += cba.eq(master.adr[cba_shift:cba_upper]) - if cba_shift < self._rca_bits: - if cba_shift: - self.comb += rca.eq(Cat(master.adr[:cba_shift], master.adr[cba_upper:])) - else: - self.comb += rca.eq(master.adr[cba_upper:]) - else: - self.comb += rca.eq(master.adr[:cba_shift]) - - if self._controller_bits: - ca = Signal(self._controller_bits) - ba = Signal(self._bank_bits) - self.comb += Cat(ba, ca).eq(cba) - else: - ca = None - ba = cba - - m_ca.append(ca) - m_ba.append(ba) - m_rca.append(rca) - return m_ca, m_ba, m_rca diff --git a/misoclib/mem/sdram/lasmicon/multiplexer.py b/misoclib/mem/sdram/lasmicon/multiplexer.py deleted file mode 100644 index 3f977efb..00000000 --- a/misoclib/mem/sdram/lasmicon/multiplexer.py +++ /dev/null @@ -1,214 +0,0 @@ -from migen.fhdl.std import * -from migen.genlib.roundrobin import * -from migen.genlib.misc import optree -from migen.genlib.fsm import FSM, NextState -from migen.bank.description import AutoCSR - -from misoclib.mem.sdram.lasmicon.perf import Bandwidth - -class CommandRequest: - def __init__(self, a, ba): - self.a = Signal(a) - self.ba = Signal(ba) - self.cas_n = Signal(reset=1) - self.ras_n = Signal(reset=1) - self.we_n = Signal(reset=1) - -class CommandRequestRW(CommandRequest): - def __init__(self, a, ba): - CommandRequest.__init__(self, a, ba) - self.stb = Signal() - self.ack = Signal() - self.is_cmd = Signal() - self.is_read = Signal() - self.is_write = Signal() - -class _CommandChooser(Module): - def __init__(self, requests): - self.want_reads = Signal() - self.want_writes = Signal() - self.want_cmds = Signal() - # NB: cas_n/ras_n/we_n are 1 when stb is inactive - self.cmd = CommandRequestRW(flen(requests[0].a), flen(requests[0].ba)) - - ### - - rr = RoundRobin(len(requests), SP_CE) - self.submodules += rr - - self.comb += [rr.request[i].eq(req.stb & ((req.is_cmd & self.want_cmds) | ((req.is_read == self.want_reads) | (req.is_write == self.want_writes)))) - for i, req in enumerate(requests)] - - stb = Signal() - self.comb += stb.eq(Array(req.stb for req in requests)[rr.grant]) - for name in ["a", "ba", "is_read", "is_write", "is_cmd"]: - choices = Array(getattr(req, name) for req in requests) - self.comb += getattr(self.cmd, name).eq(choices[rr.grant]) - for name in ["cas_n", "ras_n", "we_n"]: - # we should only assert those signals when stb is 1 - choices = Array(getattr(req, name) for req in requests) - self.comb += If(self.cmd.stb, getattr(self.cmd, name).eq(choices[rr.grant])) - self.comb += self.cmd.stb.eq(stb \ - & ((self.cmd.is_cmd & self.want_cmds) | ((self.cmd.is_read == self.want_reads) \ - & (self.cmd.is_write == self.want_writes)))) - - self.comb += [If(self.cmd.stb & self.cmd.ack & (rr.grant == i), req.ack.eq(1)) - for i, req in enumerate(requests)] - self.comb += rr.ce.eq(self.cmd.ack) - -class _Steerer(Module): - def __init__(self, commands, dfi): - ncmd = len(commands) - nph = len(dfi.phases) - self.sel = [Signal(max=ncmd) for i in range(nph)] - - ### - - def stb_and(cmd, attr): - if not hasattr(cmd, "stb"): - return 0 - else: - return cmd.stb & getattr(cmd, attr) - for phase, sel in zip(dfi.phases, self.sel): - self.comb += [ - phase.cke.eq(1), - phase.cs_n.eq(0) - ] - if hasattr(phase, "odt"): - 
self.comb += phase.odt.eq(1) - if hasattr(phase, "reset_n"): - self.comb += phase.reset_n.eq(1) - self.sync += [ - phase.address.eq(Array(cmd.a for cmd in commands)[sel]), - phase.bank.eq(Array(cmd.ba for cmd in commands)[sel]), - phase.cas_n.eq(Array(cmd.cas_n for cmd in commands)[sel]), - phase.ras_n.eq(Array(cmd.ras_n for cmd in commands)[sel]), - phase.we_n.eq(Array(cmd.we_n for cmd in commands)[sel]), - phase.rddata_en.eq(Array(stb_and(cmd, "is_read") for cmd in commands)[sel]), - phase.wrdata_en.eq(Array(stb_and(cmd, "is_write") for cmd in commands)[sel]) - ] - -class Multiplexer(Module, AutoCSR): - def __init__(self, phy, geom_settings, timing_settings, bank_machines, refresher, dfi, lasmic): - assert(phy.settings.nphases == len(dfi.phases)) - - # Command choosing - requests = [bm.cmd for bm in bank_machines] - choose_cmd = _CommandChooser(requests) - choose_req = _CommandChooser(requests) - self.comb += [ - choose_cmd.want_reads.eq(0), - choose_cmd.want_writes.eq(0) - ] - if phy.settings.nphases == 1: - self.comb += [ - choose_cmd.want_cmds.eq(1), - choose_req.want_cmds.eq(1) - ] - self.submodules += choose_cmd, choose_req - - # Command steering - nop = CommandRequest(geom_settings.mux_a, geom_settings.bank_a) - commands = [nop, choose_cmd.cmd, choose_req.cmd, refresher.cmd] # nop must be 1st - (STEER_NOP, STEER_CMD, STEER_REQ, STEER_REFRESH) = range(4) - steerer = _Steerer(commands, dfi) - self.submodules += steerer - - # Read/write turnaround - read_available = Signal() - write_available = Signal() - self.comb += [ - read_available.eq(optree("|", [req.stb & req.is_read for req in requests])), - write_available.eq(optree("|", [req.stb & req.is_write for req in requests])) - ] - - def anti_starvation(timeout): - en = Signal() - max_time = Signal() - if timeout: - t = timeout - 1 - time = Signal(max=t+1) - self.comb += max_time.eq(time == 0) - self.sync += If(~en, - time.eq(t) - ).Elif(~max_time, - time.eq(time - 1) - ) - else: - self.comb += max_time.eq(0) - return en, max_time - read_time_en, max_read_time = anti_starvation(timing_settings.read_time) - write_time_en, max_write_time = anti_starvation(timing_settings.write_time) - - # Refresh - self.comb += [bm.refresh_req.eq(refresher.req) for bm in bank_machines] - go_to_refresh = Signal() - self.comb += go_to_refresh.eq(optree("&", [bm.refresh_gnt for bm in bank_machines])) - - # Datapath - all_rddata = [p.rddata for p in dfi.phases] - all_wrdata = [p.wrdata for p in dfi.phases] - all_wrdata_mask = [p.wrdata_mask for p in dfi.phases] - self.comb += [ - lasmic.dat_r.eq(Cat(*all_rddata)), - Cat(*all_wrdata).eq(lasmic.dat_w), - Cat(*all_wrdata_mask).eq(~lasmic.dat_we) - ] - - # Control FSM - fsm = FSM() - self.submodules += fsm - - def steerer_sel(steerer, phy, r_w_n): - r = [] - for i in range(phy.settings.nphases): - s = steerer.sel[i].eq(STEER_NOP) - if r_w_n == "read": - if i == phy.settings.rdphase: - s = steerer.sel[i].eq(STEER_REQ) - elif i == phy.settings.rdcmdphase: - s = steerer.sel[i].eq(STEER_CMD) - elif r_w_n == "write": - if i == phy.settings.wrphase: - s = steerer.sel[i].eq(STEER_REQ) - elif i == phy.settings.wrcmdphase: - s = steerer.sel[i].eq(STEER_CMD) - else: - raise ValueError - r.append(s) - return r - - fsm.act("READ", - read_time_en.eq(1), - choose_req.want_reads.eq(1), - choose_cmd.cmd.ack.eq(1), - choose_req.cmd.ack.eq(1), - steerer_sel(steerer, phy, "read"), - If(write_available, - # TODO: switch only after several cycles of ~read_available? 
- If(~read_available | max_read_time, NextState("RTW")) - ), - If(go_to_refresh, NextState("REFRESH")) - ) - fsm.act("WRITE", - write_time_en.eq(1), - choose_req.want_writes.eq(1), - choose_cmd.cmd.ack.eq(1), - choose_req.cmd.ack.eq(1), - steerer_sel(steerer, phy, "write"), - If(read_available, - If(~write_available | max_write_time, NextState("WTR")) - ), - If(go_to_refresh, NextState("REFRESH")) - ) - fsm.act("REFRESH", - steerer.sel[0].eq(STEER_REFRESH), - If(~refresher.req, NextState("READ")) - ) - fsm.delayed_enter("RTW", "WRITE", phy.settings.read_latency-1) # FIXME: reduce this, actual limit is around (cl+1)/nphases - fsm.delayed_enter("WTR", "READ", timing_settings.tWTR-1) - # FIXME: workaround for zero-delay loop simulation problem with Icarus Verilog - fsm.finalize() - self.comb += refresher.ack.eq(fsm.state == fsm.encoding["REFRESH"]) - - self.submodules.bandwidth = Bandwidth(choose_req.cmd) diff --git a/misoclib/mem/sdram/lasmicon/perf.py b/misoclib/mem/sdram/lasmicon/perf.py deleted file mode 100644 index 910d831a..00000000 --- a/misoclib/mem/sdram/lasmicon/perf.py +++ /dev/null @@ -1,44 +0,0 @@ -from migen.fhdl.std import * -from migen.bank.description import * - -class Bandwidth(Module, AutoCSR): - def __init__(self, cmd, period_bits=24): - self._update = CSR() - self._nreads = CSRStatus(period_bits) - self._nwrites = CSRStatus(period_bits) - - ### - - cmd_stb = Signal() - cmd_ack = Signal() - cmd_is_read = Signal() - cmd_is_write = Signal() - self.sync += [ - cmd_stb.eq(cmd.stb), - cmd_ack.eq(cmd.ack), - cmd_is_read.eq(cmd.is_read), - cmd_is_write.eq(cmd.is_write) - ] - - counter = Signal(period_bits) - period = Signal() - nreads = Signal(period_bits) - nwrites = Signal(period_bits) - nreads_r = Signal(period_bits) - nwrites_r = Signal(period_bits) - self.sync += [ - Cat(counter, period).eq(counter + 1), - If(period, - nreads_r.eq(nreads), - nwrites_r.eq(nwrites), - nreads.eq(0), - nwrites.eq(0) - ).Elif(cmd_stb & cmd_ack, - If(cmd_is_read, nreads.eq(nreads + 1)), - If(cmd_is_write, nwrites.eq(nwrites + 1)), - ), - If(self._update.re, - self._nreads.status.eq(nreads_r), - self._nwrites.status.eq(nwrites_r) - ) - ] diff --git a/misoclib/mem/sdram/lasmicon/refresher.py b/misoclib/mem/sdram/lasmicon/refresher.py deleted file mode 100644 index 6313b2a1..00000000 --- a/misoclib/mem/sdram/lasmicon/refresher.py +++ /dev/null @@ -1,68 +0,0 @@ -from migen.fhdl.std import * -from migen.genlib.misc import timeline -from migen.genlib.fsm import FSM - -from misoclib.mem.sdram.lasmicon.multiplexer import * - -class Refresher(Module): - def __init__(self, a, ba, tRP, tREFI, tRFC): - self.req = Signal() - self.ack = Signal() # 1st command 1 cycle after assertion of ack - self.cmd = CommandRequest(a, ba) - - ### - - # Refresh sequence generator: - # PRECHARGE ALL --(tRP)--> AUTO REFRESH --(tRFC)--> done - seq_start = Signal() - seq_done = Signal() - self.sync += [ - self.cmd.a.eq(2**10), - self.cmd.ba.eq(0), - self.cmd.cas_n.eq(1), - self.cmd.ras_n.eq(1), - self.cmd.we_n.eq(1), - seq_done.eq(0) - ] - self.sync += timeline(seq_start, [ - (1, [ - self.cmd.ras_n.eq(0), - self.cmd.we_n.eq(0) - ]), - (1+tRP, [ - self.cmd.cas_n.eq(0), - self.cmd.ras_n.eq(0) - ]), - (1+tRP+tRFC, [ - seq_done.eq(1) - ]) - ]) - - # Periodic refresh counter - counter = Signal(max=tREFI) - start = Signal() - self.sync += [ - start.eq(0), - If(counter == 0, - start.eq(1), - counter.eq(tREFI - 1) - ).Else( - counter.eq(counter - 1) - ) - ] - - # Control FSM - fsm = FSM() - self.submodules += fsm - fsm.act("IDLE", 
If(start, NextState("WAIT_GRANT"))) - fsm.act("WAIT_GRANT", - self.req.eq(1), - If(self.ack, - seq_start.eq(1), - NextState("WAIT_SEQ") - ) - ) - fsm.act("WAIT_SEQ", - self.req.eq(1), - If(seq_done, NextState("IDLE")) - ) diff --git a/misoclib/mem/sdram/minicon/__init__.py b/misoclib/mem/sdram/minicon/__init__.py deleted file mode 100644 index c03f93d4..00000000 --- a/misoclib/mem/sdram/minicon/__init__.py +++ /dev/null @@ -1,193 +0,0 @@ -from migen.fhdl.std import * -from migen.bus import wishbone -from migen.genlib.fsm import FSM, NextState - -from misoclib.mem.sdram.bus import dfi as dfibus - -class _AddressSlicer: - def __init__(self, col_a, bank_a, row_a, address_align): - self.col_a = col_a - self.bank_a = bank_a - self.row_a = row_a - self.max_a = col_a + row_a + bank_a - self.address_align = address_align - - def row(self, address): - split = self.bank_a + self.col_a - if isinstance(address, int): - return address >> split - else: - return address[split:self.max_a] - - def bank(self, address): - mask = 2**(self.bank_a + self.col_a) - 1 - shift = self.col_a - if isinstance(address, int): - return (address & mask) >> shift - else: - return address[self.col_a:self.col_a+self.bank_a] - - def col(self, address): - split = self.col_a - if isinstance(address, int): - return (address & (2**split - 1)) << self.address_align - else: - return Cat(Replicate(0, self.address_align), address[:split]) - -class Minicon(Module): - def __init__(self, phy, geom_settings, timing_settings): - if phy.settings.memtype in ["SDR"]: - burst_length = phy.settings.nphases*1 # command multiplication*SDR - elif phy.settings.memtype in ["DDR", "LPDDR", "DDR2", "DDR3"]: - burst_length = phy.settings.nphases*2 # command multiplication*DDR - address_align = log2_int(burst_length) - - nbanks = range(2**geom_settings.bank_a) - A10_ENABLED = 0 - COLUMN = 1 - ROW = 2 - rdphase = phy.settings.rdphase - wrphase = phy.settings.wrphase - - self.dfi = dfi = dfibus.Interface(geom_settings.mux_a, - geom_settings.bank_a, - phy.settings.dfi_d, - phy.settings.nphases) - - self.bus = bus = wishbone.Interface(data_width=phy.settings.nphases*flen(dfi.phases[rdphase].rddata)) - slicer = _AddressSlicer(geom_settings.col_a, geom_settings.bank_a, geom_settings.row_a, address_align) - refresh_req = Signal() - refresh_ack = Signal() - refresh_counter = Signal(max=timing_settings.tREFI+1) - hit = Signal() - row_open = Signal() - row_closeall = Signal() - addr_sel = Signal(max=3, reset=A10_ENABLED) - has_curbank_openrow = Signal() - - # Extra bit means row is active when asserted - self.openrow = openrow = Array(Signal(geom_settings.row_a + 1) for b in nbanks) - - self.comb += [ - hit.eq(openrow[slicer.bank(bus.adr)] == Cat(slicer.row(bus.adr), 1)), - has_curbank_openrow.eq(openrow[slicer.bank(bus.adr)][-1]), - bus.dat_r.eq(Cat(phase.rddata for phase in dfi.phases)), - Cat(phase.wrdata for phase in dfi.phases).eq(bus.dat_w), - Cat(phase.wrdata_mask for phase in dfi.phases).eq(~bus.sel), - ] - - for phase in dfi.phases: - self.comb += [ - phase.cke.eq(1), - phase.cs_n.eq(0), - phase.address.eq(Array([2**10, slicer.col(bus.adr), slicer.row(bus.adr)])[addr_sel]), - phase.bank.eq(slicer.bank(bus.adr)) - ] - - for b in nbanks: - self.sync += [ - If(row_open & (b == slicer.bank(bus.adr)), - openrow[b].eq(Cat(slicer.row(bus.adr), 1)), - ), - If(row_closeall, - openrow[b][-1].eq(0) - ) - ] - - self.sync += [ - If(refresh_ack, - refresh_req.eq(0) - ), - If(refresh_counter == 0, - refresh_counter.eq(timing_settings.tREFI), - refresh_req.eq(1) - 
).Else( - refresh_counter.eq(refresh_counter - 1) - ) - ] - - fsm = FSM() - self.submodules += fsm - fsm.act("IDLE", - If(refresh_req, - NextState("PRECHARGEALL") - ).Elif(bus.stb & bus.cyc, - If(hit & bus.we, - NextState("WRITE") - ), - If(hit & ~bus.we, - NextState("READ") - ), - If(has_curbank_openrow & ~hit, - NextState("PRECHARGE") - ), - If(~has_curbank_openrow, - NextState("ACTIVATE") - ), - ) - ) - fsm.act("READ", - # We output Column bits at address pins so A10 is 0 - # to disable row Auto-Precharge - dfi.phases[rdphase].ras_n.eq(1), - dfi.phases[rdphase].cas_n.eq(0), - dfi.phases[rdphase].we_n.eq(1), - dfi.phases[rdphase].rddata_en.eq(1), - addr_sel.eq(COLUMN), - NextState("READ-WAIT-ACK"), - ) - fsm.act("READ-WAIT-ACK", - If(dfi.phases[rdphase].rddata_valid, - NextState("IDLE"), - bus.ack.eq(1) - ).Else( - NextState("READ-WAIT-ACK") - ) - ) - fsm.act("WRITE", - dfi.phases[wrphase].ras_n.eq(1), - dfi.phases[wrphase].cas_n.eq(0), - dfi.phases[wrphase].we_n.eq(0), - dfi.phases[wrphase].wrdata_en.eq(1), - addr_sel.eq(COLUMN), - bus.ack.eq(1), - NextState("IDLE") - ) - fsm.act("PRECHARGEALL", - row_closeall.eq(1), - dfi.phases[rdphase].ras_n.eq(0), - dfi.phases[rdphase].cas_n.eq(1), - dfi.phases[rdphase].we_n.eq(0), - addr_sel.eq(A10_ENABLED), - NextState("PRE-REFRESH") - ) - fsm.act("PRECHARGE", - # Notes: - # 1. we are presenting the column address so that A10 is low - # 2. since we always go to the ACTIVATE state, we do not need - # to assert row_close because it will be reopen right after. - NextState("TRP"), - addr_sel.eq(COLUMN), - dfi.phases[rdphase].ras_n.eq(0), - dfi.phases[rdphase].cas_n.eq(1), - dfi.phases[rdphase].we_n.eq(0) - ) - fsm.act("ACTIVATE", - row_open.eq(1), - NextState("TRCD"), - dfi.phases[rdphase].ras_n.eq(0), - dfi.phases[rdphase].cas_n.eq(1), - dfi.phases[rdphase].we_n.eq(1), - addr_sel.eq(ROW) - ) - fsm.act("REFRESH", - refresh_ack.eq(1), - dfi.phases[rdphase].ras_n.eq(0), - dfi.phases[rdphase].cas_n.eq(0), - dfi.phases[rdphase].we_n.eq(1), - NextState("POST-REFRESH") - ) - fsm.delayed_enter("TRP", "ACTIVATE", timing_settings.tRP-1) - fsm.delayed_enter("PRE-REFRESH", "REFRESH", timing_settings.tRP-1) - fsm.delayed_enter("TRCD", "IDLE", timing_settings.tRCD-1) - fsm.delayed_enter("POST-REFRESH", "IDLE", timing_settings.tRFC-1) diff --git a/misoclib/mem/sdram/test/bankmachine_tb.py b/misoclib/mem/sdram/test/bankmachine_tb.py index 12bdcd20..12ae5da0 100644 --- a/misoclib/mem/sdram/test/bankmachine_tb.py +++ b/misoclib/mem/sdram/test/bankmachine_tb.py @@ -2,7 +2,7 @@ from migen.fhdl.std import * from migen.sim.generic import run_simulation from misoclib.mem.sdram.bus import lasmibus -from misoclib.mem.sdram.lasmicon.bankmachine import * +from misoclib.mem.sdram.core.lasmicon.bankmachine import * from common import sdram_phy, sdram_geom, sdram_timing, CommandLogger diff --git a/misoclib/mem/sdram/test/lasmicon_df_tb.py b/misoclib/mem/sdram/test/lasmicon_df_tb.py index 7f2f886b..e06722f0 100644 --- a/misoclib/mem/sdram/test/lasmicon_df_tb.py +++ b/misoclib/mem/sdram/test/lasmicon_df_tb.py @@ -2,7 +2,7 @@ from migen.fhdl.std import * from migen.sim.generic import run_simulation from misoclib.mem.sdram.bus import lasmibus -from misoclib.mem.sdram.lasmicon import * +from misoclib.mem.sdram.core.lasmicon import * from misoclib.mem.sdram.frontend import dma_lasmi from common import sdram_phy, sdram_geom, sdram_timing, DFILogger diff --git a/misoclib/mem/sdram/test/lasmicon_tb.py b/misoclib/mem/sdram/test/lasmicon_tb.py index af792bbf..a102b07c 100644 --- 
a/misoclib/mem/sdram/test/lasmicon_tb.py +++ b/misoclib/mem/sdram/test/lasmicon_tb.py @@ -2,7 +2,7 @@ from migen.fhdl.std import * from migen.sim.generic import run_simulation from misoclib.mem.sdram.bus import lasmibus -from misoclib.mem.sdram.lasmicon import * +from misoclib.mem.sdram.core.lasmicon import * from common import sdram_phy, sdram_geom, sdram_timing, DFILogger diff --git a/misoclib/mem/sdram/test/lasmicon_wb.py b/misoclib/mem/sdram/test/lasmicon_wb.py index 53fc7b87..978655dc 100644 --- a/misoclib/mem/sdram/test/lasmicon_wb.py +++ b/misoclib/mem/sdram/test/lasmicon_wb.py @@ -4,7 +4,7 @@ from migen.bus.transactions import * from migen.sim.generic import run_simulation from misoclib.mem.sdram.bus import lasmibus -from misoclib.mem.sdram.lasmicon import * +from misoclib.mem.sdram.core.lasmicon import * from misoclib.mem.sdram.frontend import wishbone2lasmi from common import sdram_phy, sdram_geom, sdram_timing, DFILogger diff --git a/misoclib/mem/sdram/test/minicon_tb.py b/misoclib/mem/sdram/test/minicon_tb.py index abfb5e03..d309e669 100644 --- a/misoclib/mem/sdram/test/minicon_tb.py +++ b/misoclib/mem/sdram/test/minicon_tb.py @@ -5,7 +5,7 @@ from migen.sim.generic import Simulator from migen.sim import icarus from mibuild.platforms import papilio_pro as board from misoclib import sdram -from misoclib.mem.sdram.minicon import Minicon +from misoclib.mem.sdram.core.minicon import Minicon from misoclib.mem.sdram.phy import gensdrphy from itertools import chain from os.path import isfile diff --git a/misoclib/mem/sdram/test/refresher.py b/misoclib/mem/sdram/test/refresher.py index 7845bf90..00caa497 100644 --- a/misoclib/mem/sdram/test/refresher.py +++ b/misoclib/mem/sdram/test/refresher.py @@ -3,7 +3,7 @@ from random import Random from migen.fhdl.std import * from migen.sim.generic import run_simulation -from misoclib.mem.sdram.lasmicon.refresher import * +from misoclib.mem.sdram.core.lasmicon.refresher import * from common import CommandLogger diff --git a/misoclib/soc/sdram.py b/misoclib/soc/sdram.py index 5f49af59..62c9b70c 100644 --- a/misoclib/soc/sdram.py +++ b/misoclib/soc/sdram.py @@ -3,8 +3,8 @@ from migen.bus import wishbone, csr from misoclib.mem.sdram.bus import dfi, lasmibus from misoclib.mem.sdram.phy import dfii -from misoclib.mem.sdram import minicon, lasmicon -from misoclib.mem.sdram.lasmicon import crossbar +from misoclib.mem.sdram.core import minicon, lasmicon +from misoclib.mem.sdram.core.lasmicon import crossbar from misoclib.mem.sdram.frontend import memtest, wishbone2lasmi from misoclib.soc import SoC, mem_decoder
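
Usage sketch (illustrative, not part of the patch): after this commit the controllers live under misoclib.mem.sdram.core, so out-of-tree code only needs the new import paths. A minimal, hypothetical wiring, assuming sdram_phy, sdram_geom and sdram_timing objects like the ones the test benches above import from their common module:

    # hypothetical example -- sdram_phy/sdram_geom/sdram_timing are assumed to exist,
    # as in misoclib/mem/sdram/test/common.py; only the import paths below are new
    from misoclib.mem.sdram.core.lasmicon import LASMIcon
    from misoclib.mem.sdram.core.lasmicon.crossbar import Crossbar

    lasmicon = LASMIcon(sdram_phy, sdram_geom, sdram_timing)
    # cba_shift = nrowbits places the bank/controller bits just above the column bits
    xbar = Crossbar([lasmicon.lasmic], lasmicon.nrowbits)
    port = xbar.get_master()  # LASMI master for a frontend such as dma_lasmi or wishbone2lasmi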