Initial commit
author    Jean THOMAS <git0@pub.jeanthomas.me>
Wed, 3 Jun 2020 18:58:22 +0000 (20:58 +0200)
committer Jean THOMAS <git0@pub.jeanthomas.me>
Wed, 3 Jun 2020 18:58:22 +0000 (20:58 +0200)
78 files changed:
.gitignore [new file with mode: 0644]
.sim-test.py [new file with mode: 0755]
.travis.yml [new file with mode: 0644]
CONTRIBUTORS [new file with mode: 0644]
LICENSE [new file with mode: 0644]
README.md [new file with mode: 0644]
examples/customecpix5.py [new file with mode: 0644]
examples/ecpix5.py [new file with mode: 0644]
gram/__init__.py [new file with mode: 0644]
gram/common.py [new file with mode: 0644]
gram/compat.py [new file with mode: 0644]
gram/core/__init__.py [new file with mode: 0644]
gram/core/bandwidth.py [new file with mode: 0644]
gram/core/bankmachine.py [new file with mode: 0644]
gram/core/controller.py [new file with mode: 0644]
gram/core/crossbar.py [new file with mode: 0644]
gram/core/multiplexer.py [new file with mode: 0644]
gram/core/refresher.py [new file with mode: 0644]
gram/dfii.py [new file with mode: 0644]
gram/frontend/__init__.py [new file with mode: 0644]
gram/frontend/adaptation.py [new file with mode: 0644]
gram/frontend/axi.py [new file with mode: 0644]
gram/frontend/bist.py [new file with mode: 0644]
gram/frontend/dma.py [new file with mode: 0644]
gram/frontend/ecc.py [new file with mode: 0644]
gram/frontend/fifo.py [new file with mode: 0644]
gram/frontend/wishbone.py [new file with mode: 0644]
gram/gen.py [new file with mode: 0755]
gram/init.py [new file with mode: 0644]
gram/modules.py [new file with mode: 0644]
gram/phy/__init__.py [new file with mode: 0644]
gram/phy/dfi.py [new file with mode: 0644]
gram/phy/ecp5ddrphy.py [new file with mode: 0644]
gram/phy/model.py [new file with mode: 0644]
gram/stream.py [new file with mode: 0644]
setup.py [new file with mode: 0755]
test/__init__.py [new file with mode: 0644]
test/access_pattern.csv [new file with mode: 0644]
test/benchmark.py [new file with mode: 0755]
test/benchmarks.yml [new file with mode: 0644]
test/common.py [new file with mode: 0644]
test/gen_access_pattern.py [new file with mode: 0755]
test/gen_config.py [new file with mode: 0755]
test/reference/ddr3_init.h [new file with mode: 0644]
test/reference/ddr3_init.py [new file with mode: 0644]
test/reference/ddr4_init.h [new file with mode: 0644]
test/reference/ddr4_init.py [new file with mode: 0644]
test/reference/sdr_init.h [new file with mode: 0644]
test/reference/sdr_init.py [new file with mode: 0644]
test/run_benchmarks.py [new file with mode: 0755]
test/spd_data/MT16KTF1G64HZ-1G6P1.csv [new file with mode: 0644]
test/spd_data/MT16KTF1G64HZ-1G9E1.csv [new file with mode: 0644]
test/spd_data/MT18KSF1G72HZ-1G4E2.csv [new file with mode: 0644]
test/spd_data/MT18KSF1G72HZ-1G6E2.csv [new file with mode: 0644]
test/spd_data/MT8JTF12864AZ-1G4G1.csv [new file with mode: 0644]
test/spd_data/MT8KTF51264HZ-1G4E1.csv [new file with mode: 0644]
test/spd_data/MT8KTF51264HZ-1G6E1.csv [new file with mode: 0644]
test/spd_data/MT8KTF51264HZ-1G9P1.csv [new file with mode: 0644]
test/summary/summary.css [new file with mode: 0644]
test/summary/summary.html.jinja2 [new file with mode: 0644]
test/test_adaptation.py [new file with mode: 0644]
test/test_axi.py [new file with mode: 0644]
test/test_bandwidth.py [new file with mode: 0644]
test/test_bankmachine.py [new file with mode: 0644]
test/test_bist.py [new file with mode: 0644]
test/test_command_chooser.py [new file with mode: 0644]
test/test_crossbar.py [new file with mode: 0644]
test/test_dma.py [new file with mode: 0644]
test/test_ecc.py [new file with mode: 0644]
test/test_examples.py [new file with mode: 0644]
test/test_fifo.py [new file with mode: 0644]
test/test_init.py [new file with mode: 0644]
test/test_modules.py [new file with mode: 0644]
test/test_multiplexer.py [new file with mode: 0644]
test/test_refresh.py [new file with mode: 0644]
test/test_steerer.py [new file with mode: 0644]
test/test_timing.py [new file with mode: 0644]
test/test_wishbone.py [new file with mode: 0644]

diff --git a/.gitignore b/.gitignore
new file mode 100644 (file)
index 0000000..72364f9
--- /dev/null
@@ -0,0 +1,89 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+env/
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+*.egg-info/
+.installed.cfg
+*.egg
+
+# PyInstaller
+#  Usually these files are written by a python script from a template
+#  before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*,cover
+.hypothesis/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# IPython Notebook
+.ipynb_checkpoints
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# dotenv
+.env
+
+# virtualenv
+venv/
+ENV/
+
+# Spyder project settings
+.spyderproject
+
+# Rope project settings
+.ropeproject
diff --git a/.sim-test.py b/.sim-test.py
new file mode 100755 (executable)
index 0000000..104b250
--- /dev/null
@@ -0,0 +1,77 @@
+#!/usr/bin/env python3
+import os
+import sys
+import pexpect
+import time
+from argparse import ArgumentParser
+
+
+parser = ArgumentParser()
+parser.add_argument("--sdram-module", type=str)
+args = parser.parse_args()
+
+
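+# Each entry describes a command to spawn and an ordered list of checkpoints.
+# A checkpoint passes when one of its 'good' patterns matches before its
+# timeout; any 'bad' pattern match, EOF or timeout makes the whole test fail.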
+tests = [
+    {
+        'id':      'litex_sim',
+        'command': f'python3 -m litex.tools.litex_sim --with-sdram --sdram-module {args.sdram_module}',
+        'cwd':     os.getcwd(),
+        'checkpoints': [
+            { 'timeout': 240, 'good': [b'\n\\s*BIOS built on[^\n]+\n'] },
+            { 'timeout': 30,  'good': [b'Memtest OK'],
+                              'bad':  [b'(Memory initialization failed|Booting from)'] },
+        ]
+    }
+]
+
+
+def run_test(id, command, cwd, checkpoints):
+    print(f'*** Test ID: {id}')
+    print(f'*** CWD:     {cwd}')
+    print(f'*** Command: {command}')
+    os.chdir(cwd)
+    p = pexpect.spawn(command, timeout=None, logfile=sys.stdout.buffer)
+
+    checkpoint_id = 0
+    for cp in checkpoints:
+        good = cp.get('good', [])
+        bad = cp.get('bad', [])
+        patterns = good + bad
+        timeout = cp.get('timeout', None)
+
+        timediff = time.time()
+        try:
+            match_id = p.expect(patterns, timeout=timeout)
+        except pexpect.EOF:
+            print(f'\n*** {id}: premature termination')
+            return False
+        except pexpect.TIMEOUT:
+            timediff = time.time() - timediff
+            print(f'\n*** {id}: timeout (checkpoint {checkpoint_id}: +{int(timediff)}s)')
+            return False
+        timediff = time.time() - timediff
+
+        if match_id >= len(good):
+            break
+
+        sys.stdout.buffer.write(b'<<checkpoint %d: +%ds>>' % (checkpoint_id, int(timediff)))
+        checkpoint_id += 1
+
+    is_success = checkpoint_id == len(checkpoints)
+
+    # Let it print rest of line
+    match_id = p.expect_exact([b'\n', pexpect.TIMEOUT, pexpect.EOF], timeout=1)
+    p.terminate(force=True)
+
+    line_break = '\n' if match_id != 0 else ''
+    print(f'{line_break}*** {id}: {"success" if is_success else "failure"}')
+
+    return is_success
+
+
+for test in tests:
+    success = run_test(**test)
+    if not success:
+        sys.exit(1)
+
+sys.exit(0)
diff --git a/.travis.yml b/.travis.yml
new file mode 100644 (file)
index 0000000..81faa10
--- /dev/null
@@ -0,0 +1,82 @@
+language: python
+dist: bionic
+python: "3.6"
+
+before_install:
+  - sudo apt-get update
+  - sudo apt-get -y install verilator libevent-dev libjson-c-dev
+  - pip install pexpect numpy matplotlib pandas jinja2
+
+install:
+  # Get Migen / LiteX / Cores
+  - cd ~/
+  - wget https://raw.githubusercontent.com/enjoy-digital/litex/master/litex_setup.py
+  - python3 litex_setup.py init install
+  # Install the version being tested
+  - cd $TRAVIS_BUILD_DIR
+  - python3 setup.py install
+
+before_script:
+  # Get RISC-V toolchain
+  - wget https://static.dev.sifive.com/dev-tools/riscv64-unknown-elf-gcc-20171231-x86_64-linux-centos6.tar.gz
+  - tar -xvf riscv64-unknown-elf-gcc-20171231-x86_64-linux-centos6.tar.gz
+  - export PATH=$PATH:$PWD/riscv64-unknown-elf-gcc-20171231-x86_64-linux-centos6/bin/
+
+script: ./.sim-test.py --sdram-module="$SDRAM_MODULE"
+
+jobs:
+  include:
+    - stage: "Unit tests"
+      script:
+        - cd $TRAVIS_BUILD_DIR
+        - python setup.py test
+
+    - stage: "Simulations"
+      env: SDRAM_MODULE=IS42S16160
+    - env: SDRAM_MODULE=IS42S16320
+    - env: SDRAM_MODULE=MT48LC4M16
+    - env: SDRAM_MODULE=MT48LC16M16
+    - env: SDRAM_MODULE=AS4C16M16
+    - env: SDRAM_MODULE=AS4C32M16
+    - env: SDRAM_MODULE=AS4C32M8
+    - env: SDRAM_MODULE=M12L64322A
+    - env: SDRAM_MODULE=M12L16161A
+    - env: SDRAM_MODULE=MT46V32M16
+    - env: SDRAM_MODULE=MT46H32M16
+    - env: SDRAM_MODULE=MT46H32M32
+    - env: SDRAM_MODULE=MT47H128M8
+    - env: SDRAM_MODULE=MT47H32M16
+    - env: SDRAM_MODULE=MT47H64M16
+    - env: SDRAM_MODULE=P3R1GE4JGF
+    - env: SDRAM_MODULE=MT41K64M16
+    - env: SDRAM_MODULE=MT41J128M16
+    - env: SDRAM_MODULE=MT41J256M16
+    - env: SDRAM_MODULE=K4B1G0446F
+    - env: SDRAM_MODULE=K4B2G1646F
+    - env: SDRAM_MODULE=H5TC4G63CFR
+    - env: SDRAM_MODULE=IS43TR16128B
+    - env: SDRAM_MODULE=MT8JTF12864
+    - env: SDRAM_MODULE=MT8KTF51264
+    - env: SDRAM_MODULE=MT18KSF1G72HZ
+    - env: SDRAM_MODULE=AS4C256M16D3A
+    - env: SDRAM_MODULE=MT16KTF1G64HZ
+    - env: SDRAM_MODULE=EDY4016A
+    - env: SDRAM_MODULE=MT40A1G8
+    - env: SDRAM_MODULE=MT40A512M16
+
+    - stage: Benchmarks
+      script:
+      - python3 -m test.run_benchmarks test/benchmarks.yml --results-cache cache.json --html --heartbeat 60 --timeout 540
+      # move benchmark artifacts to gh-pages/ directory that will be pushed to gh-pages branch
+      - mkdir -p gh-pages
+      - mv html/summary.html gh-pages/index.html
+      - mv cache.json gh-pages/cache.json
+      - touch gh-pages/.nojekyll
+      deploy:
+        provider: pages
+        skip_cleanup: true
+        token: $GITHUB_TOKEN
+        keep_history: true
+        local_dir: gh-pages
+        on:
+          branch: master
diff --git a/CONTRIBUTORS b/CONTRIBUTORS
new file mode 100644 (file)
index 0000000..75feeeb
--- /dev/null
@@ -0,0 +1,26 @@
+The LiteX ecosystem would not exist without the collaborative work of contributors! Below is the
+list of all the LiteDRAM contributors.
+
+In the source code, each file lists the main authors/contributors:
+- author(s) that created the initial content.
+- contributor(s) that added essential features/improvements.
+
+If you think you should be in this list and don't find yourself, write to florent@enjoy-digital.fr
+and we'll fix it!
+
+Contributors:
+Copyright (c) 2019 Ambroz Bizjak <abizjak.pro@gmail.com>
+Copyright (c) 2019 Antony Pavlov <antonynpavlov@gmail.com>
+Copyright (c) 2018 bunnie <bunnie@kosagi.com>
+Copyright (c) 2018-2019 David Shah <dave@ds0.me>
+Copyright (c) 2020 Drew Fustini <drew@pdp7.com>
+Copyright (c) 2019 Ewout ter Hoeven <E.M.terHoeven@student.tudelft.nl>
+Copyright (c) 2018 Felix Held <felix-github@felixheld.de>
+Copyright (c) 2015-2020 Florent Kermarrec <florent@enjoy-digital.fr>
+Copyright (c) 2019 Gabriel L. Somlo <gsomlo@gmail.com>
+Copyright (c) 2018 John Sully <john@csquare.ca>
+Copyright (c) 2019 Antmicro <www.antmicro.com>
+Copyright (c) 2020 Michael Welling <mwelling@ieee.org>
+Copyright (c) 2019 Pierre-Olivier Vauboin <po@lambdaconcept>
+Copyright (c) 2015 Sebastien Bourdeauducq <sb@m-labs.hk>
+Copyright (c) 2016-2016 Tim 'mithro' Ansell <me@mith.ro>
diff --git a/LICENSE b/LICENSE
new file mode 100644 (file)
index 0000000..991cbcf
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,31 @@
+Unless otherwise noted, Gram is Copyright 2020 / LambdaConcept
+
+Initial development is based on MiSoC's LASMICON / Copyright 2007-2016 / M-Labs
+                                LiteDRAM / Copyright 2012-2018 / EnjoyDigital
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Other authors retain ownership of their contributions. If a submission can
+reasonably be considered independently copyrightable, it's yours and we
+encourage you to claim it with appropriate copyright notices. This submission
+then falls under the "otherwise noted" category. All submissions are strongly
+encouraged to use the two-clause BSD license reproduced above.
diff --git a/README.md b/README.md
new file mode 100644 (file)
index 0000000..e66713d
--- /dev/null
+++ b/README.md
@@ -0,0 +1,7 @@
+# Gram
+
+Gram is an nMigen+LambdaSoC port of the [LiteDRAM](https://github.com/enjoy-digital/litedram) core by [enjoy-digital](https://github.com/enjoy-digital). It currently only targets ECP5+DDR3.
+
+## License
+
+
diff --git a/examples/customecpix5.py b/examples/customecpix5.py
new file mode 100644 (file)
index 0000000..e77baef
--- /dev/null
@@ -0,0 +1,170 @@
+import os
+import subprocess
+
+from nmigen.build import *
+from nmigen.vendor.lattice_ecp5 import *
+from nmigen_boards.resources import *
+
+
+__all__ = ["ECPIX5Platform"]
+
+
+class ECPIX5Platform(LatticeECP5Platform):
+    device      = "LFE5UM5G-85F"
+    package     = "BG554"
+    speed       = "8"
+    default_clk = "clk100"
+    default_rst = "rst"
+
+    resources   = [
+        Resource("rst", 0, PinsN("AB1", dir="i"), Attrs(IO_TYPE="LVCMOS33")),
+        Resource("clk100", 0, Pins("K23", dir="i"), Clock(100e6), Attrs(IO_TYPE="LVCMOS33")),
+
+        RGBLEDResource(0, r="U21", g="W21", b="T24", attrs=Attrs(IO_TYPE="LVCMOS33")),
+        RGBLEDResource(1, r="T23", g="R21", b="T22", attrs=Attrs(IO_TYPE="LVCMOS33")),
+        RGBLEDResource(2, r="P21", g="R23", b="P22", attrs=Attrs(IO_TYPE="LVCMOS33")),
+        RGBLEDResource(3, r="K21", g="K24", b="M21", attrs=Attrs(IO_TYPE="LVCMOS33")),
+
+        UARTResource(0,
+            rx="R26", tx="R24",
+            attrs=Attrs(IO_TYPE="LVCMOS33", PULLMODE="UP")
+        ),
+
+        *SPIFlashResources(0,
+            cs="AA2", clk="AE3", miso="AE2", mosi="AD2", wp="AF2", hold="AE1",
+            attrs=Attrs(IO_TYPE="LVCMOS33")
+        ),
+
+        Resource("eth_rgmii", 0,
+            Subsignal("rst",     PinsN("C13", dir="o")),
+            Subsignal("mdio",    Pins("A13", dir="io")),
+            Subsignal("mdc",     Pins("C11", dir="o")),
+            Subsignal("tx_clk",  Pins("A12", dir="o")),
+            Subsignal("tx_ctrl", Pins("C9", dir="o")),
+            Subsignal("tx_data", Pins("D8 C8 B8 A8", dir="o")),
+            Subsignal("rx_clk",  Pins("E11", dir="i")),
+            Subsignal("rx_ctrl", Pins("A11", dir="i")),
+            Subsignal("rx_data", Pins("B11 A10 B10 A9", dir="i")),
+            Attrs(IO_TYPE="LVCMOS33")
+        ),
+        Resource("eth_int", 0, PinsN("B13", dir="i"), Attrs(IO_TYPE="LVCMOS33")),
+
+        *SDCardResources(0,
+            clk="P24", cmd="M24", dat0="N26", dat1="N25", dat2="N23", dat3="N21", cd="L22",
+            # TODO
+            # clk_fb="P25", cmd_dir="M23", dat0_dir="N24", dat123_dir="P26",
+            attrs=Attrs(IO_TYPE="LVCMOS33"),
+        ),
+
+        Resource("ddr3", 0,
+            Subsignal("clk",    DiffPairs("H3", "J3", dir="o"), Attrs(IO_TYPE="SSTL135D_I")),
+            Subsignal("cke", Pins("P1", dir="o")),
+            Subsignal("we_n",     Pins("R3", dir="o")),
+            Subsignal("ras_n",    Pins("T3", dir="o")),
+            Subsignal("cas_n",    Pins("P2", dir="o")),
+            Subsignal("a",      Pins("T5 M3 L3 V6 K2 W6 K3 L1 H2 L2 N1 J1 M1 K1", dir="o")),
+            Subsignal("ba",     Pins("U6 N3 N4", dir="o")),
+            Subsignal("dqs",    DiffPairs("V4 V1", "U5 U2", dir="io"), Attrs(IO_TYPE="SSTL135D_I")),
+            Subsignal("dq",     Pins("T4 W4 R4 W5 R6 P6 P5 P4 R1 W3 T2 V3 U3 W1 T1 W2", dir="io")),
+            Subsignal("dm",     Pins("J4 H5", dir="o")),
+            Subsignal("odt",    Pins("P3", dir="o")),
+            Attrs(IO_TYPE="SSTL135_I")
+        ),
+
+        Resource("hdmi", 0,
+            Subsignal("rst",   PinsN("N6", dir="o")),
+            Subsignal("scl",   Pins("C17", dir="io")),
+            Subsignal("sda",   Pins("E17", dir="io")),
+            Subsignal("pclk",  Pins("C1", dir="o")),
+            Subsignal("vsync", Pins("A4", dir="o")),
+            Subsignal("hsync", Pins("B4", dir="o")),
+            Subsignal("de",    Pins("A3", dir="o")),
+            Subsignal("d",
+                Subsignal("b", Pins("AD25 AC26 AB24 AB25  B3  C3  D3  B1  C2  D2 D1 E3", dir="o")),
+                Subsignal("g", Pins("AA23 AA22 AA24 AA25  E1  F2  F1 D17 D16 E16 J6 H6", dir="o")),
+                Subsignal("r", Pins("AD26 AE25 AF25 AE26 E10 D11 D10 C10  D9  E8 H5 J4", dir="o")),
+            ),
+            Subsignal("mclk",  Pins("E19", dir="o")),
+            Subsignal("sck",   Pins("D6", dir="o")),
+            Subsignal("ws",    Pins("C6", dir="o")),
+            Subsignal("i2s",   Pins("A6 B6 A5 C5", dir="o")),
+            Subsignal("int",   PinsN("C4", dir="i")),
+            Attrs(IO_TYPE="LVTTL33")
+        ),
+
+        Resource("sata", 0,
+            Subsignal("tx", DiffPairs("AD16", "AD17", dir="o")),
+            Subsignal("rx", DiffPairs("AF15", "AF16", dir="i")),
+            Attrs(IO_TYPE="LVDS")
+        ),
+
+        Resource("ulpi", 0,
+            Subsignal("rst",  Pins("E23", dir="o")),
+            Subsignal("clk",  Pins("H24", dir="i")),
+            Subsignal("dir",  Pins("F22", dir="i")),
+            Subsignal("nxt",  Pins("F23", dir="i")),
+            Subsignal("stp",  Pins("H23", dir="o")),
+            Subsignal("data", Pins("M26 L25 L26 K25 K26 J23 J26 H25", dir="io")),
+            Attrs(IO_TYPE="LVCMOS33")
+        ),
+
+        Resource("usbc_cfg", 0,
+            Subsignal("scl", Pins("D24", dir="io")),
+            Subsignal("sda", Pins("C24", dir="io")),
+            Subsignal("dir", Pins("B23", dir="i")),
+            Subsignal("id",  Pins("D23", dir="i")),
+            Subsignal("int", PinsN("B24", dir="i")),
+            Attrs(IO_TYPE="LVCMOS33")
+        ),
+        Resource("usbc_mux", 0,
+            Subsignal("en",    Pins("C23", dir="oe")),
+            Subsignal("amsel", Pins("B26", dir="oe")),
+            Subsignal("pol",   Pins("D26", dir="o")),
+            Subsignal("lna",   DiffPairs( "AF9", "AF10", dir="i"), Attrs(IO_TYPE="LVCMOS18D")),
+            Subsignal("lnb",   DiffPairs("AD10", "AD11", dir="o"), Attrs(IO_TYPE="LVCMOS18D")),
+            Subsignal("lnc",   DiffPairs( "AD7",  "AD8", dir="o"), Attrs(IO_TYPE="LVCMOS18D")),
+            Subsignal("lnd",   DiffPairs( "AF6",  "AF7", dir="i"), Attrs(IO_TYPE="LVCMOS18D")),
+            Attrs(IO_TYPE="LVCMOS33")
+        ),
+    ]
+
+    connectors  = [
+        Connector("pmod", 0, "T25 U25 U24 V24 - - T26 U26 V26 W26 - -"),
+        Connector("pmod", 1, "U23 V23 U22 V21 - - W25 W24 W23 W22 - -"),
+        Connector("pmod", 2, "J24 H22 E21 D18 - - K22 J21 H21 D22 - -"),
+        Connector("pmod", 3, " E4  F4  E6  H4 - -  F3  D4  D5  F5 - -"),
+        Connector("pmod", 4, "E26 D25 F26 F25 - - A25 A24 C26 C25 - -"),
+        Connector("pmod", 5, "D19 C21 B21 C22 - - D21 A21 A22 A23 - -"),
+        Connector("pmod", 6, "C16 B17 C18 B19 - - A17 A18 A19 C19 - -"),
+        Connector("pmod", 7, "D14 B14 E14 B16 - - C14 A14 A15 A16 - -"),
+    ]
+
+    @property
+    def file_templates(self):
+        return {
+            **super().file_templates,
+            "{{name}}-openocd.cfg": r"""
+            interface ftdi
+            ftdi_vid_pid 0x0403 0x6010
+            ftdi_channel 0
+            ftdi_layout_init 0xfff8 0xfffb
+            reset_config none
+            adapter_khz 25000
+
+            jtag newtap ecp5 tap -irlen 8 -expected-id 0x81113043
+            """
+        }
+
+    def toolchain_program(self, products, name):
+        openocd = os.environ.get("OPENOCD", "openocd")
+        with products.extract("{}-openocd.cfg".format(name), "{}.svf".format(name)) \
+                as (config_filename, vector_filename):
+            subprocess.check_call([openocd,
+                "-f", config_filename,
+                "-c", "transport select jtag; init; svf -quiet {}; exit".format(vector_filename)
+            ])
+
+
+# if __name__ == "__main__":
+#     from .test.blinky import Blinky
+#     ECPIX5Platform().build(Blinky(), do_program=True)
diff --git a/examples/ecpix5.py b/examples/ecpix5.py
new file mode 100644 (file)
index 0000000..3c6c132
--- /dev/null
@@ -0,0 +1,184 @@
+from nmigen import *
+from nmigen_soc import wishbone, memory
+
+from lambdasoc.cpu.minerva import MinervaCPU
+from lambdasoc.periph.intc import GenericInterruptController
+from lambdasoc.periph.serial import AsyncSerialPeripheral
+from lambdasoc.periph.sram import SRAMPeripheral
+from lambdasoc.periph.timer import TimerPeripheral
+from lambdasoc.periph import Peripheral
+from lambdasoc.soc.cpu import CPUSoC
+
+from gram.phy.ecp5ddrphy import ECP5DDRPHY
+
+from customecpix5 import ECPIX5Platform
+
+class PLL(Elaboratable):
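+    # Thin wrapper around the ECP5 EHXPLLL primitive: one input clock and up
+    # to four output clocks (CLKOP/CLKOS/CLKOS2/CLKOS3) whose ratios are set
+    # by the *_DIV parameters; `clksel` selects which output closes the
+    # feedback path.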
+    def __init__(self, clkin, clksel=Signal(shape=2, reset=2), clkout1=Signal(), clkout2=Signal(), clkout3=Signal(), clkout4=Signal(), lock=Signal(), CLKI_DIV=1, CLKFB_DIV=1, CLK1_DIV=3, CLK2_DIV=4, CLK3_DIV=5, CLK4_DIV=6):
+        self.clkin = clkin
+        self.clkout1 = clkout1
+        self.clkout2 = clkout2
+        self.clkout3 = clkout3
+        self.clkout4 = clkout4
+        self.clksel = clksel
+        self.lock = lock
+        self.CLKI_DIV = CLKI_DIV
+        self.CLKFB_DIV = CLKFB_DIV
+        self.CLKOP_DIV = CLK1_DIV
+        self.CLKOS_DIV = CLK2_DIV
+        self.CLKOS2_DIV = CLK3_DIV
+        self.CLKOS3_DIV = CLK4_DIV
+        self.ports = [
+            self.clkin,
+            self.clkout1,
+            self.clkout2,
+            self.clkout3,
+            self.clkout4,
+            self.clksel,
+            self.lock,
+        ]
+
+    def elaborate(self, platform):
+        clkfb = Signal()
+        pll = Instance("EHXPLLL",
+            p_PLLRST_ENA='DISABLED',
+            p_INTFB_WAKE='DISABLED',
+            p_STDBY_ENABLE='DISABLED',
+            p_CLKOP_FPHASE=0,
+            p_CLKOP_CPHASE=11,
+            p_OUTDIVIDER_MUXA='DIVA',
+            p_CLKOP_ENABLE='ENABLED',
+            p_CLKOP_DIV=self.CLKOP_DIV, #Max 948 MHz at OP=79 FB=1 I=1 F_in=12 MHz, Min 30 MHz (28 MHz locks sometimes, lock LED blinks) Hmm... /3*82/25
+            p_CLKOS_DIV=self.CLKOS_DIV,
+            p_CLKOS2_DIV=self.CLKOS2_DIV,
+            p_CLKOS3_DIV=self.CLKOS3_DIV,
+            p_CLKFB_DIV=self.CLKFB_DIV, #25
+            p_CLKI_DIV=self.CLKI_DIV, #6
+            p_FEEDBK_PATH='USERCLOCK',
+            i_CLKI=self.clkin,
+            i_CLKFB=clkfb,
+            i_RST=0,
+            i_STDBY=0,
+            i_PHASESEL0=0,
+            i_PHASESEL1=0,
+            i_PHASEDIR=0,
+            i_PHASESTEP=0,
+            i_PLLWAKESYNC=0,
+            i_ENCLKOP=0,
+            i_ENCLKOS=0,
+            i_ENCLKOS2=0,
+            i_ENCLKOS3=0,
+            o_CLKOP=self.clkout1,
+            o_CLKOS=self.clkout2,
+            o_CLKOS2=self.clkout3,
+            o_CLKOS3=self.clkout4,
+            o_LOCK=self.lock,
+            #o_LOCK=pll_lock
+            )
+        m = Module()
+        m.submodules += pll
+        with m.If(self.clksel == 0):
+            m.d.comb += clkfb.eq(self.clkout1)
+        with m.Elif(self.clksel == 1):
+            m.d.comb += clkfb.eq(self.clkout2)
+        with m.Elif(self.clksel == 2):
+            m.d.comb += clkfb.eq(self.clkout3)
+        with m.Else():
+            m.d.comb += clkfb.eq(self.clkout4)
+        return m
+
+class SysClocker(Elaboratable):
+       def elaborate(self, platform):
+               m = Module()
+
+               m.submodules.pll = pll = PLL(ClockSignal("sync"), CLKI_DIV=1, CLKFB_DIV=2, CLK1_DIV=2)
+               cd_sys2x = ClockDomain("sys2x", local=False)
+               m.d.comb += cd_sys2x.clk.eq(pll.clkout1)
+               m.domains += cd_sys2x
+
+               cd_init = ClockDomain("init", local=False)
+               m.d.comb += cd_init.clk.eq(pll.clkout2)
+               m.domains += cd_init
+
+               return m
+
+class DDR3SoC(CPUSoC, Elaboratable):
+       def __init__(self, *, reset_addr, clk_freq,
+                                rom_addr, rom_size,
+                                ram_addr, ram_size,
+                                uart_addr, uart_divisor, uart_pins,
+                                timer_addr, timer_width,
+                                ddrphy_addr):
+               self._arbiter = wishbone.Arbiter(addr_width=30, data_width=32, granularity=8,
+                                                                                features={"cti", "bte"})
+               self._decoder = wishbone.Decoder(addr_width=30, data_width=32, granularity=8,
+                                                                                features={"cti", "bte"})
+
+               self.cpu = MinervaCPU(reset_address=reset_addr)
+               self._arbiter.add(self.cpu.ibus)
+               self._arbiter.add(self.cpu.dbus)
+
+               self.rom = SRAMPeripheral(size=rom_size, writable=False)
+               self._decoder.add(self.rom.bus, addr=rom_addr)
+
+               self.ram = SRAMPeripheral(size=ram_size)
+               self._decoder.add(self.ram.bus, addr=ram_addr)
+
+               self.uart = AsyncSerialPeripheral(divisor=uart_divisor, pins=uart_pins)
+               self._decoder.add(self.uart.bus, addr=uart_addr)
+
+               self.timer = TimerPeripheral(width=timer_width)
+               self._decoder.add(self.timer.bus, addr=timer_addr)
+
+               self.intc = GenericInterruptController(width=len(self.cpu.ip))
+               self.intc.add_irq(self.timer.irq, 0)
+               self.intc.add_irq(self.uart .irq, 1)
+
+               self.ddrphy = ECP5DDRPHY(platform.request("ddr3", 0))
+               self._decoder.add(self.ddrphy.bus, addr=ddrphy_addr)
+
+               self.memory_map = self._decoder.bus.memory_map
+
+               self.clk_freq = clk_freq
+
+       def elaborate(self, platform):
+               m = Module()
+
+               m.submodules.arbiter = self._arbiter
+               m.submodules.cpu     = self.cpu
+
+               m.submodules.decoder = self._decoder
+               m.submodules.rom     = self.rom
+               m.submodules.ram     = self.ram
+               m.submodules.uart    = self.uart
+               m.submodules.timer   = self.timer
+               m.submodules.intc    = self.intc
+               m.submodules.ddrphy  = self.ddrphy
+
+               m.submodules.sys2x = SysClocker()
+
+               m.d.comb += [
+                       self._arbiter.bus.connect(self._decoder.bus),
+                       self.cpu.ip.eq(self.intc.ip),
+               ]
+
+               return m
+
+
+if __name__ == "__main__":
+       platform = ECPIX5Platform()
+
+       uart_divisor = int(platform.default_clk_frequency // 115200)
+       uart_pins = platform.request("uart", 0)
+
+       soc = DDR3SoC(
+                reset_addr=0x00000000, clk_freq=int(platform.default_clk_frequency),
+                  rom_addr=0x00000000, rom_size=0x4000,
+                  ram_addr=0x00004000, ram_size=0x1000,
+                 uart_addr=0x00005000, uart_divisor=uart_divisor, uart_pins=uart_pins,
+                timer_addr=0x00006000, timer_width=32,
+               ddrphy_addr=0x00007000
+       )
+
+       soc.build(do_build=True, do_init=True)
+       platform.build(soc, do_program=True)
diff --git a/gram/__init__.py b/gram/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/gram/common.py b/gram/common.py
new file mode 100644 (file)
index 0000000..8b7f0a6
--- /dev/null
@@ -0,0 +1,366 @@
+# This file is Copyright (c) 2016-2019 Florent Kermarrec <florent@enjoy-digital.fr>
+# This file is Copyright (c) 2018 John Sully <john@csquare.ca>
+# This file is Copyright (c) 2018 bunnie <bunnie@kosagi.com>
+# License: BSD
+
+import math
+from functools import reduce
+from operator import add
+from collections import OrderedDict
+
+from nmigen import *
+from nmigen.hdl.rec import DIR_FANOUT, DIR_FANIN
+from nmigen.utils import log2_int
+
+import gram.stream as stream
+
+# Helpers ------------------------------------------------------------------------------------------
+
+burst_lengths = {
+    "SDR":   1,
+    "DDR":   4,
+    "LPDDR": 4,
+    "DDR2":  4,
+    "DDR3":  8,
+    "DDR4":  8
+}
+
+def get_cl_cw(memtype, tck):
+    f_to_cl_cwl = OrderedDict()
+    if memtype == "DDR2":
+        f_to_cl_cwl[400e6]  = (3, 2)
+        f_to_cl_cwl[533e6]  = (4, 3)
+        f_to_cl_cwl[667e6]  = (5, 4)
+        f_to_cl_cwl[800e6]  = (6, 5)
+        f_to_cl_cwl[1066e6] = (7, 5)
+    elif memtype == "DDR3":
+        f_to_cl_cwl[800e6]  = ( 6, 5)
+        f_to_cl_cwl[1066e6] = ( 7, 6)
+        f_to_cl_cwl[1333e6] = (10, 7)
+        f_to_cl_cwl[1600e6] = (11, 8)
+    elif memtype == "DDR4":
+        f_to_cl_cwl[1600e6] = (11,  9)
+    else:
+        raise ValueError
+    for f, (cl, cwl) in f_to_cl_cwl.items():
+        if tck >= 2/f:
+            return cl, cwl
+    raise ValueError
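+# Example (illustrative): for DDR3 with tck = 1.25e-9 (DDR3-1600), the first
+# entry satisfying tck >= 2/f is f = 1600e6, so get_cl_cw("DDR3", 1.25e-9)
+# returns (CL, CWL) = (11, 8).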
+
+def get_sys_latency(nphases, cas_latency):
+    return math.ceil(cas_latency/nphases)
+
+def get_sys_phases(nphases, sys_latency, cas_latency):
+    dat_phase = sys_latency*nphases - cas_latency
+    cmd_phase = (dat_phase - 1)%nphases
+    return cmd_phase, dat_phase
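+# Example (illustrative): with nphases = 4 and cas_latency = 7,
+# get_sys_latency(4, 7) = ceil(7/4) = 2 system cycles, and
+# get_sys_phases(4, 2, 7) returns (cmd_phase, dat_phase) = (0, 1).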
+
+# PHY Pads Transformers ----------------------------------------------------------------------------
+
+class PHYPadsReducer:
+    """PHY Pads Reducer
+
+    Reduce DRAM pads to only use specific modules.
+
+    For testing purposes, we often need to use only some of the DRAM modules. PHYPadsReducer allows
+    selecting specific modules and avoids re-defining DRAM pins in the Platform for this.
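+
+    Illustrative use (pad request name assumed)::
+
+        pads = PHYPadsReducer(platform.request("ddr3", 0), modules=[0, 1])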
+    """
+    def __init__(self, pads, modules):
+        self.pads    = pads
+        self.modules = modules
+
+    def __getattr__(self, name):
+        if name in ["dq"]:
+            return Array([getattr(self.pads, name)[8*i + j]
+                for i in self.modules
+                for j in range(8)])
+        if name in ["dm", "dqs", "dqs_p", "dqs_n"]:
+            return Array([getattr(self.pads, name)[i] for i in self.modules])
+        else:
+            return getattr(self.pads, name)
+
+class PHYPadsCombiner:
+    """PHY Pads Combiner
+
+    Combine DRAM pads from fully dissociated chips in a unique DRAM pads structure.
+
+    Most generally, DRAM chips share command/address lines between chips (using a fly-by
+    topology since DDR3). On some boards, the DRAM chips use separate command/address lines,
+    and this combiner can be used to re-create a single pads structure (compatible with
+    LiteDRAM's PHYs) so that a single DRAM controller can drive multiple fully dissociated DRAM chips.
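+
+    Illustrative use (pad request names assumed)::
+
+        pads = PHYPadsCombiner([platform.request("ddr3", 0),
+                                platform.request("ddr3", 1)])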
+    """
+    def __init__(self, pads):
+        if not isinstance(pads, list):
+            self.groups = [pads]
+        else:
+            self.groups = pads
+        self.sel = 0
+
+    def sel_group(self, n):
+        self.sel = n
+
+    def __getattr__(self, name):
+        if name in ["dm", "dq", "dqs", "dqs_p", "dqs_n"]:
+            return Array([getattr(self.groups[j], name)[i]
+                for i in range(len(getattr(self.groups[0], name)))
+                for j in range(len(self.groups))])
+        else:
+            return getattr(self.groups[self.sel], name)
+
+# BitSlip ------------------------------------------------------------------------------------------
+
+class BitSlip(Elaboratable):
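+    # Delays the dw-bit input by a selectable number of bit positions: each
+    # `slp` pulse shifts the output window by one bit within the last
+    # `cycles+1` captured words, which is used to align captured DQ data.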
+    def __init__(self, dw, rst=None, slp=None, cycles=1):
+        self.i = Signal(dw)
+        self.o = Signal(dw)
+        self.rst = Signal() if rst is None else rst
+        self.slp = Signal() if slp is None else slp
+        self._cycles = cycles
+
+    def elaborate(self, platform):
+        m = Module()
+
+        dw = len(self.i)
+        value = Signal(range(self._cycles*dw))
+        with m.If(self.slp):
+            m.d.sync += value.eq(value+1)
+        with m.Elif(self.rst):
+            m.d.sync += value.eq(0)
+
+        r = Signal((self._cycles+1)*dw, reset_less=True)
+        m.d.sync += r.eq(Cat(r[dw:], self.i))
+        with m.Switch(value):
+            for i in range(self._cycles*dw):
+                with m.Case(i):
+                    m.d.comb += self.o.eq(r[i:dw+i])
+
+        return m
+
+# DQS Pattern --------------------------------------------------------------------------------------
+
+class DQSPattern(Elaboratable):
+    def __init__(self, preamble=None, postamble=None, wlevel_en=0, wlevel_strobe=0, register=False):
+        self.preamble  = Signal() if preamble  is None else preamble
+        self.postamble = Signal() if postamble is None else postamble
+        self.o = Signal(8)
+        self._wlevel_en = wlevel_en
+        self._wlevel_strobe = wlevel_strobe
+        self._register = register
+
+    def elaborate(self, platform):
+        m = Module()
+
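+        # The 8-bit pattern covers one system clock at the DQS rate (4 phases,
+        # both edges): 0b01010101 is the regular toggling strobe, the
+        # preamble/postamble values gate the toggling at the edges of a write
+        # burst, and write leveling emits a single pulse per strobe request.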
+        with m.If(self.preamble):
+            m.d.comb += self.o.eq(0b00010101)
+        with m.Elif(self.postamble):
+            m.d.comb += self.o.eq(0b01010100)
+        with m.Elif(self._wlevel_en):
+            with m.If(self._wlevel_strobe):
+                m.d.comb += self.o.eq(0b00000001)
+            with m.Else():
+                m.d.comb += self.o.eq(0b00000000)
+        with m.Else():
+            m.d.comb += self.o.eq(0b01010101)
+
+        if self._register:
+            o = Signal.like(self.o)
+            m.d.sync += o.eq(self.o)
+            self.o = o
+
+        return m
+
+# Settings -----------------------------------------------------------------------------------------
+
+class Settings:
+    def set_attributes(self, attributes):
+        for k, v in attributes.items():
+            setattr(self, k, v)
+
+
+class PhySettings(Settings):
+    def __init__(self, phytype, memtype, databits, dfi_databits,
+                 nphases,
+                 rdphase, wrphase,
+                 rdcmdphase, wrcmdphase,
+                 cl, read_latency, write_latency, nranks=1, cwl=None):
+        self.set_attributes(locals())
+        self.cwl = cl if cwl is None else cwl
+        self.is_rdimm = False
+
+    # Optional DDR3/DDR4 electrical settings:
+    # rtt_nom: Non-Writes on-die termination impedance
+    # rtt_wr: Writes on-die termination impedance
+    # ron: Output driver impedance
+    def add_electrical_settings(self, rtt_nom, rtt_wr, ron):
+        assert self.memtype in ["DDR3", "DDR4"]
+        self.set_attributes(locals())
+
+    # Optional RDIMM configuration
+    def set_rdimm(self, tck, rcd_pll_bypass, rcd_ca_cs_drive, rcd_odt_cke_drive, rcd_clk_drive):
+        assert self.memtype == "DDR4"
+        self.is_rdimm = True
+        self.set_attributes(locals())
+
+class GeomSettings(Settings):
+    def __init__(self, bankbits, rowbits, colbits):
+        self.set_attributes(locals())
+        self.addressbits = max(rowbits, colbits)
+
+
+class TimingSettings(Settings):
+    def __init__(self, tRP, tRCD, tWR, tWTR, tREFI, tRFC, tFAW, tCCD, tRRD, tRC, tRAS, tZQCS):
+        self.set_attributes(locals())
+
+# Layouts/Interface --------------------------------------------------------------------------------
+
+def cmd_layout(address_width):
+    return [
+        ("valid",            1, DIR_FANOUT),
+        ("ready",            1, DIR_FANIN),
+        ("we",               1, DIR_FANOUT),
+        ("addr", address_width, DIR_FANOUT),
+        ("lock",             1, DIR_FANIN), # only used internally
+
+        ("wdata_ready",      1, DIR_FANIN),
+        ("rdata_valid",      1, DIR_FANIN)
+    ]
+
+def data_layout(data_width):
+    return [
+        ("wdata",       data_width, DIR_FANOUT),
+        ("wdata_we", data_width//8, DIR_FANOUT),
+        ("rdata",       data_width, DIR_FANIN)
+    ]
+
+def cmd_description(address_width):
+    return [
+        ("we",   1),
+        ("addr", address_width)
+    ]
+
+def wdata_description(data_width):
+    return [
+        ("data", data_width),
+        ("we",   data_width//8)
+    ]
+
+def rdata_description(data_width):
+    return [("data", data_width)]
+
+def cmd_request_layout(a, ba):
+    return [
+        ("a",     a),
+        ("ba",   ba),
+        ("cas",   1),
+        ("ras",   1),
+        ("we",    1)
+    ]
+
+def cmd_request_rw_layout(a, ba):
+    return cmd_request_layout(a, ba) + [
+        ("is_cmd", 1),
+        ("is_read", 1),
+        ("is_write", 1)
+    ]
+
+
+class LiteDRAMInterface(Record):
+    def __init__(self, address_align, settings):
+        rankbits = log2_int(settings.phy.nranks)
+        self.address_align = address_align
+        self.address_width = settings.geom.rowbits + settings.geom.colbits + rankbits - address_align
+        self.data_width    = settings.phy.dfi_databits*settings.phy.nphases
+        self.nbanks   = settings.phy.nranks*(2**settings.geom.bankbits)
+        self.nranks   = settings.phy.nranks
+        self.settings = settings
+
+        layout = [("bank"+str(i), cmd_layout(self.address_width)) for i in range(self.nbanks)]
+        layout += data_layout(self.data_width)
+        Record.__init__(self, layout)
+
+# Ports --------------------------------------------------------------------------------------------
+
+class LiteDRAMNativePort(Settings):
+    def __init__(self, mode, address_width, data_width, clock_domain="sys", id=0):
+        self.set_attributes(locals())
+
+        self.lock = Signal()
+
+        self.cmd   = stream.Endpoint(cmd_description(address_width))
+        self.wdata = stream.Endpoint(wdata_description(data_width))
+        self.rdata = stream.Endpoint(rdata_description(data_width))
+
+        self.flush = Signal()
+
+        # retro-compatibility # FIXME: remove
+        self.aw = self.address_width
+        self.dw = self.data_width
+        self.cd = self.clock_domain
+
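+    # Address mapping helpers: the user address carries a bank field
+    # `bank_bits` wide starting at bit `cba_shift`; get_bank_address() extracts
+    # that field and get_row_column_address() returns the remaining row/column
+    # bits with the bank field removed.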
+    def get_bank_address(self, bank_bits, cba_shift):
+        cba_upper = cba_shift + bank_bits
+        return self.cmd.addr[cba_shift:cba_upper]
+
+    def get_row_column_address(self, bank_bits, rca_bits, cba_shift):
+        cba_upper = cba_shift + bank_bits
+        if cba_shift < rca_bits:
+            if cba_shift:
+                return Cat(self.cmd.addr[:cba_shift], self.cmd.addr[cba_upper:])
+            else:
+                return self.cmd.addr[cba_upper:]
+        else:
+            return self.cmd.addr[:cba_shift]
+
+
+class LiteDRAMNativeWritePort(LiteDRAMNativePort):
+    def __init__(self, *args, **kwargs):
+        LiteDRAMNativePort.__init__(self, "write", *args, **kwargs)
+
+
+class LiteDRAMNativeReadPort(LiteDRAMNativePort):
+    def __init__(self, *args, **kwargs):
+        LiteDRAMNativePort.__init__(self, "read", *args, **kwargs)
+
+
+# Timing Controllers -------------------------------------------------------------------------------
+
+class tXXDController(Elaboratable):
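+    # Generic tXXD timing enforcer: once a command is accepted (`valid`),
+    # `ready` deasserts until `txxd` controller cycles have elapsed; when
+    # `txxd` is None the constraint is disabled and `ready` stays high.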
+    def __init__(self, txxd):
+        self.valid = Signal()
+        self.ready = ready = Signal(reset=txxd is None)
+        self._txxd = txxd
+        #ready.attr.add("no_retiming") TODO
+
+    def elaborate(self, platform):
+        m = Module()
+
+        txxd = self._txxd
+        if txxd is not None:
+            count = Signal(range(max(txxd, 2)))
+            with m.If(self.valid):
+                m.d.sync += [
+                    count.eq(txxd-1),
+                    self.ready.eq((txxd - 1) == 0),
+                ]
+            with m.Else():
+                m.d.sync += count.eq(count-1)
+                with m.If(count == 1):
+                    m.d.sync += self.ready.eq(1)
+        return m
+
+
+class tFAWController(Elaboratable):
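+    # tFAW enforcer: tracks command acceptances over a sliding window of
+    # `tfaw` controller cycles and deasserts `ready` when a fifth activation
+    # would violate the four-activate-window constraint; disabled when `tfaw`
+    # is None.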
+    def __init__(self, tfaw):
+        self.valid = Signal()
+        self.ready = Signal(reset=1)
+        self._tfaw = tfaw
+        #ready.attr.add("no_retiming") TODO
+
+    def elaborate(self, platform):
+        m = Module()
+
+        tfaw = self._tfaw
+        if tfaw is not None:
+            count  = Signal(range(max(tfaw, 2)))
+            window = Signal(tfaw)
+            m.d.sync += window.eq(Cat(self.valid, window))
+            m.d.comb += count.eq(reduce(add, [window[i] for i in range(tfaw)]))
+            with m.If(count < 4):
+                with m.If(count == 3):
+                    m.d.sync += self.ready.eq(~self.valid)
+                with m.Else():
+                    m.d.sync += self.ready.eq(1)
+
+        return m
diff --git a/gram/compat.py b/gram/compat.py
new file mode 100644 (file)
index 0000000..938ce67
--- /dev/null
@@ -0,0 +1,101 @@
+from nmigen import *
+
+__ALL__ = ["delayed_enter", "RoundRobin", "Timeline"]
+
+def delayed_enter(m, src, dst, delay):
+    assert delay > 0
+
+    for i in range(delay):
+        if i == 0:
+            statename = src
+        else:
+            statename = "{}-{}".format(src, i)
+
+        if i == delay-1:
+            deststate = dst
+        else:
+            deststate = "{}-{}".format(src, i+1)
+
+        with m.State(statename):
+            m.next = deststate
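+# Example (illustrative): delayed_enter(m, "tRP", "Activate", 3) creates the
+# state chain "tRP" -> "tRP-1" -> "tRP-2" -> "Activate", so an FSM entering
+# "tRP" reaches "Activate" three cycles later.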
+
+(SP_WITHDRAW, SP_CE) = range(2)
+
+class RoundRobin(Elaboratable):
+    def __init__(self, n, switch_policy=SP_WITHDRAW):
+        self.request = Signal(n)
+        self.grant = Signal(range(max(2, n)))
+        self.switch_policy = switch_policy
+        if self.switch_policy == SP_CE:
+            self.ce = Signal()
+
+    def elaborate(self, platform):
+        m = Module()
+
+        n = len(self.request)
+        if n > 1:
+            def grant_next(i):
+                # Grant the closest asserted request after the current grantee,
+                # scanning the requesters in round-robin order.
+                for k, j in enumerate(range(i+1, i+n)):
+                    t = j % n
+                    if k == 0:
+                        with m.If(self.request[t]):
+                            m.d.sync += self.grant.eq(t)
+                    else:
+                        with m.Elif(self.request[t]):
+                            m.d.sync += self.grant.eq(t)
+
+            def do_switch():
+                with m.Switch(self.grant):
+                    for i in range(n):
+                        with m.Case(i):
+                            if self.switch_policy == SP_WITHDRAW:
+                                with m.If(~self.request[i]):
+                                    grant_next(i)
+                            else:
+                                grant_next(i)
+
+            if self.switch_policy == SP_CE:
+                with m.If(self.ce):
+                    do_switch()
+            else:
+                do_switch()
+        else:
+            m.d.comb += self.grant.eq(0)
+
+        return m
+
+class Timeline(Elaboratable):
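+    # Example (illustrative): Timeline([(0, [x.eq(1)]), (5, [x.eq(0)])]) drives
+    # x high on the cycle after `trigger` pulses and back low five cycles
+    # later.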
+    def __init__(self, events):
+        self.trigger = Signal()
+        self._events = events
+
+    def elaborate(self, platform):
+        m = Module()
+
+        lastevent = max([e[0] for e in self._events])
+        counter = Signal(range(lastevent+1))
+
+        # Counter incrementation
+        # (with overflow handling)
+        if (lastevent & (lastevent + 1)) != 0:
+            with m.If(counter == lastevent):
+                m.d.sync += counter.eq(0)
+            with m.Else():
+                with m.If(counter != 0):
+                    m.d.sync += counter.eq(counter+1)
+                with m.Elif(self.trigger):
+                    m.d.sync += counter.eq(1)
+        else:
+            with m.If(counter != 0):
+                m.d.sync += counter.eq(counter+1)
+            with m.Elif(self.trigger):
+                m.d.sync += counter.eq(1)
+
+        for e in self._events:
+            if e[0] == 0:
+                with m.If(self.trigger & (counter == 0)):
+                    m.d.sync += e[1]
+            else:
+                with m.If(counter == e[0]):
+                    m.d.sync += e[1]
+
+        return m
diff --git a/gram/core/__init__.py b/gram/core/__init__.py
new file mode 100644 (file)
index 0000000..86aaac6
--- /dev/null
@@ -0,0 +1,29 @@
+from migen import *
+
+from litex.soc.interconnect.csr import AutoCSR
+
+from litedram.dfii import DFIInjector
+from litedram.core.controller import ControllerSettings, LiteDRAMController
+from litedram.core.crossbar import LiteDRAMCrossbar
+
+# Core ---------------------------------------------------------------------------------------------
+
+class LiteDRAMCore(Module, AutoCSR):
+    def __init__(self, phy, geom_settings, timing_settings, clk_freq, **kwargs):
+        self.submodules.dfii = DFIInjector(
+            addressbits = geom_settings.addressbits,
+            bankbits    = geom_settings.bankbits,
+            nranks      = phy.settings.nranks,
+            databits    = phy.settings.dfi_databits,
+            nphases     = phy.settings.nphases)
+        self.comb += self.dfii.master.connect(phy.dfi)
+
+        self.submodules.controller = controller = LiteDRAMController(
+            phy_settings    = phy.settings,
+            geom_settings   = geom_settings,
+            timing_settings = timing_settings,
+            clk_freq        = clk_freq,
+            **kwargs)
+        self.comb += controller.dfi.connect(self.dfii.slave)
+
+        self.submodules.crossbar = LiteDRAMCrossbar(controller.interface)
diff --git a/gram/core/bandwidth.py b/gram/core/bandwidth.py
new file mode 100644 (file)
index 0000000..58f7400
--- /dev/null
@@ -0,0 +1,101 @@
+# This file is Copyright (c) 2015 Sebastien Bourdeauducq <sb@m-labs.hk>
+# This file is Copyright (c) 2016-2019 Florent Kermarrec <florent@enjoy-digital.fr>
+# This file is Copyright (c) 2018 John Sully <john@csquare.ca>
+# This file is Copyright (c) 2020 LambdaConcept <contact@lambdaconcept.com>
+# License: BSD
+
+"""LiteDRAM Bandwidth."""
+
+from nmigen import *
+
+from lambdasoc.periph import Peripheral
+
+__ALL__ = ["Bandwidth"]
+
+# Bandwidth ----------------------------------------------------------------------------------------
+
+class Bandwidth(Peripheral, Elaboratable):
+    """Measures LiteDRAM bandwidth
+
+    This module works by counting the number of read/write commands issued by
+    the controller during a fixed time period. To copy the values registered
+    during the last finished period, the user must write to the `update` register.
+
+    Parameters
+    ----------
+    cmd : Endpoint(cmd_request_rw_layout)
+        Multiplexer endpoint on which all read/write requests are being sent
+    data_width : int, in
+        Data width that can be read back from CSR
+    period_bits : int, in
+        Defines length of bandwidth measurement period = 2^period_bits
+
+    Attributes
+    ----------
+    update : CSR, in
+        Copy the values from last finished period to the status registers
+    nreads : CSRStatus, out
+        Number of READ commands issued during a period
+    nwrites : CSRStatus, out
+        Number of WRITE commands issued during a period
+    data_width : CSRStatus, out
+        Can be read to calculate bandwidth in bits/sec as:
+            bandwidth = (nreads+nwrites) * data_width / period
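+
+    Worked example (illustrative numbers): with data_width=128, period_bits=24
+    and a 100 MHz controller clock, one period lasts 2**24 / 100e6 ~ 0.168 s;
+    if nreads + nwrites reads back as 1_000_000, the measured bandwidth is
+    about 1_000_000 * 128 / 0.168 ~ 763 Mbps.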
+    """
+    def __init__(self, cmd, data_width, period_bits=24):
+        self.update     = CSR()
+        self.nreads     = CSRStatus(period_bits + 1)
+        self.nwrites    = CSRStatus(period_bits + 1)
+        self.data_width = CSRStatus(bits_for(data_width), reset=data_width)
+        self._cmd = cmd
+        self._period_bits = period_bits
+
+    def elaborate(self, platform):
+        m = Module()
+
+        cmd = self._cmd
+        cmd_valid    = Signal()
+        cmd_ready    = Signal()
+        cmd_is_read  = Signal()
+        cmd_is_write = Signal()
+        m.d.sync += [
+            cmd_valid.eq(cmd.valid),
+            cmd_ready.eq(cmd.ready),
+            cmd_is_read.eq(cmd.is_read),
+            cmd_is_write.eq(cmd.is_write)
+        ]
+
+        counter   = Signal(self._period_bits)
+        period    = Signal()
+        nreads    = Signal(self._period_bits + 1)
+        nwrites   = Signal(self._period_bits + 1)
+        nreads_r  = Signal(self._period_bits + 1)
+        nwrites_r = Signal(self._period_bits + 1)
+        m.d.sync += Cat(counter, period).eq(counter + 1)
+
+        with m.If(period):
+            m.d.sync += [
+                nreads_r.eq(nreads),
+                nwrites_r.eq(nwrites),
+                nreads.eq(0),
+                nwrites.eq(0),
+            ]
+
+            with m.If(cmd_valid & cmd_ready):
+                with m.If(cmd_is_read):
+                    m.d.sync += nreads.eq(1)
+
+                with m.If(cmd_is_write):
+                    m.d.sync += nwrites.eq(1)
+        with m.Elif(cmd_valid & cmd_ready):
+            with m.If(cmd_is_read):
+                m.d.sync += nreads.eq(nreads + 1)
+
+            with m.If(cmd_is_write):
+                m.d.sync += nwrites.eq(nwrites + 1)
+
+        with m.If(self.update.re):
+            m.d.sync += [
+                self.nreads.status.eq(nreads_r),
+                self.nwrites.status.eq(nwrites_r),
+            ]
+
+        return m
diff --git a/gram/core/bankmachine.py b/gram/core/bankmachine.py
new file mode 100644 (file)
index 0000000..b22a996
--- /dev/null
@@ -0,0 +1,238 @@
+# This file is Copyright (c) 2015 Sebastien Bourdeauducq <sb@m-labs.hk>
+# This file is Copyright (c) 2016-2019 Florent Kermarrec <florent@enjoy-digital.fr>
+# This file is Copyright (c) 2020 LambdaConcept <contact@lambdaconcept.com>
+# License: BSD
+
+"""LiteDRAM BankMachine (Rows/Columns management)."""
+
+import math
+
+from nmigen import *
+
+from gram.common import *
+from gram.core.multiplexer import *
+from gram.compat import delayed_enter
+import gram.stream as stream
+
+# AddressSlicer ------------------------------------------------------------------------------------
+
+class _AddressSlicer:
+    """Helper for extracting row/col from address
+
+    The column occupies the lower bits of the address and the row the higher bits. The address
+    has a forced alignment, so the column does not contain the alignment bits.
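+
+    Example (illustrative): with colbits=10 and address_align=3 (BL8), row(addr)
+    returns addr[7:] and col(addr) returns the low 7 bits padded back with
+    3 zero alignment bits.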
+    """
+    def __init__(self, colbits, address_align):
+        self.colbits       = colbits
+        self.address_align = address_align
+
+    def row(self, address):
+        split = self.colbits - self.address_align
+        return address[split:]
+
+    def col(self, address):
+        split = self.colbits - self.address_align
+        return Cat(Const(0, self.address_align), address[:split])
+
+# BankMachine --------------------------------------------------------------------------------------
+
+class BankMachine(Elaboratable):
+    """Converts requests from ports into DRAM commands
+
+    BankMachine abstracts a single DRAM bank by keeping track of the currently
+    selected row. It converts requests from LiteDRAMCrossbar targeted at
+    that bank into DRAM commands that go to the Multiplexer, inserting any
+    needed activate/precharge commands (with optional auto-precharge). It also
+    keeps track of and enforces some DRAM timings (other timings are enforced in
+    the Multiplexer).
+
+    BankMachines work independently from the data path (which connects
+    LiteDRAMCrossbar with the Multiplexer directly).
+
+    The stream of requests from LiteDRAMCrossbar is queued, so that the next request
+    can be "looked ahead", and auto-precharge can be performed (if enabled in
+    the settings).
+
+    Lock (cmd_layout.lock) is used to synchronise with LiteDRAMCrossbar. It is
+    held when:
+     - there is a valid command awaiting in `cmd_buffer_lookahead` - this buffer
+       becomes ready simply when the next data gets fetched to the `cmd_buffer`
+     - there is a valid command in `cmd_buffer` - `cmd_buffer` becomes ready
+       when the BankMachine sends wdata_ready/rdata_valid back to the crossbar
+
+    Parameters
+    ----------
+    n : int
+        Bank number
+    address_width : int
+        LiteDRAMInterface address width
+    address_align : int
+        Address alignment depending on burst length
+    nranks : int
+        Number of separate DRAM chips (width of chip select)
+    settings : ControllerSettings
+        LiteDRAMController settings
+
+    Attributes
+    ----------
+    req : Record(cmd_layout)
+        Stream of requests from LiteDRAMCrossbar
+    refresh_req : Signal(), in
+        Indicates that refresh needs to be done, connects to Refresher.cmd.valid
+    refresh_gnt : Signal(), out
+        Indicates that refresh permission has been granted, satisfying timings
+    cmd : Endpoint(cmd_request_rw_layout)
+        Stream of commands to the Multiplexer
+    """
+    def __init__(self, n, address_width, address_align, nranks, settings):
+        self.req = req = Record(cmd_layout(address_width))
+        self.refresh_req = refresh_req = Signal()
+        self.refresh_gnt = refresh_gnt = Signal()
+        self._n = n
+        self._address_align = address_align
+        self._settings = settings
+
+        a  = settings.geom.addressbits
+        ba = settings.geom.bankbits + log2_int(nranks)
+        self.cmd = cmd = stream.Endpoint(cmd_request_rw_layout(a, ba))
+
+    def elaborate(self, platform):
+        m = Module()
+
+        n, req, cmd = self._n, self.req, self.cmd
+        settings, address_align = self._settings, self._address_align
+
+        auto_precharge = Signal()
+
+        # Command buffer ---------------------------------------------------------------------------
+        cmd_buffer_layout    = [("we", 1), ("addr", len(req.addr))]
+        cmd_buffer_lookahead = stream.SyncFIFO(
+            cmd_buffer_layout, settings.cmd_buffer_depth,
+            buffered=settings.cmd_buffer_buffered)
+        cmd_buffer = stream.Buffer(cmd_buffer_layout) # 1 depth buffer to detect row change
+        m.submodules += cmd_buffer_lookahead, cmd_buffer
+        m.d.comb += [
+            req.connect(cmd_buffer_lookahead.sink, keep={"valid", "ready", "we", "addr"}),
+            cmd_buffer_lookahead.source.connect(cmd_buffer.sink),
+            cmd_buffer.source.ready.eq(req.wdata_ready | req.rdata_valid),
+            req.lock.eq(cmd_buffer_lookahead.source.valid | cmd_buffer.source.valid),
+        ]
+
+        slicer = _AddressSlicer(settings.geom.colbits, address_align)
+
+        # Row tracking -----------------------------------------------------------------------------
+        row        = Signal(settings.geom.rowbits)
+        row_opened = Signal()
+        row_hit    = Signal()
+        row_open   = Signal()
+        row_close  = Signal()
+        m.d.comb += row_hit.eq(row == slicer.row(cmd_buffer.source.addr))
+        with m.If(row_close):
+            m.d.sync += row_opened.eq(0)
+        with m.Elif(row_open):
+            m.d.sync += [
+                row_opened.eq(1),
+                row.eq(slicer.row(cmd_buffer.source.addr)),
+            ]
+
+        # Address generation -----------------------------------------------------------------------
+        row_col_n_addr_sel = Signal()
+        m.d.comb += cmd.ba.eq(n)
+        with m.If(row_col_n_addr_sel):
+            m.d.comb += cmd.a.eq(slicer.row(cmd_buffer.source.addr))
+        with m.Else():
+            m.d.comb += cmd.a.eq((auto_precharge << 10) | slicer.col(cmd_buffer.source.addr))
+
+        # tWTP (write-to-precharge) controller -----------------------------------------------------
+        write_latency = math.ceil(settings.phy.cwl / settings.phy.nphases)
+        precharge_time = write_latency + settings.timing.tWR + settings.timing.tCCD # AL=0
+        m.submodules.twtpcon = twtpcon = tXXDController(precharge_time)
+        m.d.comb += twtpcon.valid.eq(cmd.valid & cmd.ready & cmd.is_write)
+
+        # tRC (activate-activate) controller -------------------------------------------------------
+        m.submodules.trccon = trccon = tXXDController(settings.timing.tRC)
+        m.d.comb += trccon.valid.eq(cmd.valid & cmd.ready & row_open)
+
+        # tRAS (activate-precharge) controller -----------------------------------------------------
+        m.submodules.trascon = trascon = tXXDController(settings.timing.tRAS)
+        m.d.comb += trascon.valid.eq(cmd.valid & cmd.ready & row_open)
+
+        # Auto Precharge generation ----------------------------------------------------------------
+        # generate auto precharge when current and next cmds are to different rows
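+        # Example of the intent (hypothetical addresses): if the buffered command targets
+        # row 0x10 and the lookahead command targets row 0x20 of this bank, A10 is raised
+        # on the CAS below (auto_precharge << 10) so the row is closed without going
+        # through the explicit Precharge state.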
+        if settings.with_auto_precharge:
+            with m.If(cmd_buffer_lookahead.source.valid & cmd_buffer.source.valid):
+                with m.If(slicer.row(cmd_buffer_lookahead.source.addr) != slicer.row(cmd_buffer.source.addr)):
+                    m.d.comb += auto_precharge.eq(row_close == 0)
+
+        # Control and command generation FSM -------------------------------------------------------
+        # Note: tRRD, tFAW, tCCD, tWTR timings are enforced by the multiplexer
+        with m.FSM():
+            with m.State("Regular"):
+                with m.If(refresh_req):
+                    m.next = "Refresh"
+                with m.Elif(cmd_buffer.source.valid):
+                    with m.If(row_opened):
+                        with m.If(row_hit):
+                            m.d.comb += [
+                                cmd.valid.eq(1),
+                                cmd.cas.eq(1),
+                            ]
+                            with m.If(cmd_buffer.source.we):
+                                m.d.comb += [
+                                    req.wdata_ready.eq(cmd.ready),
+                                    cmd.is_write.eq(1),
+                                    cmd.we.eq(1),
+                                ]
+                            with m.Else():
+                                m.d.comb += [
+                                    req.rdata_valid.eq(cmd.ready),
+                                    cmd.is_read.eq(1),
+                                ]
+                            with m.If(cmd.ready & auto_precharge):
+                                m.next = "Autoprecharge"
+                        with m.Else():
+                            m.next = "Precharge"
+                    with m.Else():
+                        m.next = "Activate"
+
+            with m.State("Precharge"):
+                m.d.comb += row_close.eq(1)
+
+                with m.If(twtpcon.ready & trascon.ready):
+                    m.d.comb += [
+                        cmd.valid.eq(1),
+                        cmd.ras.eq(1),
+                        cmd.we.eq(1),
+                        cmd.is_cmd.eq(1),
+                    ]
+
+                    with m.If(cmd.ready):
+                        m.next = "tRP"
+
+            with m.State("Autoprecharge"):
+                m.d.comb += row_close.eq(1)
+
+                with m.If(twtpcon.ready & trascon.ready):
+                    m.next = "tRP"
+
+            with m.State("Activate"):
+                with m.If(trccon.ready):
+                    m.d.comb += [
+                        row_col_n_addr_sel.eq(1),
+                        row_open.eq(1),
+                        cmd.valid.eq(1),
+                        cmd.is_cmd.eq(1),
+                        cmd.ras.eq(1),
+                    ]
+                    with m.If(cmd.ready):
+                        m.next = "tRCD"
+
+            with m.State("Refresh"):
+                m.d.comb += [
+                    row_close.eq(1),
+                    cmd.is_cmd.eq(1),
+                ]
+
+                with m.If(twtpcon.ready):
+                    m.d.comb += refresh_gnt.eq(1)
+                with m.If(~refresh_req):
+                    m.next = "Regular"
+
+            delayed_enter(m, "TRP", "ACTIVATE", settings.timing.tRP - 1)
+            delayed_enter(m, "TRCD", "REGULAR", settings.timing.tRCD - 1)
+
+        return m
diff --git a/gram/core/controller.py b/gram/core/controller.py
new file mode 100644 (file)
index 0000000..7e6719d
--- /dev/null
@@ -0,0 +1,100 @@
+# This file is Copyright (c) 2015 Sebastien Bourdeauducq <sb@m-labs.hk>
+# This file is Copyright (c) 2016-2019 Florent Kermarrec <florent@enjoy-digital.fr>
+# This file is Copyright (c) 2020 LambdaConcept <contact@lambdaconcept.com>
+# License: BSD
+
+"""LiteDRAM Controller."""
+
+from nmigen import *
+
+from gram.common import *
+from gram.phy import dfi
+from gram.core.refresher import Refresher
+from gram.core.bankmachine import BankMachine
+from gram.core.multiplexer import Multiplexer
+
+# Settings -----------------------------------------------------------------------------------------
+
+class ControllerSettings(Settings):
+    def __init__(self,
+        # Command buffers
+        cmd_buffer_depth    = 8,
+        cmd_buffer_buffered = False,
+
+        # Read/Write times
+        read_time           = 32,
+        write_time          = 16,
+
+        # Bandwidth
+        with_bandwidth      = False,
+
+        # Refresh
+        with_refresh        = True,
+        refresh_cls         = Refresher,
+        refresh_zqcs_freq   = 1e0,
+        refresh_postponing  = 1,
+
+        # Auto-Precharge
+        with_auto_precharge = True,
+
+        # Address mapping
+        address_mapping     = "ROW_BANK_COL"):
+        self.set_attributes(locals())
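+    # Minimal usage sketch (illustrative values, not recommendations):
+    #   settings = ControllerSettings(cmd_buffer_depth=16, with_auto_precharge=False)
+    # Any keyword not overridden keeps the default listed above.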
+
+# Controller ---------------------------------------------------------------------------------------
+
+class LiteDRAMController(Elaboratable):
+    def __init__(self, phy_settings, geom_settings, timing_settings, clk_freq,
+        controller_settings=ControllerSettings()):
+        address_align = log2_int(burst_lengths[phy_settings.memtype])
+
+        # Settings ---------------------------------------------------------------------------------
+        self.settings        = controller_settings
+        self.settings.phy    = phy_settings
+        self.settings.geom   = geom_settings
+        self.settings.timing = timing_settings
+
+        self._clk_freq      = clk_freq
+        self._address_align = address_align
+        self._nranks        = phy_settings.nranks
+        self._nbanks        = 2**geom_settings.bankbits
+
+        # LiteDRAM Interface (User) ----------------------------------------------------------------
+        self.interface = interface = LiteDRAMInterface(address_align, self.settings)
+
+        # DFI Interface (Memory) -------------------------------------------------------------------
+        self.dfi = dfi.Interface(
+            addressbits = geom_settings.addressbits,
+            bankbits    = geom_settings.bankbits,
+            nranks      = phy_settings.nranks,
+            databits    = phy_settings.dfi_databits,
+            nphases     = phy_settings.nphases)
+
+    def elaborate(self, platform):
+        m = Module()
+
+        settings      = self.settings
+        interface     = self.interface
+        nranks        = self._nranks
+        nbanks        = self._nbanks
+        address_align = self._address_align
+
+        # Refresher --------------------------------------------------------------------------------
+        m.submodules.refresher = refresher = settings.refresh_cls(settings,
+            clk_freq   = self._clk_freq,
+            zqcs_freq  = settings.refresh_zqcs_freq,
+            postponing = settings.refresh_postponing)
+
+        # Bank Machines ----------------------------------------------------------------------------
+        bank_machines = []
+        for n in range(nranks*nbanks):
+            bank_machine = BankMachine(n,
+                address_width = interface.address_width,
+                address_align = address_align,
+                nranks        = nranks,
+                settings      = settings)
+            bank_machines.append(bank_machine)
+            m.submodules += bank_machine
+            m.d.comb += getattr(interface, "bank"+str(n)).connect(bank_machine.req)
+
+        # Multiplexer ------------------------------------------------------------------------------
+        m.submodules.multiplexer = self.multiplexer = Multiplexer(
+            settings      = settings,
+            bank_machines = bank_machines,
+            refresher     = refresher,
+            dfi           = self.dfi,
+            interface     = interface)
+
+        return m
+
+    def get_csrs(self):
+        return self.multiplexer.get_csrs()
diff --git a/gram/core/crossbar.py b/gram/core/crossbar.py
new file mode 100644 (file)
index 0000000..81200b5
--- /dev/null
@@ -0,0 +1,210 @@
+# This file is Copyright (c) 2015 Sebastien Bourdeauducq <sb@m-labs.hk>
+# This file is Copyright (c) 2016-2019 Florent Kermarrec <florent@enjoy-digital.fr>
+# This file is Copyright (c) 2018 John Sully <john@csquare.ca>
+# This file is Copyright (c) 2020 LambdaConcept <contact@lambdaconcept.com>
+# License: BSD
+
+"""LiteDRAM Crossbar."""
+
+from functools import reduce
+from operator import or_
+
+from nmigen import *
+
+from gram.common import *
+from gram.core.controller import *
+from gram.frontend.adaptation import *
+from gram.compat import RoundRobin
+import gram.stream as stream
+
+# LiteDRAMCrossbar ---------------------------------------------------------------------------------
+
+class LiteDRAMCrossbar(Module):
+    """Multiplexes LiteDRAMController (slave) between ports (masters)
+
+    To get a port to LiteDRAM, use the `get_port` method. It handles data width
+    conversion and clock domain crossing, returning a LiteDRAMNativePort.
+
+    The crossbar routes requests from the masters to the BankMachines
+    (bankN.cmd_layout) and connects the data path directly to the Multiplexer
+    (data_layout). It performs address translation based on the chosen
+    `controller.settings.address_mapping`.
+    Internally, all masters are multiplexed between the controller banks based
+    on the bank address (extracted from the presented address). Each bank has
+    a RoundRobin arbiter that selects among the masters that want to access
+    this bank and are not already locked.
+
+    Locks (cmd_layout.lock) ensure that, while a master is in a transaction
+    with a given bank (which may include multiple reads/writes), it is not
+    reassigned to another bank in the meantime.
+    A bank's arbiter considers a master as a candidate for selection if:
+     - the master's command is valid
+     - the master addresses the arbiter's bank
+     - the master is not locked
+       * i.e. it is not in the middle of a transaction with another bank
+       * i.e. no other bank's arbiter has granted this master (with that
+         bank's lock active)
+
+    Data ready/valid signals for banks are routed from bankmachines with
+    a latency that synchronizes them with the data coming over datapath.
+
+    Parameters
+    ----------
+    controller : LiteDRAMInterface
+        Interface to LiteDRAMController
+
+    Attributes
+    ----------
+    masters : [LiteDRAMNativePort, ...]
+        LiteDRAM memory ports
+    """
+    def __init__(self, controller):
+        self.controller = controller
+
+        self.rca_bits         = controller.address_width
+        self.nbanks           = controller.nbanks
+        self.nranks           = controller.nranks
+        self.cmd_buffer_depth = controller.settings.cmd_buffer_depth
+        self.read_latency     = controller.settings.phy.read_latency + 1
+        self.write_latency    = controller.settings.phy.write_latency + 1
+
+        self.bank_bits = log2_int(self.nbanks, False)
+        self.rank_bits = log2_int(self.nranks, False)
+
+        self.masters = []
+
+    def get_port(self, mode="both", data_width=None, clock_domain="sys", reverse=False):
+        if self.finalized:
+            raise FinalizeError
+
+        if data_width is None:
+            # use internal data_width when no width adaptation is requested
+            data_width = self.controller.data_width
+
+        # Crossbar port ----------------------------------------------------------------------------
+        port = LiteDRAMNativePort(
+            mode          = mode,
+            address_width = self.rca_bits + self.bank_bits - self.rank_bits,
+            data_width    = self.controller.data_width,
+            clock_domain  = "sys",
+            id            = len(self.masters))
+        self.masters.append(port)
+
+        # Clock domain crossing --------------------------------------------------------------------
+        if clock_domain != "sys":
+            new_port = LiteDRAMNativePort(
+                mode          = mode,
+                address_width = port.address_width,
+                data_width    = port.data_width,
+                clock_domain  = clock_domain,
+                id            = port.id)
+            self.submodules += LiteDRAMNativePortCDC(new_port, port)
+            port = new_port
+
+        # Data width conversion --------------------------------------------------------------------
+        if data_width != self.controller.data_width:
+            if data_width > self.controller.data_width:
+                addr_shift = -log2_int(data_width//self.controller.data_width)
+            else:
+                addr_shift = log2_int(self.controller.data_width//data_width)
+            new_port = LiteDRAMNativePort(
+                mode          = mode,
+                address_width = port.address_width + addr_shift,
+                data_width    = data_width,
+                clock_domain  = clock_domain,
+                id            = port.id)
+            self.submodules += ClockDomainsRenamer(clock_domain)(
+                LiteDRAMNativePortConverter(new_port, port, reverse))
+            port = new_port
+
+        return port
+
+    def do_finalize(self):
+        controller = self.controller
+        nmasters   = len(self.masters)
+
+        # Address mapping --------------------------------------------------------------------------
+        cba_shifts = {"ROW_BANK_COL": controller.settings.geom.colbits - controller.address_align}
+        cba_shift = cba_shifts[controller.settings.address_mapping]
+        m_ba      = [m.get_bank_address(self.bank_bits, cba_shift) for m in self.masters]
+        m_rca     = [m.get_row_column_address(self.bank_bits, self.rca_bits, cba_shift) for m in self.masters]
+
+        master_readys       = [0]*nmasters
+        master_wdata_readys = [0]*nmasters
+        master_rdata_valids = [0]*nmasters
+
+        arbiters = [RoundRobin(nmasters, SP_CE) for n in range(self.nbanks)]
+        self.submodules += arbiters
+
+        for nb, arbiter in enumerate(arbiters):
+            bank = getattr(controller, "bank"+str(nb))
+
+            # For each master, determine if another bank locks it ----------------------------------
+            master_locked = []
+            for nm, master in enumerate(self.masters):
+                locked = Signal()
+                for other_nb, other_arbiter in enumerate(arbiters):
+                    if other_nb != nb:
+                        other_bank = getattr(controller, "bank"+str(other_nb))
+                        locked = locked | (other_bank.lock & (other_arbiter.grant == nm))
+                master_locked.append(locked)
+
+            # Arbitrate ----------------------------------------------------------------------------
+            bank_selected  = [(ba == nb) & ~locked for ba, locked in zip(m_ba, master_locked)]
+            bank_requested = [bs & master.cmd.valid for bs, master in zip(bank_selected, self.masters)]
+            self.comb += [
+                arbiter.request.eq(Cat(*bank_requested)),
+                arbiter.ce.eq(~bank.valid & ~bank.lock)
+            ]
+
+            # Route requests -----------------------------------------------------------------------
+            self.comb += [
+                bank.addr.eq(Array(m_rca)[arbiter.grant]),
+                bank.we.eq(Array(self.masters)[arbiter.grant].cmd.we),
+                bank.valid.eq(Array(bank_requested)[arbiter.grant])
+            ]
+            master_readys = [master_ready | ((arbiter.grant == nm) & bank_selected[nm] & bank.ready)
+                for nm, master_ready in enumerate(master_readys)]
+            master_wdata_readys = [master_wdata_ready | ((arbiter.grant == nm) & bank.wdata_ready)
+                for nm, master_wdata_ready in enumerate(master_wdata_readys)]
+            master_rdata_valids = [master_rdata_valid | ((arbiter.grant == nm) & bank.rdata_valid)
+                for nm, master_rdata_valid in enumerate(master_rdata_valids)]
+
+        # Delay write/read signals based on their latency
+        for nm, master_wdata_ready in enumerate(master_wdata_readys):
+            for i in range(self.write_latency):
+                new_master_wdata_ready = Signal()
+                self.sync += new_master_wdata_ready.eq(master_wdata_ready)
+                master_wdata_ready = new_master_wdata_ready
+            master_wdata_readys[nm] = master_wdata_ready
+
+        for nm, master_rdata_valid in enumerate(master_rdata_valids):
+            for i in range(self.read_latency):
+                new_master_rdata_valid = Signal()
+                self.sync += new_master_rdata_valid.eq(master_rdata_valid)
+                master_rdata_valid = new_master_rdata_valid
+            master_rdata_valids[nm] = master_rdata_valid
+
+        for master, master_ready in zip(self.masters, master_readys):
+            self.comb += master.cmd.ready.eq(master_ready)
+        for master, master_wdata_ready in zip(self.masters, master_wdata_readys):
+            self.comb += master.wdata.ready.eq(master_wdata_ready)
+        for master, master_rdata_valid in zip(self.masters, master_rdata_valids):
+            self.comb += master.rdata.valid.eq(master_rdata_valid)
+
+        # Route data writes ------------------------------------------------------------------------
+        wdata_cases = {}
+        for nm, master in enumerate(self.masters):
+            wdata_cases[2**nm] = [
+                controller.wdata.eq(master.wdata.data),
+                controller.wdata_we.eq(master.wdata.we)
+            ]
+        wdata_cases["default"] = [
+            controller.wdata.eq(0),
+            controller.wdata_we.eq(0)
+        ]
+        self.comb += Case(Cat(*master_wdata_readys), wdata_cases)
+
+        # Route data reads -------------------------------------------------------------------------
+        for master in self.masters:
+            self.comb += master.rdata.data.eq(controller.rdata)
diff --git a/gram/core/multiplexer.py b/gram/core/multiplexer.py
new file mode 100644 (file)
index 0000000..3a9203d
--- /dev/null
@@ -0,0 +1,416 @@
+# This file is Copyright (c) 2015 Sebastien Bourdeauducq <sb@m-labs.hk>
+# This file is Copyright (c) 2016-2019 Florent Kermarrec <florent@enjoy-digital.fr>
+# This file is Copyright (c) 2018 John Sully <john@csquare.ca>
+# This file is Copyright (c) 2020 LambdaConcept <contact@lambdaconcept.com>
+# License: BSD
+
+"""LiteDRAM Multiplexer."""
+
+import math
+from functools import reduce
+from operator import or_, and_
+
+from nmigen import *
+from nmigen.lib.coding import Decoder
+
+from lambdasoc.periph import Peripheral
+
+from gram.common import *
+from gram.core.bandwidth import Bandwidth
+import gram.stream as stream
+from gram.compat import RoundRobin, delayed_enter
+
+# _CommandChooser ----------------------------------------------------------------------------------
+
+class _CommandChooser(Elaboratable):
+    """Arbitrates between requests, filtering them based on their type
+
+    Uses RoundRobin to choose current request, filters requests based on
+    `want_*` signals.
+
+    Parameters
+    ----------
+    requests : [Endpoint(cmd_request_rw_layout), ...]
+        Request streams to consider for arbitration
+
+    Attributes
+    ----------
+    want_reads : Signal, in
+        Consider read requests
+    want_writes : Signal, in
+        Consider write requests
+    want_cmds : Signal, in
+        Consider command requests (without ACT)
+    want_activates : Signal, in
+        Also consider ACT commands
+    cmd : Endpoint(cmd_request_rw_layout)
+        Currently selected request stream (when ~cmd.valid, cas/ras/we are 0)
+    """
+    def __init__(self, requests):
+        self.want_reads     = Signal()
+        self.want_writes    = Signal()
+        self.want_cmds      = Signal()
+        self.want_activates = Signal()
+
+        self._requests = requests
+        a  = len(requests[0].a)
+        ba = len(requests[0].ba)
+
+        # cas/ras/we are 0 when valid is inactive
+        self.cmd = stream.Endpoint(cmd_request_rw_layout(a, ba))
+
+    def elaborate(self, platform):
+        m = Module()
+
+        n = len(self._requests)
+
+        valids = Signal(n)
+        for i, request in enumerate(self._requests):
+            is_act_cmd = request.ras & ~request.cas & ~request.we
+            command = request.is_cmd & self.want_cmds & (~is_act_cmd | self.want_activates)
+            read = request.is_read == self.want_reads
+            write = request.is_write == self.want_writes
+            m.d.comb += valids[i].eq(request.valid & (command | (read & write)))
+
+        m.submodules.arbiter = arbiter = RoundRobin(n, SP_CE)
+        choices = Array(valids[i] for i in range(n))
+        m.d.comb += [
+            arbiter.request.eq(valids),
+            self.cmd.valid.eq(choices[arbiter.grant])
+        ]
+
+        for name in ["a", "ba", "is_read", "is_write", "is_cmd"]:
+            choices = Array(getattr(req, name) for req in self._requests)
+            m.d.comb += getattr(self.cmd, name).eq(choices[arbiter.grant])
+
+        for name in ["cas", "ras", "we"]:
+            # we should only assert those signals when valid is 1
+            choices = Array(getattr(req, name) for req in self._requests)
+            with m.If(self.cmd.valid):
+                m.d.comb += getattr(self.cmd, name).eq(choices[arbiter.grant])
+
+        for i, request in enumerate(self._requests):
+            with m.If(self.cmd.valid & self.cmd.ready & (arbiter.grant == i)):
+                m.d.comb += request.ready.eq(1)
+
+        # Arbitrate if a command is being accepted or if the command is not valid to ensure a valid
+        # command is selected when cmd.ready goes high.
+        m.d.comb += arbiter.ce.eq(self.cmd.ready | ~self.cmd.valid)
+
+        return m
+
+    # helpers
+    def accept(self):
+        return self.cmd.valid & self.cmd.ready
+
+    def activate(self):
+        return self.cmd.ras & ~self.cmd.cas & ~self.cmd.we
+
+    def write(self):
+        return self.cmd.is_write
+
+    def read(self):
+        return self.cmd.is_read
+
+# _Steerer -----------------------------------------------------------------------------------------
+
+(STEER_NOP, STEER_CMD, STEER_REQ, STEER_REFRESH) = range(4)
+
+class _Steerer(Elaboratable):
+    """Connects selected request to DFI interface
+
+    cas/ras/we/is_write/is_read are connected only when `cmd.valid & cmd.ready`.
+    Rank bits are decoded and used to drive cs_n in multi-rank systems,
+    STEER_REFRESH always enables all ranks.
+
+    Parameters
+    ----------
+    commands : [Endpoint(cmd_request_rw_layout), ...]
+        Command streams to choose from. Must be of len=4 in the order:
+            NOP, CMD, REQ, REFRESH
+        NOP can be of type Record(cmd_request_rw_layout) instead, so that it is
+        always considered invalid (because of lack of the `valid` attribute).
+    dfi : dfi.Interface
+        DFI interface connected to PHY
+
+    Attributes
+    ----------
+    sel : [Signal(range(len(commands))), ...], in
+        Signals for selecting which request gets connected to the corresponding
+        DFI phase. The signals should take one of the values from STEER_* to
+        select given source.
+    """
+    def __init__(self, commands, dfi):
+        self._commands = commands
+        self._dfi = dfi
+        ncmd = len(commands)
+        nph  = len(dfi.phases)
+        self.sel = [Signal(range(ncmd)) for i in range(nph)]
+
+    def elaborate(self, platform):
+        m = Module()
+
+        commands = self._commands
+        dfi = self._dfi
+
+        def valid_and(cmd, attr):
+            if not hasattr(cmd, "valid"):
+                return 0
+            else:
+                return cmd.valid & cmd.ready & getattr(cmd, attr)
+
+        for i, (phase, sel) in enumerate(zip(dfi.phases, self.sel)):
+            nranks   = len(phase.cs_n)
+            rankbits = log2_int(nranks)
+            if hasattr(phase, "reset_n"):
+                m.d.comb += phase.reset_n.eq(1)
+            m.d.comb += phase.cke.eq(Replicate(Signal(reset=1), nranks))
+            if hasattr(phase, "odt"):
+                # FIXME: add dynamic drive for multi-rank (will be needed for high frequencies)
+                m.d.comb += phase.odt.eq(Replicate(Signal(reset=1), nranks))
+            if rankbits:
+                rank_decoder = Decoder(nranks)
+                m.submodules += rank_decoder
+                m.d.comb += rank_decoder.i.eq((Array(cmd.ba[-rankbits:] for cmd in commands)[sel]))
+                if i == 0: # Select all ranks on refresh.
+                    with m.If(sel == STEER_REFRESH):
+                        m.d.sync += phase.cs_n.eq(0)
+                    with m.Else():
+                        m.d.sync += phase.cs_n.eq(~rank_decoder.o)
+                else:
+                    m.d.sync += phase.cs_n.eq(~rank_decoder.o)
+                m.d.sync += phase.bank.eq(Array(cmd.ba[:-rankbits] for cmd in commands)[sel])
+            else:
+                m.d.sync += [
+                    phase.cs_n.eq(0),
+                    phase.bank.eq(Array(cmd.ba[:] for cmd in commands)[sel]),
+                ]
+
+            m.d.sync += [
+                phase.address.eq(Array(cmd.a for cmd in commands)[sel]),
+                phase.cas_n.eq(~Array(valid_and(cmd, "cas") for cmd in commands)[sel]),
+                phase.ras_n.eq(~Array(valid_and(cmd, "ras") for cmd in commands)[sel]),
+                phase.we_n.eq(~Array(valid_and(cmd, "we") for cmd in commands)[sel])
+            ]
+
+            rddata_ens = Array(valid_and(cmd, "is_read") for cmd in commands)
+            wrdata_ens = Array(valid_and(cmd, "is_write") for cmd in commands)
+            m.d.sync += [
+                phase.rddata_en.eq(rddata_ens[sel]),
+                phase.wrdata_en.eq(wrdata_ens[sel])
+            ]
+
+        return m
+
+# Multiplexer --------------------------------------------------------------------------------------
+
+class Multiplexer(Peripheral, Elaboratable):
+    """Multplexes requets from BankMachines to DFI
+
+    This module multiplexes requests from BankMachines (and Refresher) and
+    connects them to DFI. Refresh commands are coordinated between the Refresher
+    and BankMachines to ensure there are no conflicts. Enforces required timings
+    between commands (some timings are enforced by BankMachines).
+
+    Parameters
+    ----------
+    settings : ControllerSettings
+        Controller settings (with .phy, .geom and .timing settings)
+    bank_machines : [BankMachine, ...]
+        Bank machines that generate command requests to the Multiplexer
+    refresher : Refresher
+        Generates REFRESH command requests
+    dfi : dfi.Interface
+        DFI connected to the PHY
+    interface : LiteDRAMInterface
+        Data interface connected directly to LiteDRAMCrossbar
+    """
+    def __init__(self, settings, bank_machines, refresher, dfi, interface):
+        assert settings.phy.nphases == len(dfi.phases)
+        self._settings      = settings
+        self._bank_machines = bank_machines
+        self._refresher     = refresher
+        self._dfi           = dfi
+        self._interface     = interface
+
+    def elaborate(self, platform):
+        m = Module()
+
+        settings      = self._settings
+        bank_machines = self._bank_machines
+        refresher     = self._refresher
+        dfi           = self._dfi
+        interface     = self._interface
+
+        ras_allowed = Signal(reset=1)
+        cas_allowed = Signal(reset=1)
+
+        # Command choosing -------------------------------------------------------------------------
+        requests = [bm.cmd for bm in bank_machines]
+        m.submodules.choose_cmd = choose_cmd = _CommandChooser(requests)
+        m.submodules.choose_req = choose_req = _CommandChooser(requests)
+        if settings.phy.nphases == 1:
+            # When only 1 phase, use choose_req for all requests
+            choose_cmd = choose_req
+            m.d.comb += choose_req.want_cmds.eq(1)
+            m.d.comb += choose_req.want_activates.eq(ras_allowed)
+
+        # Command steering -------------------------------------------------------------------------
+        nop = Record(cmd_request_layout(settings.geom.addressbits,
+                                        log2_int(len(bank_machines))))
+        # nop must be 1st
+        commands = [nop, choose_cmd.cmd, choose_req.cmd, refresher.cmd]
+        steerer = _Steerer(commands, dfi)
+        m.submodules += steerer
+
+        # tRRD timing (Row to Row delay) -----------------------------------------------------------
+        m.submodules.trrdcon = trrdcon = tXXDController(settings.timing.tRRD)
+        m.d.comb += trrdcon.valid.eq(choose_cmd.accept() & choose_cmd.activate())
+
+        # tFAW timing (Four Activate Window) -------------------------------------------------------
+        m.submodules.tfawcon = tfawcon = tFAWController(settings.timing.tFAW)
+        m.d.comb += tfawcon.valid.eq(choose_cmd.accept() & choose_cmd.activate())
+
+        # RAS control ------------------------------------------------------------------------------
+        m.d.comb += ras_allowed.eq(trrdcon.ready & tfawcon.ready)
+
+        # tCCD timing (Column to Column delay) -----------------------------------------------------
+        m.submodules.tccdcon = tccdcon = tXXDController(settings.timing.tCCD)
+        m.d.comb += tccdcon.valid.eq(choose_req.accept() & (choose_req.write() | choose_req.read()))
+
+        # CAS control ------------------------------------------------------------------------------
+        m.d.comb += cas_allowed.eq(tccdcon.ready)
+
+        # tWTR timing (Write to Read delay) --------------------------------------------------------
+        write_latency = math.ceil(settings.phy.cwl / settings.phy.nphases)
+        m.submodules.twtrcon = twtrcon = tXXDController(
+            settings.timing.tWTR + write_latency +
+            # tCCD must be added since tWTR begins after the transfer is complete
+            (settings.timing.tCCD if settings.timing.tCCD is not None else 0))
+        m.d.comb += twtrcon.valid.eq(choose_req.accept() & choose_req.write())
+
+        # Read/write turnaround --------------------------------------------------------------------
+        read_available = Signal()
+        write_available = Signal()
+        reads = [req.valid & req.is_read for req in requests]
+        writes = [req.valid & req.is_write for req in requests]
+        m.d.comb += [
+            read_available.eq(reduce(or_, reads)),
+            write_available.eq(reduce(or_, writes))
+        ]
+
+        # Anti Starvation --------------------------------------------------------------------------
+
+        def anti_starvation(timeout):
+            en = Signal()
+            max_time = Signal()
+            if timeout:
+                t = timeout - 1
+                time = Signal(range(t+1))
+                m.d.comb += max_time.eq(time == 0)
+                with m.If(~en):
+                    m.d.sync += time.eq(t)
+                with m.Elif(~max_time):
+                    m.d.sync += time.eq(time - 1)
+            else:
+                m.d.comb += max_time.eq(0)
+            return en, max_time
+
+        read_time_en,   max_read_time = anti_starvation(settings.read_time)
+        write_time_en, max_write_time = anti_starvation(settings.write_time)
+
+        # Refresh ----------------------------------------------------------------------------------
+        m.d.comb += [bm.refresh_req.eq(refresher.cmd.valid) for bm in bank_machines]
+        go_to_refresh = Signal()
+        bm_refresh_gnts = [bm.refresh_gnt for bm in bank_machines]
+        m.d.comb += go_to_refresh.eq(reduce(and_, bm_refresh_gnts))
+
+        # Datapath ---------------------------------------------------------------------------------
+        all_rddata = [p.rddata for p in dfi.phases]
+        all_wrdata = [p.wrdata for p in dfi.phases]
+        all_wrdata_mask = [p.wrdata_mask for p in dfi.phases]
+        m.d.comb += [
+            interface.rdata.eq(Cat(*all_rddata)),
+            Cat(*all_wrdata).eq(interface.wdata),
+            Cat(*all_wrdata_mask).eq(~interface.wdata_we)
+        ]
+
+        def steerer_sel(steerer, r_w_n):
+            r = []
+            for i in range(settings.phy.nphases):
+                s = steerer.sel[i].eq(STEER_NOP)
+                if r_w_n == "read":
+                    if i == settings.phy.rdphase:
+                        s = steerer.sel[i].eq(STEER_REQ)
+                    elif i == settings.phy.rdcmdphase:
+                        s = steerer.sel[i].eq(STEER_CMD)
+                elif r_w_n == "write":
+                    if i == settings.phy.wrphase:
+                        s = steerer.sel[i].eq(STEER_REQ)
+                    elif i == settings.phy.wrcmdphase:
+                        s = steerer.sel[i].eq(STEER_CMD)
+                else:
+                    raise ValueError
+                r.append(s)
+            return r
+
+        # Control FSM ------------------------------------------------------------------------------
+        with m.FSM():
+            with m.State("Read"):
+                m.d.comb += [
+                    read_time_en.eq(1),
+                    choose_req.want_reads.eq(1),
+                    steerer_sel(steerer, "read"),
+                ]
+
+                with m.If(settings.phy.nphases == 1):
+                    m.d.comb += choose_req.cmd.ready.eq(cas_allowed & (~choose_req.activate() | ras_allowed))
+                with m.Else():
+                    m.d.comb += [
+                        choose_cmd.want_activates.eq(ras_allowed),
+                        choose_cmd.cmd.ready.eq(~choose_cmd.activate() | ras_allowed),
+                        choose_req.cmd.ready.eq(cas_allowed),
+                    ]
+
+                with m.If(write_available):
+                    # TODO: switch only after several cycles of ~read_available?
+                    with m.If(~read_available | max_read_time):
+                        m.next = "RTW"
+
+                with m.If(go_to_refresh):
+                    m.next = "Refresh"
+
+            with m.State("Write"):
+                m.d.comb += [
+                    write_time_en.eq(1),
+                    choose_req.want_writes.eq(1),
+                    steerer_sel(steerer, "write"),
+                ]
+
+                with m.If(settings.phy.nphases == 1):
+                    m.d.comb += choose_req.cmd.ready.eq(cas_allowed & (~choose_req.activate() | ras_allowed))
+                with m.Else():
+                    m.d.comb += [
+                        choose_cmd.want_activates.eq(ras_allowed),
+                        choose_cmd.cmd.ready.eq(~choose_cmd.activate() | ras_allowed),
+                        choose_req.cmd.ready.eq(cas_allowed),
+                    ]
+
+                with m.If(read_available):
+                    with m.If(~write_available | max_write_time):
+                        m.next = "WTR"
+
+                with m.If(go_to_refresh):
+                    m.next = "Refresh"
+
+            with m.State("Refresh"):
+                m.d.comb += [
+                    steerer.sel[0].eq(STEER_REFRESH),
+                    refresher.cmd.ready.eq(1),
+                ]
+                with m.If(refresher.cmd.last):
+                    m.next = "Read"
+
+            with m.State("WTR"):
+                with m.If(twtrcon.ready):
+                    m.next = "Read"
+
+            # TODO: reduce this, actual limit is around (cl+1)/nphases
+            delayed_enter(m, "RTW", "Write", settings.phy.read_latency-1)
+
+        if settings.with_bandwidth:
+            data_width = settings.phy.dfi_databits*settings.phy.nphases
+            m.submodules.bandwidth = Bandwidth(choose_req.cmd, data_width)
+
+        return m
diff --git a/gram/core/refresher.py b/gram/core/refresher.py
new file mode 100644 (file)
index 0000000..ebaea32
--- /dev/null
@@ -0,0 +1,339 @@
+# This file is Copyright (c) 2015 Sebastien Bourdeauducq <sb@m-labs.hk>
+# This file is Copyright (c) 2016-2019 Florent Kermarrec <florent@enjoy-digital.fr>
+# This file is Copyright (c) 2020 LambdaConcept <contact@lambdaconcept.com>
+# License: BSD
+
+"""LiteDRAM Refresher."""
+
+from nmigen import *
+
+import gram.stream as stream
+
+from gram.core.multiplexer import *
+from gram.compat import Timeline
+
+# RefreshExecuter ----------------------------------------------------------------------------------
+
+class RefreshExecuter(Elaboratable):
+    """Refresh Executer
+
+    Execute the refresh sequence to the DRAM:
+    - Send a "Precharge All" command
+    - Wait tRP
+    - Send an "Auto Refresh" command
+    - Wait tRFC
+    """
+    def __init__(self, cmd, trp, trfc):
+        self.start = Signal()
+        self.done  = Signal()
+        self._cmd = cmd
+        self._trp = trp
+        self._trfc = trfc
+
+    def elaborate(self, platform):
+        m = Module()
+
+        trp = self._trp
+        trfc = self._trfc
+
+        m.d.sync += [
+            self._cmd.a.eq(  0),
+            self._cmd.ba.eq( 0),
+            self._cmd.cas.eq(0),
+            self._cmd.ras.eq(0),
+            self._cmd.we.eq( 0),
+            self.done.eq(0),
+        ]
+
+        tl = Timeline([
+            # Precharge All
+            (0, [
+                self._cmd.a.eq(2**10),
+                self._cmd.ba.eq( 0),
+                self._cmd.cas.eq(0),
+                self._cmd.ras.eq(1),
+                self._cmd.we.eq( 1)
+            ]),
+            # Auto Refresh after tRP
+            (trp, [
+                self._cmd.a.eq(  0),
+                self._cmd.ba.eq( 0),
+                self._cmd.cas.eq(1),
+                self._cmd.ras.eq(1),
+                self._cmd.we.eq( 0),
+            ]),
+            # Done after tRP + tRFC
+            (trp + trfc, [
+                self._cmd.a.eq(  0),
+                self._cmd.ba.eq( 0),
+                self._cmd.cas.eq(0),
+                self._cmd.ras.eq(0),
+                self._cmd.we.eq( 0),
+                self.done.eq(1),
+            ]),
+        ])
+        m.submodules += tl
+        m.d.comb += tl.trigger.eq(self.start)
+
+        return m
+
+# RefreshSequencer ---------------------------------------------------------------------------------
+
+class RefreshSequencer(Elaboratable):
+    """Refresh Sequencer
+
+    Sequences N refreshes to the DRAM.
+    """
+    def __init__(self, cmd, trp, trfc, postponing=1):
+        self.start = Signal()
+        self.done  = Signal()
+        self._cmd = cmd
+        self._trp = trp
+        self._trfc = trfc
+        self._postponing = postponing
+
+    def elaborate(self, platform):
+        m = Module()
+
+        trp = self._trp
+        trfc = self._trfc
+        postponing = self._postponing
+
+        m.submodules.executer = executer = RefreshExecuter(self._cmd, trp, trfc)
+
+        count = Signal(bits_for(postponing), reset=postponing-1)
+        with m.If(self.start):
+            m.d.sync += count.eq(count.reset)
+        with m.Elif(executer.done):
+            with m.If(count != 0):
+                m.d.sync += count.eq(count-1)
+
+        m.d.comb += [
+            executer.start.eq(self.start | (count != 0)),
+            self.done.eq(executer.done & (count == 0)),
+        ]
+
+        return m
+
+# RefreshTimer -------------------------------------------------------------------------------------
+
+class RefreshTimer(Elaboratable):
+    """Refresh Timer
+
+    Generate periodic pulses (tREFI period) to trigger DRAM refresh.
+    """
+    def __init__(self, trefi):
+        self.wait  = Signal()
+        self.done  = Signal()
+        self.count = Signal(bits_for(trefi))
+        self._trefi = trefi
+
+    def elaborate(self, platform):
+        m = Module()
+
+        trefi = self._trefi
+
+        done  = Signal()
+        count = Signal(bits_for(trefi), reset=trefi-1)
+
+        with m.If(self.wait & ~self.done):
+            m.d.sync += count.eq(count-1)
+        with m.Else():
+            m.d.sync += count.eq(count.reset)
+
+        m.d.comb += [
+            done.eq(count == 0),
+            self.done.eq(done),
+            self.count.eq(count)
+        ]
+
+        return m
+
+# RefreshPostponer -------------------------------------------------------------------------------
+
+class RefreshPostponer(Elaboratable):
+    """Refresh Postponer
+
+    Postpone N Refresh requests and generate a request when N is reached.
+    """
+    def __init__(self, postponing=1):
+        self.req_i = Signal()
+        self.req_o = Signal()
+        self._postponing = postponing
+
+    def elaborate(self, platform):
+        m = Module()
+
+        count = Signal(bits_for(self._postponing), reset=self._postponing-1)
+
+        m.d.sync += self.req_o.eq(0)
+        with m.If(self.req_i):
+            with m.If(count == 0):
+                m.d.sync += [
+                    count.eq(count.reset),
+                    self.req_o.eq(1),
+                ]
+            with m.Else():
+                m.d.sync += count.eq(count-1)
+
+        return m
+
+# ZQCSExecuter ----------------------------------------------------------------------------------
+
+class ZQCSExecuter(Elaboratable):
+    """ZQ Short Calibration Executer
+
+    Execute the ZQCS sequence to the DRAM:
+    - Send a "Precharge All" command
+    - Wait tRP
+    - Send an "ZQ Short Calibration" command
+    - Wait tZQCS
+    """
+    def __init__(self, cmd, trp, tzqcs):
+        self.start = Signal()
+        self.done  = Signal()
+        self._cmd = cmd
+        self._trp = trp
+        self._tzqcs = tzqcs
+
+    def elaborate(self, platform):
+        m = Module()
+
+        cmd = self._cmd
+        trp = self._trp
+        tzqcs = self._tzqcs
+
+        m.d.sync += self.done.eq(0)
+
+        tl = Timeline([
+            # Precharge All
+            (0, [
+                cmd.a.eq(  2**10),
+                cmd.ba.eq( 0),
+                cmd.cas.eq(0),
+                cmd.ras.eq(1),
+                cmd.we.eq( 1)
+            ]),
+            # ZQ Short Calibration after tRP
+            (trp, [
+                cmd.a.eq(  0),
+                cmd.ba.eq( 0),
+                cmd.cas.eq(0),
+                cmd.ras.eq(0),
+                cmd.we.eq( 1),
+            ]),
+            # Done after tRP + tZQCS
+            (trp + tzqcs, [
+                cmd.a.eq(  0),
+                cmd.ba.eq( 0),
+                cmd.cas.eq(0),
+                cmd.ras.eq(0),
+                cmd.we.eq( 0),
+                self.done.eq(1)
+            ]),
+        ])
+        m.submodules += tl
+        m.d.comb += tl.trigger.eq(self.start)
+
+        return m
+
+# Refresher ----------------------------------------------------------------------------------------
+
+class Refresher(Elaboratable):
+    """Refresher
+
+    Manages DRAM refresh.
+
+    The DRAM needs to be refreshed with a tREFI period to avoid data corruption. During a refresh,
+    the controller sends a "Precharge All" command to close and precharge all rows, and then sends
+    an "Auto Refresh" command.
+
+    Before executing the refresh, the Refresher notifies the Controller that a refresh should occur.
+    This allows the Controller to finish the current transaction and block new transactions. Once all
+    transactions are done, the Refresher executes the refresh sequence and releases the Controller.
+
+    """
+    def __init__(self, settings, clk_freq, zqcs_freq=1e0, postponing=1):
+        assert postponing <= 8
+        abits  = settings.geom.addressbits
+        babits = settings.geom.bankbits + log2_int(settings.phy.nranks)
+        self.cmd = cmd = stream.Endpoint(cmd_request_rw_layout(a=abits, ba=babits))
+        self._postponing = postponing
+        self._settings   = settings
+        self._clk_freq   = clk_freq
+        self._zqcs_freq  = zqcs_freq
+
+    def elaborate(self, platform):
+        m = Module()
+
+        settings  = self._settings
+        clk_freq  = self._clk_freq
+        zqcs_freq = self._zqcs_freq
+        cmd       = self.cmd
+
+        wants_refresh = Signal()
+        wants_zqcs    = Signal()
+
+        # Refresh Timer ----------------------------------------------------------------------------
+        m.submodules.timer = timer = RefreshTimer(settings.timing.tREFI)
+        m.d.comb += timer.wait.eq(~timer.done)
+
+        # Refresh Postponer ------------------------------------------------------------------------
+        m.submodules.postponer = postponer = RefreshPostponer(self._postponing)
+        m.d.comb += [
+            postponer.req_i.eq(timer.done),
+            wants_refresh.eq(postponer.req_o),
+        ]
+
+        # Refresh Sequencer ------------------------------------------------------------------------
+        m.submodules.sequencer = sequencer = RefreshSequencer(cmd, settings.timing.tRP, settings.timing.tRFC, self._postponing)
+
+        if settings.timing.tZQCS is not None:
+            # ZQCS Timer ---------------------------------------------------------------------------
+            m.submodules.zqcs_timer = zqcs_timer = RefreshTimer(int(clk_freq/zqcs_freq))
+            m.d.comb += wants_zqcs.eq(zqcs_timer.done)
+
+            # ZQCS Executer ------------------------------------------------------------------------
+            m.submodules.zqcs_executer = zqcs_executer = ZQCSExecuter(cmd, settings.timing.tRP, settings.timing.tZQCS)
+            m.d.comb += zqcs_timer.wait.eq(~zqcs_executer.done)
+
+        # Refresh FSM ------------------------------------------------------------------------------
+        with m.FSM():
+            with m.State("Idle"):
+                with m.If(settings.with_refresh & wants_refresh):
+                    m.next = "Wait-Bank-Machines"
+
+            with m.State("Wait-Bank-Machines"):
+                m.d.comb += cmd.valid.eq(1)
+                with m.If(cmd.ready):
+                    m.d.comb += sequencer.start.eq(1)
+                    m.next = "Do-Refresh"
+
+            if settings.timing.tZQCS is None:
+                with m.State("Do-Refresh"):
+                    m.d.comb += cmd.valid.eq(1)
+                    with m.If(sequencer.done):
+                        m.d.comb += [
+                            cmd.valid.eq(0),
+                            cmd.last.eq(1),
+                        ]
+                        m.next = "Idle"
+            else:
+                with m.State("Do-Refresh"):
+                    m.d.comb += cmd.valid.eq(1)
+                    with m.If(sequencer.done):
+                        with m.If(wants_zqcs):
+                            m.d.comb += zqcs_executer.start.eq(1)
+                            m.next = "Do-Zqcs"
+                        with m.Else():
+                            m.d.comb += [
+                                cmd.valid.eq(0),
+                                cmd.last.eq(1),
+                            ]
+                            m.next = "Idle"
+
+                with m.State("Do-Zqcs"):
+                    m.d.comb += cmd.valid.eq(1)
+                    with m.If(zqcs_executer.done):
+                        m.d.comb += [
+                            cmd.valid.eq(0),
+                            cmd.last.eq(1),
+                        ]
+                        m.next = "Idle"
+
+        return m
diff --git a/gram/dfii.py b/gram/dfii.py
new file mode 100644 (file)
index 0000000..11db5d6
--- /dev/null
@@ -0,0 +1,84 @@
+# This file is Copyright (c) 2015 Sebastien Bourdeauducq <sb@m-labs.hk>
+# This file is Copyright (c) 2016-2019 Florent Kermarrec <florent@enjoy-digital.fr>
+# This file is Copyright (c) 2020 LambdaConcept <contact@lambdaconcept.com>
+# License: BSD
+
+from nmigen import *
+
+from gram.phy import dfi
+from lambdasoc.periph import Peripheral
+
+# PhaseInjector ------------------------------------------------------------------------------------
+
+class PhaseInjector(Peripheral, Elaboratable):
+    def __init__(self, phase):
+        super().__init__()
+        self._phase = phase
+
+        bank = self.csr_bank()
+        self._command = bank.csr(6, "rw")
+        self._command_issue = bank.csr(1, "rw")
+        self._address = bank.csr(len(phase.address), "rw", reset_less=True)
+        self._baddress = bank.csr(len(phase.bank), "rw", reset_less=True)
+        self._wrdata = bank.csr(len(phase.wrdata), "rw", reset_less=True)
+        self._rddata = bank.csr(len(phase.rddata))
+
+    def elaborate(self, platform):
+        m = Module()
+
+        phase = self._phase
+
+        m.d.comb += [
+            phase.address.eq(self._address.storage),
+            phase.bank.eq(self._baddress.storage),
+            phase.wrdata_en.eq(self._command_issue.re & self._command.storage[4]),
+            phase.rddata_en.eq(self._command_issue.re & self._command.storage[5]),
+            phase.wrdata.eq(self._wrdata.storage),
+            phase.wrdata_mask.eq(0)
+        ]
+
+        with m.If(self._command_issue.re):
+            m.d.comb += [
+                phase.cs_n.eq(Replicate(~self._command.storage[0], len(phase.cs_n))),
+                phase.we_n.eq(~self._command.storage[1]),
+                phase.cas_n.eq(~self._command.storage[2]),
+                phase.ras_n.eq(~self._command.storage[3]),
+            ]
+        with m.Else():
+            m.d.comb += [
+                phase.cs_n.eq(Replicate(1, len(phase.cs_n))),
+                phase.we_n.eq(1),
+                phase.cas_n.eq(1),
+                phase.ras_n.eq(1),
+            ]
+
+        with m.If(phase.rddata_valid):
+            m.d.sync += self._rddata.status.eq(phase.rddata)
+
+        return m
+
+# DFIInjector --------------------------------------------------------------------------------------
+
+class DFIInjector(Peripheral, Elaboratable):
+    def __init__(self, addressbits, bankbits, nranks, databits, nphases=1):
+        super().__init__()
+        self._nranks = nranks
+
+        self._inti  = dfi.Interface(addressbits, bankbits, nranks, databits, nphases)
+        self.slave  = dfi.Interface(addressbits, bankbits, nranks, databits, nphases)
+        self.master = dfi.Interface(addressbits, bankbits, nranks, databits, nphases)
+
+        bank = self.csr_bank()
+        self._control = bank.csr(4)  # sel, cke, odt, reset_n
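+        # Register layout sketch (from the elaborate() logic below): bit 0 ("sel")
+        # routes the slave DFI to the PHY instead of the initialization interface,
+        # bit 1 drives cke, bit 2 drives odt and bit 3 drives reset_n on every phase.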
+
+        #for n, phase in enumerate(inti.phases):
+        #    setattr(self.submodules, "pi" + str(n), PhaseInjector(phase)) TODO
+
+        # # #
+
+    def elaborate(self, platform):
+        m = Module()
+
+        with m.If(self._control.storage[0]):
+            m.d.comb += self.slave.connect(self.master)
+        with m.Else():
+            m.d.comb += self._inti.connect(self.master)
+
+        for i in range(self._nranks):
+            m.d.comb += [phase.cke[i].eq(self._control.storage[1]) for phase in self._inti.phases]
+            m.d.comb += [phase.odt[i].eq(self._control.storage[2]) for phase in self._inti.phases if hasattr(phase, "odt")]
+        m.d.comb += [phase.reset_n.eq(self._control.storage[3]) for phase in self._inti.phases if hasattr(phase, "reset_n")]
+
+        return m
diff --git a/gram/frontend/__init__.py b/gram/frontend/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/gram/frontend/adaptation.py b/gram/frontend/adaptation.py
new file mode 100644 (file)
index 0000000..64650fc
--- /dev/null
@@ -0,0 +1,324 @@
+# This file is Copyright (c) 2016-2019 Florent Kermarrec <florent@enjoy-digital.fr>
+# License: BSD
+
+from migen import *
+
+from litex.soc.interconnect import stream
+
+from litedram.common import *
+
+# LiteDRAMNativePortCDC ----------------------------------------------------------------------------
+
+class LiteDRAMNativePortCDC(Module):
+    def __init__(self, port_from, port_to,
+                 cmd_depth   = 4,
+                 wdata_depth = 16,
+                 rdata_depth = 16):
+        assert port_from.address_width == port_to.address_width
+        assert port_from.data_width    == port_to.data_width
+        assert port_from.mode          == port_to.mode
+
+        address_width     = port_from.address_width
+        data_width        = port_from.data_width
+        mode              = port_from.mode
+        clock_domain_from = port_from.clock_domain
+        clock_domain_to   = port_to.clock_domain
+
+        # # #
+
+        cmd_fifo = stream.AsyncFIFO(
+            [("we", 1), ("addr", address_width)], cmd_depth)
+        cmd_fifo = ClockDomainsRenamer(
+            {"write": clock_domain_from,
+             "read":  clock_domain_to})(cmd_fifo)
+        self.submodules += cmd_fifo
+        self.submodules += stream.Pipeline(
+            port_from.cmd, cmd_fifo, port_to.cmd)
+
+        if mode == "write" or mode == "both":
+            wdata_fifo = stream.AsyncFIFO(
+                [("data", data_width), ("we", data_width//8)], wdata_depth)
+            wdata_fifo = ClockDomainsRenamer(
+                {"write": clock_domain_from,
+                 "read":  clock_domain_to})(wdata_fifo)
+            self.submodules += wdata_fifo
+            self.submodules += stream.Pipeline(
+                port_from.wdata, wdata_fifo, port_to.wdata)
+
+        if mode == "read" or mode == "both":
+            rdata_fifo = stream.AsyncFIFO([("data", data_width)], rdata_depth)
+            rdata_fifo = ClockDomainsRenamer(
+                {"write": clock_domain_to,
+                 "read":  clock_domain_from})(rdata_fifo)
+            self.submodules += rdata_fifo
+            self.submodules += stream.Pipeline(
+                port_to.rdata, rdata_fifo, port_from.rdata)
+
+# LiteDRAMNativePortDownConverter ------------------------------------------------------------------
+
+class LiteDRAMNativePortDownConverter(Module):
+    """LiteDRAM port DownConverter
+
+    This module reduces the user port data width to fit the controller data width.
+    With N = port_from.data_width/port_to.data_width:
+    - The address is adapted (multiplied by N, plus internal increments)
+    - A write from the user is split into N writes to the controller.
+    - A read from the user generates N reads to the controller, and the returned
+      data is regrouped into a single word presented to the user.
+    """
+    def __init__(self, port_from, port_to, reverse=False):
+        assert port_from.clock_domain == port_to.clock_domain
+        assert port_from.data_width    > port_to.data_width
+        assert port_from.mode         == port_to.mode
+        if port_from.data_width % port_to.data_width:
+            raise ValueError("Ratio must be an int")
+
+        # # #
+
+        ratio = port_from.data_width//port_to.data_width
+        mode  = port_from.mode
+
+        counter       = Signal(max=ratio)
+        counter_reset = Signal()
+        counter_ce    = Signal()
+        self.sync += \
+            If(counter_reset,
+                counter.eq(0)
+            ).Elif(counter_ce,
+                counter.eq(counter + 1)
+            )
+
+        self.submodules.fsm = fsm = FSM(reset_state="IDLE")
+        fsm.act("IDLE",
+            counter_reset.eq(1),
+            If(port_from.cmd.valid,
+                NextState("CONVERT")
+            )
+        )
+        fsm.act("CONVERT",
+            port_to.cmd.valid.eq(1),
+            port_to.cmd.we.eq(port_from.cmd.we),
+            port_to.cmd.addr.eq(port_from.cmd.addr*ratio + counter),
+            If(port_to.cmd.ready,
+                counter_ce.eq(1),
+                If(counter == ratio - 1,
+                    port_from.cmd.ready.eq(1),
+                    NextState("IDLE")
+                )
+            )
+        )
+
+        if mode == "write" or mode == "both":
+            wdata_converter = stream.StrideConverter(
+                port_from.wdata.description,
+                port_to.wdata.description,
+                reverse=reverse)
+            self.submodules += wdata_converter
+            self.submodules += stream.Pipeline(
+                port_from.wdata, wdata_converter, port_to.wdata)
+
+        if mode == "read" or mode == "both":
+            rdata_converter = stream.StrideConverter(
+                port_to.rdata.description,
+                port_from.rdata.description,
+                reverse=reverse)
+            self.submodules += rdata_converter
+            self.submodules += stream.Pipeline(
+                port_to.rdata, rdata_converter, port_from.rdata)
+
+# LiteDRAMNativeWritePortUpConverter ---------------------------------------------------------------
+
+class LiteDRAMNativeWritePortUpConverter(Module):
+    # TODO: finish and remove hack
+    """LiteDRAM write port UpConverter
+
+    This module increases the user port data width to fit the controller data width.
+    With N = port_to.data_width/port_from.data_width:
+    - The address is adapted (divided by N)
+    - N writes from the user are regrouped into a single write to the controller
+      (when possible, i.e. when consecutive and bursting)
+    """
+    def __init__(self, port_from, port_to, reverse=False):
+        assert port_from.clock_domain == port_to.clock_domain
+        assert port_from.data_width    < port_to.data_width
+        assert port_from.mode         == port_to.mode
+        assert port_from.mode         == "write"
+        if port_to.data_width % port_from.data_width:
+            raise ValueError("Ratio must be an int")
+
+        # # #
+
+        ratio = port_to.data_width//port_from.data_width
+
+        we      = Signal()
+        address = Signal(port_to.address_width)
+
+        counter       = Signal(max=ratio)
+        counter_reset = Signal()
+        counter_ce    = Signal()
+        self.sync += \
+            If(counter_reset,
+                counter.eq(0)
+            ).Elif(counter_ce,
+                counter.eq(counter + 1)
+            )
+
+        self.submodules.fsm = fsm = FSM(reset_state="IDLE")
+        fsm.act("IDLE",
+            port_from.cmd.ready.eq(1),
+            If(port_from.cmd.valid,
+                counter_ce.eq(1),
+                NextValue(we, port_from.cmd.we),
+                NextValue(address, port_from.cmd.addr),
+                NextState("RECEIVE")
+            )
+        )
+        fsm.act("RECEIVE",
+            port_from.cmd.ready.eq(1),
+            If(port_from.cmd.valid,
+                counter_ce.eq(1),
+                If(counter == ratio-1,
+                    NextState("GENERATE")
+                )
+            )
+        )
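+        # GENERATE: once `ratio` narrow writes have been received, issue a single wide
+        # command; the controller address drops the low log2(ratio) bits of the user address.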
+        fsm.act("GENERATE",
+            port_to.cmd.valid.eq(1),
+            port_to.cmd.we.eq(we),
+            port_to.cmd.addr.eq(address[log2_int(ratio):]),
+            If(port_to.cmd.ready,
+                NextState("IDLE")
+            )
+        )
+
+        wdata_converter = stream.StrideConverter(
+            port_from.wdata.description,
+            port_to.wdata.description,
+            reverse=reverse)
+        self.submodules += wdata_converter
+        self.submodules += stream.Pipeline(
+            port_from.wdata,
+            wdata_converter,
+            port_to.wdata)
+
+# LiteDRAMNativeReadPortUpConverter ----------------------------------------------------------------
+
+class LiteDRAMNativeReadPortUpConverter(Module):
+    """LiteDRAM port UpConverter
+
+    This module increase user port data width to fit controller data width.
+    With N = port_to.data_width/port_from.data_width:
+    - Address is adapted (divided by N)
+    - N read from user are regrouped in a single one to the controller
+    (when possible, ie when consecutive and bursting)
+    """
+    def __init__(self, port_from, port_to, reverse=False):
+        assert port_from.clock_domain == port_to.clock_domain
+        assert port_from.data_width    < port_to.data_width
+        assert port_from.mode         == port_to.mode
+        assert port_from.mode         == "read"
+        if port_to.data_width % port_from.data_width:
+            raise ValueError("Ratio must be an int")
+
+        # # #
+
+        ratio = port_to.data_width//port_from.data_width
+
+
+        # Command ----------------------------------------------------------------------------------
+
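+        # cmd_buffer records, for each wide read issued to the controller, which chunks
+        # of the returned word should be forwarded to the user (currently all of them,
+        # see the sel TODO below).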
+        cmd_buffer = stream.SyncFIFO([("sel", ratio)], 4)
+        self.submodules += cmd_buffer
+
+        counter = Signal(max=ratio)
+        counter_ce = Signal()
+        self.sync += \
+            If(counter_ce,
+                counter.eq(counter + 1)
+            )
+
+        self.comb += \
+            If(port_from.cmd.valid,
+                If(counter == 0,
+                    port_to.cmd.valid.eq(1),
+                    port_to.cmd.addr.eq(port_from.cmd.addr[log2_int(ratio):]),
+                    port_from.cmd.ready.eq(port_to.cmd.ready),
+                    counter_ce.eq(port_to.cmd.ready)
+                ).Else(
+                    port_from.cmd.ready.eq(1),
+                    counter_ce.eq(1)
+                )
+            )
+
+        # TODO: fix sel
+        self.comb += \
+            If(port_to.cmd.valid & port_to.cmd.ready,
+                cmd_buffer.sink.valid.eq(1),
+                cmd_buffer.sink.sel.eq(2**ratio-1)
+            )
+
+        # Datapath ---------------------------------------------------------------------------------
+
+        rdata_buffer    = stream.Buffer(port_to.rdata.description)
+        rdata_converter = stream.StrideConverter(
+            port_to.rdata.description,
+            port_from.rdata.description,
+            reverse=reverse)
+        self.submodules +=  rdata_buffer, rdata_converter
+
+        rdata_chunk       = Signal(ratio, reset=1)
+        rdata_chunk_valid = Signal()
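+        # rdata_chunk is a one-hot pointer that rotates through the narrow chunks of each
+        # wide read word as they are presented to (or dropped for) the user.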
+        self.sync += \
+            If(rdata_converter.source.valid &
+               rdata_converter.source.ready,
+                rdata_chunk.eq(Cat(rdata_chunk[ratio-1], rdata_chunk[:ratio-1]))
+            )
+
+        self.comb += [
+            port_to.rdata.connect(rdata_buffer.sink),
+            rdata_buffer.source.connect(rdata_converter.sink),
+            rdata_chunk_valid.eq((cmd_buffer.source.sel & rdata_chunk) != 0),
+            If(port_from.flush,
+                rdata_converter.source.ready.eq(1)
+            ).Elif(cmd_buffer.source.valid,
+                If(rdata_chunk_valid,
+                    port_from.rdata.valid.eq(rdata_converter.source.valid),
+                    port_from.rdata.data.eq(rdata_converter.source.data),
+                    rdata_converter.source.ready.eq(port_from.rdata.ready)
+                ).Else(
+                    rdata_converter.source.ready.eq(1)
+                )
+            ),
+            cmd_buffer.source.ready.eq(
+                rdata_converter.source.ready & rdata_chunk[ratio-1])
+        ]
+
+# LiteDRAMNativePortConverter ----------------------------------------------------------------------
+
+class LiteDRAMNativePortConverter(Module):
+    def __init__(self, port_from, port_to, reverse=False):
+        assert port_from.clock_domain == port_to.clock_domain
+        assert port_from.mode         == port_to.mode
+
+        # # #
+
+        mode = port_from.mode
+
+        if port_from.data_width > port_to.data_width:
+            converter = LiteDRAMNativePortDownConverter(port_from, port_to, reverse)
+            self.submodules += converter
+        elif port_from.data_width < port_to.data_width:
+            if mode == "write":
+                converter = LiteDRAMNativeWritePortUpConverter(port_from, port_to, reverse)
+            elif mode == "read":
+                converter = LiteDRAMNativeReadPortUpConverter(port_from, port_to, reverse)
+            else:
+                raise NotImplementedError
+            self.submodules += converter
+        else:
+            self.comb += [
+                port_from.cmd.connect(port_to.cmd),
+                port_from.wdata.connect(port_to.wdata),
+                port_to.rdata.connect(port_from.rdata)
+            ]
diff --git a/gram/frontend/axi.py b/gram/frontend/axi.py
new file mode 100644 (file)
index 0000000..d310f65
--- /dev/null
@@ -0,0 +1,196 @@
+# This file is Copyright (c) 2018-2019 Florent Kermarrec <florent@enjoy-digital.fr>
+# License: BSD
+
+"""
+AXI frontend for LiteDRAM
+
+Converts AXI ports to Native ports.
+
+Features:
+- Write/Read arbitration.
+- Write/Read data buffers (configurable depth).
+- Burst support (FIXED/INCR/WRAP).
+- ID support (configurable width).
+
+Limitations:
+- Response always okay.
+- No reordering.
+"""
+
+from migen import *
+from migen.genlib.record import *
+from migen.genlib.roundrobin import *
+
+from litex.soc.interconnect import stream
+from litex.soc.interconnect.axi import *
+
+# LiteDRAMAXIPort ----------------------------------------------------------------------------------
+
+class LiteDRAMAXIPort(AXIInterface):
+    pass
+
+# LiteDRAMAXI2NativeW ------------------------------------------------------------------------------
+
+class LiteDRAMAXI2NativeW(Module):
+    def __init__(self, axi, port, buffer_depth, base_address):
+        assert axi.address_width >= log2_int(base_address)
+        assert axi.data_width    == port.data_width
+        self.cmd_request = Signal()
+        self.cmd_grant   = Signal()
+
+        # # #
+
+        ashift = log2_int(port.data_width//8)
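+        # ashift converts AXI byte addresses into native port word addresses.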
+
+        # Burst to Beat ----------------------------------------------------------------------------
+        aw_buffer = stream.Buffer(ax_description(axi.address_width, axi.id_width))
+        self.submodules += aw_buffer
+        self.comb += axi.aw.connect(aw_buffer.sink)
+        aw = stream.Endpoint(ax_description(axi.address_width, axi.id_width))
+        aw_burst2beat = AXIBurst2Beat(aw_buffer.source, aw)
+        self.submodules.aw_burst2beat = aw_burst2beat
+
+        # Write Buffer -----------------------------------------------------------------------------
+        w_buffer = stream.SyncFIFO(w_description(axi.data_width, axi.id_width),
+            buffer_depth, buffered=True)
+        self.submodules.w_buffer = w_buffer
+
+        # Write ID Buffer & Response ---------------------------------------------------------------
+        id_buffer   = stream.SyncFIFO([("id", axi.id_width)], buffer_depth)
+        resp_buffer = stream.SyncFIFO([("id", axi.id_width), ("resp", 2)], buffer_depth)
+        self.submodules += id_buffer, resp_buffer
+        self.comb += [
+            id_buffer.sink.valid.eq(aw.valid & aw.first & aw.ready),
+            id_buffer.sink.id.eq(aw.id),
+            If(w_buffer.source.valid &
+               w_buffer.source.last &
+               w_buffer.source.ready,
+                resp_buffer.sink.valid.eq(1),
+                resp_buffer.sink.resp.eq(RESP_OKAY),
+                resp_buffer.sink.id.eq(id_buffer.source.id),
+                id_buffer.source.ready.eq(1)
+            ),
+            resp_buffer.source.connect(axi.b)
+        ]
+
+        # Command ----------------------------------------------------------------------------------
+        # Accept and send command to the controller only if:
+        # - Address & Data request are *both* valid.
+        # - Data buffer is not full.
+        self.comb += [
+            self.cmd_request.eq(aw.valid & axi.w.valid & w_buffer.sink.ready),
+            If(self.cmd_request & self.cmd_grant,
+                port.cmd.valid.eq(1),
+                port.cmd.we.eq(1),
+                port.cmd.addr.eq((aw.addr - base_address) >> ashift),
+                aw.ready.eq(port.cmd.ready),
+                axi.w.connect(w_buffer.sink, omit={"valid", "ready"}),
+                If(port.cmd.ready,
+                    w_buffer.sink.valid.eq(1),
+                    axi.w.ready.eq(1)
+                )
+            )
+        ]
+
+        # Write Data -------------------------------------------------------------------------------
+        self.comb += [
+            w_buffer.source.connect(port.wdata, omit={"strb", "id"}),
+            port.wdata.we.eq(w_buffer.source.strb)
+        ]
+
+# LiteDRAMAXI2NativeR ------------------------------------------------------------------------------
+
+class LiteDRAMAXI2NativeR(Module):
+    def __init__(self, axi, port, buffer_depth, base_address):
+        assert axi.address_width >= log2_int(base_address)
+        assert axi.data_width    == port.data_width
+        self.cmd_request = Signal()
+        self.cmd_grant   = Signal()
+
+        # # #
+
+        can_read = Signal()
+
+        ashift = log2_int(port.data_width//8)
+
+        # Burst to Beat ----------------------------------------------------------------------------
+        ar_buffer = stream.Buffer(ax_description(axi.address_width, axi.id_width))
+        self.submodules += ar_buffer
+        self.comb += axi.ar.connect(ar_buffer.sink)
+        ar = stream.Endpoint(ax_description(axi.address_width, axi.id_width))
+        ar_burst2beat = AXIBurst2Beat(ar_buffer.source, ar)
+        self.submodules.ar_burst2beat = ar_burst2beat
+
+        # Read buffer ------------------------------------------------------------------------------
+        r_buffer = stream.SyncFIFO(r_description(axi.data_width, axi.id_width), buffer_depth, buffered=True)
+        self.submodules.r_buffer = r_buffer
+
+        # Read Buffer reservation ------------------------------------------------------------------
+        # - Incremented when data is planned to be queued
+        # - Decremented when data is dequeued
+        r_buffer_queue   = Signal()
+        r_buffer_dequeue = Signal()
+        r_buffer_level   = Signal(max=buffer_depth + 1)
+        self.comb += [
+            r_buffer_queue.eq(port.cmd.valid & port.cmd.ready & ~port.cmd.we),
+            r_buffer_dequeue.eq(r_buffer.source.valid & r_buffer.source.ready)
+        ]
+        self.sync += [
+            If(r_buffer_queue,
+                If(~r_buffer_dequeue, r_buffer_level.eq(r_buffer_level + 1))
+            ).Elif(r_buffer_dequeue,
+                r_buffer_level.eq(r_buffer_level - 1)
+            )
+        ]
+        self.comb += can_read.eq(r_buffer_level != buffer_depth)
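+        # Only accept a new read command when a buffer slot can still be reserved for its data.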
+
+        # Read ID Buffer ---------------------------------------------------------------------------
+        id_buffer = stream.SyncFIFO([("id", axi.id_width)], buffer_depth)
+        self.submodules += id_buffer
+        self.comb += [
+            id_buffer.sink.valid.eq(ar.valid & ar.ready),
+            id_buffer.sink.last.eq(ar.last),
+            id_buffer.sink.id.eq(ar.id),
+            axi.r.last.eq(id_buffer.source.last),
+            axi.r.id.eq(id_buffer.source.id),
+            id_buffer.source.ready.eq(axi.r.valid & axi.r.ready)
+        ]
+
+        # Command ----------------------------------------------------------------------------------
+        self.comb += [
+            self.cmd_request.eq(ar.valid & can_read),
+            If(self.cmd_grant,
+                port.cmd.valid.eq(ar.valid & can_read),
+                ar.ready.eq(port.cmd.ready & can_read),
+                port.cmd.we.eq(0),
+                port.cmd.addr.eq((ar.addr - base_address) >> ashift)
+            )
+        ]
+
+        # Read data --------------------------------------------------------------------------------
+        self.comb += [
+            port.rdata.connect(r_buffer.sink, omit={"bank"}),
+            r_buffer.source.connect(axi.r, omit={"id", "last"}),
+            axi.r.resp.eq(RESP_OKAY)
+        ]
+
+# LiteDRAMAXI2Native -------------------------------------------------------------------------------
+
+class LiteDRAMAXI2Native(Module):
+    def __init__(self, axi, port, w_buffer_depth=16, r_buffer_depth=16, base_address=0x00000000):
+
+        # # #
+
+        # Write path -------------------------------------------------------------------------------
+        self.submodules.write = LiteDRAMAXI2NativeW(axi, port, w_buffer_depth, base_address)
+
+        # Read path --------------------------------------------------------------------------------
+        self.submodules.read = LiteDRAMAXI2NativeR(axi, port, r_buffer_depth, base_address)
+
+        # Write / Read arbitration -----------------------------------------------------------------
+        arbiter = RoundRobin(2, SP_CE)
+        self.submodules += arbiter
+        self.comb += arbiter.ce.eq(~port.cmd.valid | port.cmd.ready)
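+        # Only let the arbiter switch grants when no command is left pending on the native port.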
+        for i, master in enumerate([self.write, self.read]):
+            self.comb += arbiter.request[i].eq(master.cmd_request)
+            self.comb += master.cmd_grant.eq(arbiter.grant == i)
diff --git a/gram/frontend/bist.py b/gram/frontend/bist.py
new file mode 100644 (file)
index 0000000..c9c7c29
--- /dev/null
@@ -0,0 +1,759 @@
+# This file is Copyright (c) 2016-2019 Florent Kermarrec <florent@enjoy-digital.fr>
+# This file is Copyright (c) 2016 Tim 'mithro' Ansell <mithro@mithis.com>
+# License: BSD
+
+"""Built In Self Test (BIST) modules for testing LiteDRAM functionality."""
+
+from functools import reduce
+from operator import xor
+
+from migen import *
+
+from litex.soc.interconnect import stream
+from litex.soc.interconnect.csr import *
+
+from litedram.common import LiteDRAMNativePort
+from litedram.frontend.axi import LiteDRAMAXIPort
+from litedram.frontend.dma import LiteDRAMDMAWriter, LiteDRAMDMAReader
+
+# LFSR ---------------------------------------------------------------------------------------------
+
+class LFSR(Module):
+    """Linear-Feedback Shift Register to generate a pseudo-random sequence.
+
+    Parameters
+    ----------
+    n_out : int
+        Width of the output data signal.
+    n_state : int
+        Width of the LFSR internal state.
+    taps : list of int
+        LFSR taps (from the feedback polynomial).
+
+    Attributes
+    ----------
+    o : out
+        Output data
+    """
+    def __init__(self, n_out, n_state, taps):
+        self.o = Signal(n_out)
+
+        # # #
+
+        state  = Signal(n_state)
+        curval = [state[i] for i in range(n_state)]
+        curval += [0]*(n_out - n_state)
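+        # Unroll the feedback n_out times so that n_out new pseudo-random bits are
+        # produced every clock cycle.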
+        for i in range(n_out):
+            nv = ~reduce(xor, [curval[tap] for tap in taps])
+            curval.insert(0, nv)
+            curval.pop()
+
+        self.sync += [
+            state.eq(Cat(*curval[:n_state])),
+            self.o.eq(Cat(*curval))
+        ]
+
+# Counter ------------------------------------------------------------------------------------------
+
+class Counter(Module):
+    """Simple incremental counter.
+
+    Parameters
+    ----------
+    n_out : int
+        Width of the output data signal.
+
+    Attributes
+    ----------
+    o : out
+        Output data
+    """
+    def __init__(self, n_out):
+        self.o = Signal(n_out)
+
+        # # #
+
+        self.sync += self.o.eq(self.o + 1)
+
+# Generator ----------------------------------------------------------------------------------------
+
+@CEInserter()
+class Generator(Module):
+    """Address/Data Generator.
+
+    Parameters
+    ----------
+    n_out : int
+        Width of the output data signal.
+
+    Attributes
+    ----------
+    random_enable : in
+        Enable random output (LFSR)
+
+    o : out
+        Output data
+    """
+    def __init__(self, n_out, n_state, taps):
+        self.random_enable = Signal()
+        self.o = Signal(n_out)
+
+        # # #
+
+        lfsr  = LFSR(n_out, n_state, taps)
+        count = Counter(n_out)
+        self.submodules += lfsr, count
+
+        self.comb += \
+            If(self.random_enable,
+                self.o.eq(lfsr.o)
+            ).Else(
+                self.o.eq(count.o)
+            )
+
+
+def get_ashift_awidth(dram_port):
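+    # Native ports are word-addressed, so the byte-address width seen by the BIST is
+    # address_width + ashift; AXI ports are already byte-addressed.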
+    if isinstance(dram_port, LiteDRAMNativePort):
+        ashift = log2_int(dram_port.data_width//8)
+        awidth = dram_port.address_width + ashift
+    elif isinstance(dram_port, LiteDRAMAXIPort):
+        ashift = log2_int(dram_port.data_width//8)
+        awidth = dram_port.address_width
+    else:
+        raise NotImplementedError
+    return ashift, awidth
+
+# _LiteDRAMBISTGenerator ---------------------------------------------------------------------------
+
+@ResetInserter()
+class _LiteDRAMBISTGenerator(Module):
+    def __init__(self, dram_port):
+        ashift, awidth = get_ashift_awidth(dram_port)
+        self.start       = Signal()
+        self.done        = Signal()
+        self.base        = Signal(awidth)
+        self.end         = Signal(awidth)
+        self.length      = Signal(awidth)
+        self.random_data = Signal()
+        self.random_addr = Signal()
+        self.ticks       = Signal(32)
+
+        self.run_cascade_in  = Signal(reset=1)
+        self.run_cascade_out = Signal()
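+        # Cascade handshake: the module only advances while run_cascade_in is high and
+        # reports its own progress on run_cascade_out, so several generators/checkers
+        # can be chained to advance in lockstep.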
+
+        # # #
+
+        # Data / Address generators ----------------------------------------------------------------
+        data_gen = Generator(31, n_state=31, taps=[27, 30]) # PRBS31
+        addr_gen = Generator(31, n_state=31, taps=[27, 30])
+        self.submodules += data_gen, addr_gen
+        self.comb += data_gen.random_enable.eq(self.random_data)
+        self.comb += addr_gen.random_enable.eq(self.random_addr)
+
+        # Mask the random address into the range [base, end); the range size must be a power of 2.
+        addr_mask = Signal(awidth)
+        self.comb += addr_mask.eq((self.end - self.base) - 1)
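+        # e.g. base=0x0000 and end=0x1000 give addr_mask=0xfff; (end - base) must be a
+        # power of two for the mask to be correct.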
+
+        # DMA --------------------------------------------------------------------------------------
+        dma = LiteDRAMDMAWriter(dram_port)
+        self.submodules += dma
+
+        cmd_counter = Signal(dram_port.address_width, reset_less=True)
+
+        # Data / Address FSM -----------------------------------------------------------------------
+        fsm = FSM(reset_state="IDLE")
+        self.submodules += fsm
+        fsm.act("IDLE",
+            If(self.start,
+                NextValue(cmd_counter, 0),
+                NextState("RUN")
+            ),
+            NextValue(self.ticks, 0)
+        )
+        fsm.act("WAIT",
+            If(self.run_cascade_in,
+                NextState("RUN")
+            )
+        )
+        fsm.act("RUN",
+            dma.sink.valid.eq(1),
+            If(dma.sink.ready,
+                self.run_cascade_out.eq(1),
+                data_gen.ce.eq(1),
+                addr_gen.ce.eq(1),
+                NextValue(cmd_counter, cmd_counter + 1),
+                If(cmd_counter == (self.length[ashift:] - 1),
+                    NextState("DONE")
+                ).Elif(~self.run_cascade_in,
+                    NextState("WAIT")
+                )
+            ),
+            NextValue(self.ticks, self.ticks + 1)
+        )
+        fsm.act("DONE",
+            self.run_cascade_out.eq(1),
+            self.done.eq(1)
+        )
+
+        if isinstance(dram_port, LiteDRAMNativePort): # addressing in dwords
+            dma_sink_addr = dma.sink.address
+        elif isinstance(dram_port, LiteDRAMAXIPort):  # addressing in bytes
+            dma_sink_addr = dma.sink.address[ashift:]
+        else:
+            raise NotImplementedError
+
+        self.comb += dma_sink_addr.eq(self.base[ashift:] + (addr_gen.o & addr_mask))
+        self.comb += dma.sink.data.eq(data_gen.o)
+
+
+@ResetInserter()
+class _LiteDRAMPatternGenerator(Module):
+    def __init__(self, dram_port, init=[]):
+        ashift, awidth = get_ashift_awidth(dram_port)
+        self.start  = Signal()
+        self.done   = Signal()
+        self.ticks  = Signal(32)
+
+        self.run_cascade_in  = Signal(reset=1)
+        self.run_cascade_out = Signal()
+
+        # # #
+
+        # Data / Address pattern -------------------------------------------------------------------
+        addr_init, data_init = zip(*init)
+        addr_mem = Memory(dram_port.address_width, len(addr_init), init=addr_init)
+        data_mem = Memory(dram_port.data_width,    len(data_init), init=data_init)
+        addr_port = addr_mem.get_port(async_read=True)
+        data_port = data_mem.get_port(async_read=True)
+        self.specials += addr_mem, data_mem, addr_port, data_port
+
+        # DMA --------------------------------------------------------------------------------------
+        dma = LiteDRAMDMAWriter(dram_port)
+        self.submodules += dma
+
+        cmd_counter = Signal(dram_port.address_width, reset_less=True)
+
+        # Data / Address FSM -----------------------------------------------------------------------
+        fsm = FSM(reset_state="IDLE")
+        self.submodules += fsm
+        fsm.act("IDLE",
+            If(self.start,
+                NextValue(cmd_counter, 0),
+                NextState("RUN")
+            ),
+            NextValue(self.ticks, 0)
+        )
+        fsm.act("WAIT",
+            If(self.run_cascade_in,
+                NextState("RUN")
+            )
+        )
+        fsm.act("RUN",
+            dma.sink.valid.eq(1),
+            If(dma.sink.ready,
+                self.run_cascade_out.eq(1),
+                NextValue(cmd_counter, cmd_counter + 1),
+                If(cmd_counter == (len(init) - 1),
+                    NextState("DONE")
+                ).Elif(~self.run_cascade_in,
+                    NextState("WAIT")
+                )
+            ),
+            NextValue(self.ticks, self.ticks + 1)
+        )
+        fsm.act("DONE",
+            self.run_cascade_out.eq(1),
+            self.done.eq(1)
+        )
+
+        if isinstance(dram_port, LiteDRAMNativePort): # addressing in dwords
+            dma_sink_addr = dma.sink.address
+        elif isinstance(dram_port, LiteDRAMAXIPort):  # addressing in bytes
+            dma_sink_addr = dma.sink.address[ashift:]
+        else:
+            raise NotImplementedError
+
+        self.comb += [
+            addr_port.adr.eq(cmd_counter),
+            dma_sink_addr.eq(addr_port.dat_r),
+            data_port.adr.eq(cmd_counter),
+            dma.sink.data.eq(data_port.dat_r),
+        ]
+
+# LiteDRAMBISTGenerator ----------------------------------------------------------------------------
+
+class LiteDRAMBISTGenerator(Module, AutoCSR):
+    """DRAM memory pattern generator.
+
+    Attributes
+    ----------
+    reset : in
+        Reset the module.
+
+    start : in
+        Start the generation.
+
+    done : out
+        The module has completed writing the pattern.
+
+    base : in
+        DRAM address to start from.
+
+    end : in
+        Max DRAM address.
+
+    length : in
+        Number of DRAM words to write.
+
+    random_data : in
+        Enable random data (LFSR)
+
+    random_addr : in
+        Enable random address (LFSR). Wrapped to (end - base), so may not be unique.
+
+    ticks : out
+        Duration of the generation.
+    """
+    def __init__(self, dram_port):
+        ashift, awidth = get_ashift_awidth(dram_port)
+        self.reset       = CSR()
+        self.start       = CSR()
+        self.done        = CSRStatus()
+        self.base        = CSRStorage(awidth)
+        self.end         = CSRStorage(awidth)
+        self.length      = CSRStorage(awidth)
+        self.random      = CSRStorage(fields=[
+            CSRField("data", size=1),
+            CSRField("addr", size=1),
+        ])
+        self.ticks       = CSRStatus(32)
+
+        # # #
+
+        clock_domain = dram_port.clock_domain
+
+        core = _LiteDRAMBISTGenerator(dram_port)
+        core = ClockDomainsRenamer(clock_domain)(core)
+        self.submodules += core
+
+        if clock_domain != "sys":
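+            # The DRAM port may run in a different clock domain than the CSR bus ("sys"):
+            # cross control and status signals over AsyncFIFOs.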
+            control_layout = [
+                ("reset", 1),
+                ("start", 1),
+                ("base",   awidth),
+                ("end",    awidth),
+                ("length", awidth),
+                ("random_data", 1),
+                ("random_addr", 1),
+            ]
+            status_layout = [
+                ("done",  1),
+                ("ticks", 32),
+            ]
+            control_cdc = stream.AsyncFIFO(control_layout)
+            control_cdc = ClockDomainsRenamer({"write" : "sys", "read": clock_domain})(control_cdc)
+            status_cdc  = stream.AsyncFIFO(status_layout)
+            status_cdc  = ClockDomainsRenamer({"write" : clock_domain, "read": "sys"})(status_cdc)
+            self.submodules += control_cdc, status_cdc
+            # Control CDC In
+            self.comb += [
+                control_cdc.sink.valid.eq(self.reset.re | self.start.re),
+                control_cdc.sink.reset.eq(self.reset.re),
+                control_cdc.sink.start.eq(self.start.re),
+                control_cdc.sink.base.eq(self.base.storage),
+                control_cdc.sink.end.eq(self.end.storage),
+                control_cdc.sink.length.eq(self.length.storage),
+                control_cdc.sink.random_data.eq(self.random.fields.data),
+                control_cdc.sink.random_addr.eq(self.random.fields.addr),
+            ]
+            # Control CDC Out
+            self.comb += [
+                control_cdc.source.ready.eq(1),
+                core.reset.eq(control_cdc.source.valid & control_cdc.source.reset),
+                core.start.eq(control_cdc.source.valid & control_cdc.source.start),
+            ]
+            self.sync += [
+                If(control_cdc.source.valid,
+                    core.base.eq(control_cdc.source.base),
+                    core.end.eq(control_cdc.source.end),
+                    core.length.eq(control_cdc.source.length),
+                    core.random_data.eq(control_cdc.source.random_data),
+                    core.random_addr.eq(control_cdc.source.random_addr),
+                )
+            ]
+            # Status CDC In
+            self.comb += [
+                status_cdc.sink.valid.eq(1),
+                status_cdc.sink.done.eq(core.done),
+                status_cdc.sink.ticks.eq(core.ticks),
+            ]
+            # Status CDC Out
+            self.comb += status_cdc.source.ready.eq(1)
+            self.sync += [
+                If(status_cdc.source.valid,
+                    self.done.status.eq(status_cdc.source.done),
+                    self.ticks.status.eq(status_cdc.source.ticks),
+                )
+            ]
+        else:
+            self.comb += [
+                core.reset.eq(self.reset.re),
+                core.start.eq(self.start.re),
+                self.done.status.eq(core.done),
+                core.base.eq(self.base.storage),
+                core.end.eq(self.end.storage),
+                core.length.eq(self.length.storage),
+                core.random_data.eq(self.random.fields.data),
+                core.random_addr.eq(self.random.fields.addr),
+                self.ticks.status.eq(core.ticks)
+            ]
+
+# _LiteDRAMBISTChecker -----------------------------------------------------------------------------
+
+@ResetInserter()
+class _LiteDRAMBISTChecker(Module, AutoCSR):
+    def __init__(self, dram_port):
+        ashift, awidth = get_ashift_awidth(dram_port)
+        self.start       = Signal()
+        self.done        = Signal()
+        self.base        = Signal(awidth)
+        self.end         = Signal(awidth)
+        self.length      = Signal(awidth)
+        self.random_data = Signal()
+        self.random_addr = Signal()
+        self.ticks       = Signal(32)
+        self.errors      = Signal(32)
+
+        self.run_cascade_in  = Signal(reset=1)
+        self.run_cascade_out = Signal()
+
+        # # #
+
+        # Data / Address generators ----------------------------------------------------------------
+        data_gen = Generator(31, n_state=31, taps=[27, 30]) # PRBS31
+        addr_gen = Generator(31, n_state=31, taps=[27, 30])
+        self.submodules += data_gen, addr_gen
+        self.comb += data_gen.random_enable.eq(self.random_data)
+        self.comb += addr_gen.random_enable.eq(self.random_addr)
+
+        # Mask the random address into the range [base, end); the range size must be a power of 2.
+        addr_mask = Signal(awidth)
+        self.comb += addr_mask.eq((self.end - self.base) - 1)
+
+        # DMA --------------------------------------------------------------------------------------
+        dma = LiteDRAMDMAReader(dram_port)
+        self.submodules += dma
+
+        # Address FSM ------------------------------------------------------------------------------
+        cmd_counter = Signal(dram_port.address_width, reset_less=True)
+
+        cmd_fsm = FSM(reset_state="IDLE")
+        self.submodules += cmd_fsm
+        cmd_fsm.act("IDLE",
+            If(self.start,
+                NextValue(cmd_counter, 0),
+                NextState("WAIT")
+            )
+        )
+        cmd_fsm.act("WAIT",
+            If(self.run_cascade_in,
+                NextState("RUN")
+            )
+        )
+        cmd_fsm.act("RUN",
+            dma.sink.valid.eq(1),
+            If(dma.sink.ready,
+                self.run_cascade_out.eq(1),
+                addr_gen.ce.eq(1),
+                NextValue(cmd_counter, cmd_counter + 1),
+                If(cmd_counter == (self.length[ashift:] - 1),
+                    NextState("DONE")
+                ).Elif(~self.run_cascade_in,
+                    NextState("WAIT")
+                )
+            )
+        )
+        cmd_fsm.act("DONE")
+
+        if isinstance(dram_port, LiteDRAMNativePort): # addressing in dwords
+            dma_sink_addr = dma.sink.address
+        elif isinstance(dram_port, LiteDRAMAXIPort):  # addressing in bytes
+            dma_sink_addr = dma.sink.address[ashift:]
+        else:
+            raise NotImplementedError
+
+        self.comb += dma_sink_addr.eq(self.base[ashift:] + (addr_gen.o & addr_mask))
+
+        # Data FSM ---------------------------------------------------------------------------------
+        data_counter = Signal(dram_port.address_width, reset_less=True)
+
+        data_fsm = FSM(reset_state="IDLE")
+        self.submodules += data_fsm
+        data_fsm.act("IDLE",
+            If(self.start,
+                NextValue(data_counter, 0),
+                NextValue(self.errors, 0),
+                NextState("RUN")
+            ),
+            NextValue(self.ticks, 0)
+        )
+
+        data_fsm.act("RUN",
+            dma.source.ready.eq(1),
+            If(dma.source.valid,
+                data_gen.ce.eq(1),
+                NextValue(data_counter, data_counter + 1),
+                If(dma.source.data != data_gen.o[:min(len(data_gen.o), dram_port.data_width)],
+                    NextValue(self.errors, self.errors + 1)
+                ),
+                If(data_counter == (self.length[ashift:] - 1),
+                    NextState("DONE")
+                )
+            ),
+            NextValue(self.ticks, self.ticks + 1)
+        )
+        data_fsm.act("DONE",
+            self.done.eq(1)
+        )
+
+@ResetInserter()
+class _LiteDRAMPatternChecker(Module, AutoCSR):
+    def __init__(self, dram_port, init=[]):
+        ashift, awidth = get_ashift_awidth(dram_port)
+        self.start  = Signal()
+        self.done   = Signal()
+        self.ticks  = Signal(32)
+        self.errors = Signal(32)
+
+        self.run_cascade_in  = Signal(reset=1)
+        self.run_cascade_out = Signal()
+
+        # # #
+
+        # Data / Address pattern -------------------------------------------------------------------
+        addr_init, data_init = zip(*init)
+        addr_mem = Memory(dram_port.address_width, len(addr_init), init=addr_init)
+        data_mem = Memory(dram_port.data_width,    len(data_init), init=data_init)
+        addr_port = addr_mem.get_port(async_read=True)
+        data_port = data_mem.get_port(async_read=True)
+        self.specials += addr_mem, data_mem, addr_port, data_port
+
+        # DMA --------------------------------------------------------------------------------------
+        dma = LiteDRAMDMAReader(dram_port)
+        self.submodules += dma
+
+        # Address FSM ------------------------------------------------------------------------------
+        cmd_counter = Signal(dram_port.address_width, reset_less=True)
+
+        cmd_fsm = FSM(reset_state="IDLE")
+        self.submodules += cmd_fsm
+        cmd_fsm.act("IDLE",
+            If(self.start,
+                NextValue(cmd_counter, 0),
+                If(self.run_cascade_in,
+                    NextState("RUN")
+                ).Else(
+                    NextState("WAIT")
+                )
+            )
+        )
+        cmd_fsm.act("WAIT",
+            If(self.run_cascade_in,
+                NextState("RUN")
+            ),
+            NextValue(self.ticks, self.ticks + 1)
+        )
+        cmd_fsm.act("RUN",
+            dma.sink.valid.eq(1),
+            If(dma.sink.ready,
+                self.run_cascade_out.eq(1),
+                NextValue(cmd_counter, cmd_counter + 1),
+                If(cmd_counter == (len(init) - 1),
+                    NextState("DONE")
+                ).Elif(~self.run_cascade_in,
+                    NextState("WAIT")
+                )
+            )
+        )
+        cmd_fsm.act("DONE")
+
+        if isinstance(dram_port, LiteDRAMNativePort): # addressing in dwords
+            dma_sink_addr = dma.sink.address
+        elif isinstance(dram_port, LiteDRAMAXIPort):  # addressing in bytes
+            dma_sink_addr = dma.sink.address[ashift:]
+        else:
+            raise NotImplementedError
+
+        self.comb += [
+            addr_port.adr.eq(cmd_counter),
+            dma_sink_addr.eq(addr_port.dat_r),
+        ]
+
+        # Data FSM ---------------------------------------------------------------------------------
+        data_counter = Signal(dram_port.address_width, reset_less=True)
+
+        expected_data = Signal.like(dma.source.data)
+        self.comb += [
+            data_port.adr.eq(data_counter),
+            expected_data.eq(data_port.dat_r),
+        ]
+
+        data_fsm = FSM(reset_state="IDLE")
+        self.submodules += data_fsm
+        data_fsm.act("IDLE",
+            If(self.start,
+                NextValue(data_counter, 0),
+                NextValue(self.errors, 0),
+                NextState("RUN")
+            ),
+            NextValue(self.ticks, 0)
+        )
+
+        data_fsm.act("RUN",
+            dma.source.ready.eq(1),
+            If(dma.source.valid,
+                NextValue(data_counter, data_counter + 1),
+                If(dma.source.data != expected_data,
+                    NextValue(self.errors, self.errors + 1)
+                ),
+                If(data_counter == (len(init) - 1),
+                    NextState("DONE")
+                )
+            ),
+            NextValue(self.ticks, self.ticks + 1)
+        )
+        data_fsm.act("DONE",
+            self.done.eq(1)
+        )
+
+# LiteDRAMBISTChecker ------------------------------------------------------------------------------
+
+class LiteDRAMBISTChecker(Module, AutoCSR):
+    """DRAM memory pattern checker.
+
+    Attributes
+    ----------
+    reset : in
+        Reset the module
+    start : in
+        Start the checking
+
+    done : out
+        The module has completed checking
+
+    base : in
+        DRAM address to start from.
+    end : in
+        Max DRAM address.
+    length : in
+        Number of DRAM words to check.
+
+    random_data : in
+        Enable random data (LFSR)
+    random_addr : in
+        Enable random address (LFSR). Wrapped to (end - base), so may not be unique.
+
+    ticks: out
+        Duration of the check.
+
+    errors : out
+        Number of DRAM words which don't match.
+    """
+    def __init__(self, dram_port):
+        ashift, awidth = get_ashift_awidth(dram_port)
+        self.reset       = CSR()
+        self.start       = CSR()
+        self.done        = CSRStatus()
+        self.base        = CSRStorage(awidth)
+        self.end         = CSRStorage(awidth)
+        self.length      = CSRStorage(awidth)
+        self.random      = CSRStorage(fields=[
+            CSRField("data", size=1),
+            CSRField("addr", size=1),
+        ])
+        self.ticks       = CSRStatus(32)
+        self.errors      = CSRStatus(32)
+
+        # # #
+
+        clock_domain = dram_port.clock_domain
+
+        core = _LiteDRAMBISTChecker(dram_port)
+        core = ClockDomainsRenamer(clock_domain)(core)
+        self.submodules += core
+
+        if clock_domain != "sys":
+            control_layout = [
+                ("reset", 1),
+                ("start", 1),
+                ("base",   awidth),
+                ("end",    awidth),
+                ("length", awidth),
+                ("random_data", 1),
+                ("random_addr", 1),
+            ]
+            status_layout = [
+                ("done",    1),
+                ("ticks",  32),
+                ("errors", 32),
+            ]
+            control_cdc = stream.AsyncFIFO(control_layout)
+            control_cdc = ClockDomainsRenamer({"write" : "sys", "read": clock_domain})(control_cdc)
+            status_cdc  = stream.AsyncFIFO(status_layout)
+            status_cdc  = ClockDomainsRenamer({"write" : clock_domain, "read": "sys"})(status_cdc)
+            self.submodules += control_cdc, status_cdc
+            # Control CDC In
+            self.comb += [
+                control_cdc.sink.valid.eq(self.reset.re | self.start.re),
+                control_cdc.sink.reset.eq(self.reset.re),
+                control_cdc.sink.start.eq(self.start.re),
+                control_cdc.sink.base.eq(self.base.storage),
+                control_cdc.sink.end.eq(self.end.storage),
+                control_cdc.sink.length.eq(self.length.storage),
+                control_cdc.sink.random_data.eq(self.random.fields.data),
+                control_cdc.sink.random_addr.eq(self.random.fields.addr),
+            ]
+            # Control CDC Out
+            self.comb += [
+                control_cdc.source.ready.eq(1),
+                core.reset.eq(control_cdc.source.valid & control_cdc.source.reset),
+                core.start.eq(control_cdc.source.valid & control_cdc.source.start),
+            ]
+            self.sync += [
+                If(control_cdc.source.valid,
+                    core.base.eq(control_cdc.source.base),
+                    core.end.eq(control_cdc.source.end),
+                    core.length.eq(control_cdc.source.length),
+                    core.random_data.eq(control_cdc.source.random_data),
+                    core.random_addr.eq(control_cdc.source.random_addr),
+                )
+            ]
+            # Status CDC In
+            self.comb += [
+                status_cdc.sink.valid.eq(1),
+                status_cdc.sink.done.eq(core.done),
+                status_cdc.sink.ticks.eq(core.ticks),
+                status_cdc.sink.errors.eq(core.errors),
+            ]
+            # Status CDC Out
+            self.comb += status_cdc.source.ready.eq(1)
+            self.sync += [
+                If(status_cdc.source.valid,
+                    self.done.status.eq(status_cdc.source.done),
+                    self.ticks.status.eq(status_cdc.source.ticks),
+                    self.errors.status.eq(status_cdc.source.errors),
+                )
+            ]
+        else:
+            self.comb += [
+                core.reset.eq(self.reset.re),
+                core.start.eq(self.start.re),
+                self.done.status.eq(core.done),
+                core.base.eq(self.base.storage),
+                core.end.eq(self.end.storage),
+                core.length.eq(self.length.storage),
+                core.random_data.eq(self.random.fields.data),
+                core.random_addr.eq(self.random.fields.addr),
+                self.ticks.status.eq(core.ticks),
+                self.errors.status.eq(core.errors),
+            ]
diff --git a/gram/frontend/dma.py b/gram/frontend/dma.py
new file mode 100644 (file)
index 0000000..0919bd1
--- /dev/null
@@ -0,0 +1,253 @@
+# This file is Copyright (c) 2015 Sebastien Bourdeauducq <sb@m-labs.hk>
+# This file is Copyright (c) 2016-2019 Florent Kermarrec <florent@enjoy-digital.fr>
+# This file is Copyright (c) 2018 John Sully <john@csquare.ca>
+# This file is Copyright (c) 2016 Tim 'mithro' Ansell <mithro@mithis.com>
+# License: BSD
+
+"""Direct Memory Access (DMA) reader and writer modules."""
+
+from migen import *
+
+from litex.soc.interconnect.csr import *
+from litex.soc.interconnect import stream
+
+from litedram.common import LiteDRAMNativePort
+from litedram.frontend.axi import LiteDRAMAXIPort
+
+# LiteDRAMDMAReader --------------------------------------------------------------------------------
+
+class LiteDRAMDMAReader(Module, AutoCSR):
+    """Read data from DRAM memory.
+
+    For every address written to the sink, one DRAM word will be produced on
+    the source.
+
+    Parameters
+    ----------
+    port : port
+        Port on the DRAM memory controller to read from (Native or AXI).
+
+    fifo_depth : int
+        How many request results the output FIFO can contain (and thus how many
+        read requests can be outstanding at once).
+
+    fifo_buffered : bool
+        Implement FIFO in Block Ram.
+
+    Attributes
+    ----------
+    sink : Record("address")
+        Sink for DRAM addresses to be read.
+
+    source : Record("data")
+        Source for DRAM word results from reading.
+
+    rsv_level: Signal()
+        FIFO reservation level counter
+    """
+
+    def __init__(self, port, fifo_depth=16, fifo_buffered=False):
+        assert isinstance(port, (LiteDRAMNativePort, LiteDRAMAXIPort))
+        self.port   = port
+        self.sink   = sink   = stream.Endpoint([("address", port.address_width)])
+        self.source = source = stream.Endpoint([("data", port.data_width)])
+
+        # # #
+
+        # Native / AXI selection
+        is_native = isinstance(port, LiteDRAMNativePort)
+        is_axi    = isinstance(port, LiteDRAMAXIPort)
+        if is_native:
+            (cmd, rdata) = port.cmd, port.rdata
+        elif is_axi:
+            (cmd, rdata) = port.ar, port.r
+        else:
+            raise NotImplementedError
+
+        # Request issuance -------------------------------------------------------------------------
+        request_enable = Signal()
+        request_issued = Signal()
+
+        if is_native:
+            self.comb += cmd.we.eq(0)
+        self.comb += [
+            cmd.addr.eq(sink.address),
+            cmd.valid.eq(sink.valid & request_enable),
+            sink.ready.eq(cmd.ready & request_enable),
+            request_issued.eq(cmd.valid & cmd.ready)
+        ]
+
+        # FIFO reservation level counter -----------------------------------------------------------
+        # incremented when data is planned to be queued
+        # decremented when data is dequeued
+        data_dequeued = Signal()
+        self.rsv_level = rsv_level = Signal(max=fifo_depth+1)
+        self.sync += [
+            If(request_issued,
+                If(~data_dequeued, rsv_level.eq(self.rsv_level + 1))
+            ).Elif(data_dequeued,
+                rsv_level.eq(rsv_level - 1)
+            )
+        ]
+        self.comb += request_enable.eq(rsv_level != fifo_depth)
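+        # Stall new requests once every FIFO slot is reserved, so read data can never be dropped.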
+
+        # FIFO -------------------------------------------------------------------------------------
+        fifo = stream.SyncFIFO([("data", port.data_width)], fifo_depth, fifo_buffered)
+        self.submodules += fifo
+
+        self.comb += [
+            rdata.connect(fifo.sink, omit={"id", "resp"}),
+            fifo.source.connect(source),
+            data_dequeued.eq(source.valid & source.ready)
+        ]
+
+    def add_csr(self):
+        self._base   = CSRStorage(32)
+        self._length = CSRStorage(32)
+        self._start  = CSR()
+        self._done   = CSRStatus()
+        self._loop   = CSRStorage()
+
+        # # #
+
+        shift   = log2_int(self.port.data_width//8)
+        base    = Signal(self.port.address_width)
+        offset  = Signal(self.port.address_width)
+        length  = Signal(self.port.address_width)
+        self.comb += [
+            base.eq(self._base.storage[shift:]),
+            length.eq(self._length.storage[shift:]),
+        ]
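+        # The base and length CSRs are byte quantities; dropping the low log2(bytes per word)
+        # bits converts them to DRAM word addresses/counts.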
+
+        self.submodules.fsm = fsm = FSM(reset_state="IDLE")
+        fsm.act("IDLE",
+            self._done.status.eq(1),
+            If(self._start.re,
+                NextValue(offset, 0),
+                NextState("RUN"),
+            )
+        )
+        fsm.act("RUN",
+            self.sink.valid.eq(1),
+            self.sink.address.eq(base + offset),
+            If(self.sink.ready,
+                NextValue(offset, offset + 1),
+                If(offset == (length - 1),
+                    If(self._loop.storage,
+                        NextValue(offset, 0)
+                    ).Else(
+                        NextState("IDLE")
+                    )
+                )
+            )
+        )
+
+# LiteDRAMDMAWriter --------------------------------------------------------------------------------
+
+class LiteDRAMDMAWriter(Module, AutoCSR):
+    """Write data to DRAM memory.
+
+    Parameters
+    ----------
+    port : port
+        Port on the DRAM memory controller to write to (Native or AXI).
+
+    fifo_depth : int
+        How many requests the input FIFO can contain (and thus how many write
+        requests can be outstanding at once).
+
+    fifo_buffered : bool
+        Implement the FIFO in block RAM.
+
+    Attributes
+    ----------
+    sink : Record("address", "data")
+        Sink for DRAM addresses and DRAM data words to be written.
+    """
+    def __init__(self, port, fifo_depth=16, fifo_buffered=False):
+        assert isinstance(port, (LiteDRAMNativePort, LiteDRAMAXIPort))
+        self.port = port
+        self.sink = sink = stream.Endpoint([("address", port.address_width),
+                                            ("data", port.data_width)])
+
+        # # #
+
+        # Native / AXI selection -------------------------------------------------------------------
+        is_native = isinstance(port, LiteDRAMNativePort)
+        is_axi    = isinstance(port, LiteDRAMAXIPort)
+        if is_native:
+            (cmd, wdata) = port.cmd, port.wdata
+        elif is_axi:
+            (cmd, wdata) = port.aw, port.w
+        else:
+            raise NotImplementedError
+
+        # FIFO -------------------------------------------------------------------------------------
+        fifo = stream.SyncFIFO([("data", port.data_width)], fifo_depth, fifo_buffered)
+        self.submodules += fifo
+
+        if is_native:
+            self.comb += cmd.we.eq(1)
+        self.comb += [
+            cmd.addr.eq(sink.address),
+            cmd.valid.eq(fifo.sink.ready & sink.valid),
+            sink.ready.eq(fifo.sink.ready & cmd.ready),
+            fifo.sink.valid.eq(sink.valid & cmd.ready),
+            fifo.sink.data.eq(sink.data)
+        ]
+
+        if is_native:
+            self.comb += wdata.we.eq(2**(port.data_width//8)-1)
+        if is_axi:
+            self.comb += wdata.strb.eq(2**(port.data_width//8)-1)
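+        # Byte enables are always fully asserted: the DMA only writes complete DRAM words.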
+        self.comb += [
+            wdata.valid.eq(fifo.source.valid),
+            fifo.source.ready.eq(wdata.ready),
+            wdata.data.eq(fifo.source.data)
+        ]
+
+    def add_csr(self):
+        self._sink = self.sink
+        self.sink  = stream.Endpoint([("data", self.port.data_width)])
+
+        self._base   = CSRStorage(32)
+        self._length = CSRStorage(32)
+        self._start  = CSR()
+        self._done   = CSRStatus()
+        self._loop   = CSRStorage()
+
+        # # #
+
+        shift   = log2_int(self.port.data_width//8)
+        base    = Signal(self.port.address_width)
+        offset  = Signal(self.port.address_width)
+        length  = Signal(self.port.address_width)
+        self.comb += [
+            base.eq(self._base.storage[shift:]),
+            length.eq(self._length.storage[shift:]),
+        ]
+
+        self.submodules.fsm = fsm = FSM(reset_state="IDLE")
+        fsm.act("IDLE",
+            self._done.status.eq(1),
+            If(self._start.re,
+                NextValue(offset, 0),
+                NextState("RUN"),
+            )
+        )
+        fsm.act("RUN",
+            self._sink.valid.eq(self.sink.valid),
+            self._sink.data.eq(self.sink.data),
+            self._sink.address.eq(base + offset),
+            self.sink.ready.eq(self._sink.ready),
+            If(self.sink.valid & self.sink.ready,
+                NextValue(offset, offset + 1),
+                If(offset == (length - 1),
+                    If(self._loop.storage,
+                        NextValue(offset, 0)
+                    ).Else(
+                        NextState("IDLE")
+                    )
+                )
+            )
+        )
diff --git a/gram/frontend/ecc.py b/gram/frontend/ecc.py
new file mode 100644 (file)
index 0000000..71a2019
--- /dev/null
@@ -0,0 +1,141 @@
+# This file is Copyright (c) 2018-2019 Florent Kermarrec <florent@enjoy-digital.fr>
+# License: BSD
+
+"""
+ECC frontend for LiteDRAM
+
+Adds ECC support to Native ports.
+
+Features:
+- Single Error Correction.
+- Double Error Detection.
+- Errors injection.
+- Errors reporting.
+
+Limitations:
+- Byte enable not supported for writes.
+"""
+
+from migen import *
+
+from litex.soc.interconnect.csr import *
+from litex.soc.interconnect.stream import *
+from litex.soc.cores.ecc import *
+
+from litedram.common import wdata_description, rdata_description
+
+
+# LiteDRAMNativePortECCW ---------------------------------------------------------------------------
+
+class LiteDRAMNativePortECCW(Module):
+    def __init__(self, data_width_from, data_width_to):
+        self.sink   = sink   = Endpoint(wdata_description(data_width_from))
+        self.source = source = Endpoint(wdata_description(data_width_to))
+
+        # # #
+
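+        # Split the write word into 8 slices and ECC-encode each slice independently;
+        # byte enables on the controller side are forced to all ones (byte enables are
+        # not supported, see Limitations above).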
+        for i in range(8):
+            encoder = ECCEncoder(data_width_from//8)
+            self.submodules += encoder
+            self.comb += [
+                sink.connect(source, omit={"data", "we"}),
+                encoder.i.eq(sink.data[i*data_width_from//8:(i+1)*data_width_from//8]),
+                source.data[i*data_width_to//8:(i+1)*data_width_to//8].eq(encoder.o)
+            ]
+        self.comb += source.we.eq(2**len(source.we)-1)
+
+# LiteDRAMNativePortECCR ---------------------------------------------------------------------------
+
+class LiteDRAMNativePortECCR(Module):
+    def __init__(self, data_width_from, data_width_to):
+        self.sink   = sink   = Endpoint(rdata_description(data_width_to))
+        self.source = source = Endpoint(rdata_description(data_width_from))
+        self.enable = Signal()
+        self.sec    = Signal(8)
+        self.ded    = Signal(8)
+
+        # # #
+
+        self.comb +=  sink.connect(source, omit={"data"})
+
+        for i in range(8):
+            decoder = ECCDecoder(data_width_from//8)
+            self.submodules += decoder
+            self.comb += [
+                decoder.enable.eq(self.enable),
+                decoder.i.eq(sink.data[i*data_width_to//8:(i+1)*data_width_to//8]),
+                source.data[i*data_width_from//8:(i+1)*data_width_from//8].eq(decoder.o),
+                If(source.valid,
+                    self.sec[i].eq(decoder.sec),
+                    self.ded[i].eq(decoder.ded)
+                )
+            ]
+
+# LiteDRAMNativePortECC ----------------------------------------------------------------------------
+
+class LiteDRAMNativePortECC(Module, AutoCSR):
+    def __init__(self, port_from, port_to, with_error_injection=False):
+        _ , n = compute_m_n(port_from.data_width//8)
+        assert port_to.data_width >= (n + 1)*8
+
+        self.enable     = CSRStorage(reset=1)
+        self.clear      = CSR()
+        self.sec_errors = CSRStatus(32)
+        self.ded_errors = CSRStatus(32)
+        self.sec_detected = sec_detected = Signal()
+        self.ded_detected = ded_detected = Signal()
+        if with_error_injection:
+            self.flip = CSRStorage(8)
+
+        # # #
+
+        # Cmd --------------------------------------------------------------------------------------
+        self.comb += port_from.cmd.connect(port_to.cmd)
+
+        # Wdata (ecc encoding) ---------------------------------------------------------------------
+        ecc_wdata = LiteDRAMNativePortECCW(port_from.data_width, port_to.data_width)
+        ecc_wdata = BufferizeEndpoints({"source": DIR_SOURCE})(ecc_wdata)
+        self.submodules += ecc_wdata
+        self.comb += [
+            port_from.wdata.connect(ecc_wdata.sink),
+            ecc_wdata.source.connect(port_to.wdata)
+        ]
+        if with_error_injection:
+            self.comb += port_to.wdata.data[:8].eq(self.flip.storage ^ ecc_wdata.source.data[:8])
+
+        # Rdata (ecc decoding) ---------------------------------------------------------------------
+        sec = Signal()
+        ded = Signal()
+        ecc_rdata = LiteDRAMNativePortECCR(port_from.data_width, port_to.data_width)
+        ecc_rdata = BufferizeEndpoints({"source": DIR_SOURCE})(ecc_rdata)
+        self.submodules += ecc_rdata
+        self.comb += [
+            ecc_rdata.enable.eq(self.enable.storage),
+            port_to.rdata.connect(ecc_rdata.sink),
+            ecc_rdata.source.connect(port_from.rdata)
+        ]
+
+        # Errors count -----------------------------------------------------------------------------
+        sec_errors = self.sec_errors.status
+        ded_errors = self.ded_errors.status
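+        # Saturating error counters: stop incrementing once all bits are set so the counts never wrap.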
+        self.sync += [
+            If(self.clear.re,
+                sec_errors.eq(0),
+                ded_errors.eq(0),
+                sec_detected.eq(0),
+                ded_detected.eq(0),
+            ).Else(
+                If(sec_errors != (2**len(sec_errors) - 1),
+                    If(ecc_rdata.sec != 0,
+                        sec_detected.eq(1),
+                        sec_errors.eq(sec_errors + 1)
+                    )
+                ),
+                If(ded_errors != (2**len(ded_errors) - 1),
+                    If(ecc_rdata.ded != 0,
+                        ded_detected.eq(1),
+                        ded_errors.eq(ded_errors + 1)
+                    )
+                )
+            )
+        ]
diff --git a/gram/frontend/fifo.py b/gram/frontend/fifo.py
new file mode 100644 (file)
index 0000000..6d1286a
--- /dev/null
@@ -0,0 +1,123 @@
+# This file is Copyright (c) 2018-2019 Florent Kermarrec <florent@enjoy-digital.fr>
+# License: BSD
+
+from litex.gen import *
+
+from litex.soc.interconnect import stream
+
+from litedram.frontend import dma
+
+
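+# Increment `signal` modulo `modulo`; when the modulus equals the signal's full range,
+# rely on the natural wrap-around instead of an explicit compare.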
+def _inc(signal, modulo):
+    if modulo == 2**len(signal):
+        return signal.eq(signal + 1)
+    else:
+        return If(signal == (modulo - 1),
+            signal.eq(0)
+        ).Else(
+            signal.eq(signal + 1)
+        )
+
+
+class _LiteDRAMFIFOCtrl(Module):
+    def __init__(self, base, depth, read_threshold, write_threshold):
+        self.base  = base
+        self.depth = depth
+        self.level = Signal(max=depth+1)
+
+        # # #
+
+        # To write buffer
+        self.writable = Signal()
+        self.write_address = Signal(max=depth)
+
+        # From write buffer
+        self.write = Signal()
+
+        # To read buffer
+        self.readable = Signal()
+        self.read_address = Signal(max=depth)
+
+        # From read buffer
+        self.read = Signal()
+
+        # # #
+
+        produce = self.write_address
+        consume = self.read_address
+
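+        # DRAM is used as a ring buffer: `produce`/`consume` wrap around `depth` on each
+        # write/read, while `level` tracks the current occupancy and is compared against the
+        # thresholds to derive the writable/readable flags.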
+        self.sync += [
+            If(self.write,
+                _inc(produce, depth)
+            ),
+            If(self.read,
+                _inc(consume, depth)
+            ),
+            If(self.write & ~self.read,
+                self.level.eq(self.level + 1),
+            ).Elif(self.read & ~self.write,
+                self.level.eq(self.level - 1)
+            )
+        ]
+
+        self.comb += [
+            self.writable.eq(self.level < write_threshold),
+            self.readable.eq(self.level > read_threshold)
+        ]
+
+
+class _LiteDRAMFIFOWriter(Module):
+    def __init__(self, data_width, port, ctrl):
+        self.sink = sink = stream.Endpoint([("data", data_width)])
+
+        # # #
+
+        self.submodules.writer = writer = dma.LiteDRAMDMAWriter(port, fifo_depth=32)
+        self.comb += [
+            writer.sink.valid.eq(sink.valid & ctrl.writable),
+            writer.sink.address.eq(ctrl.base + ctrl.write_address),
+            writer.sink.data.eq(sink.data),
+            If(writer.sink.valid & writer.sink.ready,
+                ctrl.write.eq(1),
+                sink.ready.eq(1)
+            )
+        ]
+
+
+class _LiteDRAMFIFOReader(Module):
+    def __init__(self, data_width, port, ctrl):
+        self.source = source = stream.Endpoint([("data", data_width)])
+
+        # # #
+
+        self.submodules.reader = reader = dma.LiteDRAMDMAReader(port, fifo_depth=32)
+        self.comb += [
+            reader.sink.valid.eq(ctrl.readable),
+            reader.sink.address.eq(ctrl.base + ctrl.read_address),
+            If(reader.sink.valid & reader.sink.ready,
+                ctrl.read.eq(1)
+            )
+        ]
+        self.comb += reader.source.connect(source)
+
+
+class LiteDRAMFIFO(Module):
+    def __init__(self, data_width, base, depth, write_port, read_port,
+        read_threshold=None, write_threshold=None):
+        self.sink   = stream.Endpoint([("data", data_width)])
+        self.source = stream.Endpoint([("data", data_width)])
+
+        # # #
+
+        if read_threshold is None:
+            read_threshold = 0
+        if write_threshold is None:
+            write_threshold = depth
+
+        self.submodules.ctrl   = _LiteDRAMFIFOCtrl(base, depth, read_threshold, write_threshold)
+        self.submodules.writer = _LiteDRAMFIFOWriter(data_width, write_port, self.ctrl)
+        self.submodules.reader = _LiteDRAMFIFOReader(data_width, read_port, self.ctrl)
+        self.comb += [
+            self.sink.connect(self.writer.sink),
+            self.reader.source.connect(self.source)
+        ]
diff --git a/gram/frontend/wishbone.py b/gram/frontend/wishbone.py
new file mode 100644 (file)
index 0000000..f3e6ea3
--- /dev/null
@@ -0,0 +1,81 @@
+# This file is Copyright (c) 2016-2020 Florent Kermarrec <florent@enjoy-digital.fr>
+# License: BSD
+
+"""Wishbone frontend for LiteDRAM"""
+
+from math import log2
+
+from nmigen import *
+from nmigen.utils import log2_int
+
+import gram.stream as stream
+
+
+# LiteDRAMWishbone2Native --------------------------------------------------------------------------
+
+class LiteDRAMWishbone2Native(Module):
+    def __init__(self, wishbone, port, base_address=0x00000000):
+        wishbone_data_width = len(wishbone.dat_w)
+        port_data_width     = 2**int(log2(len(port.wdata.data))) # Round down to the nearest power of 2
+        assert wishbone_data_width >= port_data_width
+
+        # # #
+
+        adr_offset = base_address >> log2_int(port.data_width//8)
+
+        # Write Datapath ---------------------------------------------------------------------------
+        wdata_converter = stream.StrideConverter(
+            [("data", wishbone_data_width), ("we", wishbone_data_width//8)],
+            [("data", port_data_width),     ("we", port_data_width//8)],
+        )
+        self.submodules += wdata_converter
+        self.comb += [
+            wdata_converter.sink.valid.eq(wishbone.cyc & wishbone.stb & wishbone.we),
+            wdata_converter.sink.data.eq(wishbone.dat_w),
+            wdata_converter.sink.we.eq(wishbone.sel),
+            wdata_converter.source.connect(port.wdata)
+        ]
+
+        # Read Datapath ----------------------------------------------------------------------------
+        rdata_converter = stream.StrideConverter(
+            [("data", port_data_width)],
+            [("data", wishbone_data_width)],
+        )
+        self.submodules += rdata_converter
+        self.comb += [
+            port.rdata.connect(rdata_converter.sink),
+            rdata_converter.source.ready.eq(1),
+            wishbone.dat_r.eq(rdata_converter.source.data),
+        ]
+
+        # Control ----------------------------------------------------------------------------------
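+        # Each Wishbone access is split into `ratio` consecutive commands on the narrower native
+        # port; `count` tracks the position within that burst, and the Wishbone cycle is only
+        # acknowledged once the data converter has absorbed (write) or produced (read) all beats.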
+        ratio = wishbone_data_width//port_data_width
+        count = Signal(max=max(ratio, 2))
+        self.submodules.fsm = fsm = FSM(reset_state="CMD")
+        fsm.act("CMD",
+            port.cmd.valid.eq(wishbone.cyc & wishbone.stb),
+            port.cmd.we.eq(wishbone.we),
+            port.cmd.addr.eq(wishbone.adr*ratio + count - adr_offset),
+            If(port.cmd.valid & port.cmd.ready,
+                NextValue(count, count + 1),
+                If(count == (ratio - 1),
+                    NextValue(count, 0),
+                    If(wishbone.we,
+                        NextState("WAIT-WRITE")
+                    ).Else(
+                        NextState("WAIT-READ")
+                    )
+                )
+            )
+        )
+        fsm.act("WAIT-WRITE",
+            If(wdata_converter.sink.ready,
+                wishbone.ack.eq(1),
+                NextState("CMD")
+            )
+        )
+        fsm.act("WAIT-READ",
+            If(rdata_converter.source.valid,
+               wishbone.ack.eq(1),
+               NextState("CMD")
+            )
+        )
diff --git a/gram/gen.py b/gram/gen.py
new file mode 100755 (executable)
index 0000000..12cc076
--- /dev/null
@@ -0,0 +1,568 @@
+#!/usr/bin/env python3
+
+# This file is Copyright (c) 2018-2020 Florent Kermarrec <florent@enjoy-digital.fr>
+# This file is Copyright (c) 2020 Stefan Schrijvers <ximin@ximinity.net>
+# License: BSD
+
+"""
+LiteDRAM standalone core generator
+
+LiteDRAM is primarily meant to be used directly as a Python package when the SoC is created with
+LiteX. However, for some use cases it can be useful to generate a standalone Verilog file of the
+core:
+- integrating the core in a SoC using a more traditional flow.
+- versioning/packaging the core.
+- avoiding Migen/LiteX dependencies.
+- etc.
+
+The standalone core is generated from a YAML configuration file that lets the user easily describe
+a custom configuration of the core.
+
+The current version of the generator is limited to DDR2/DDR3 on Xilinx 7-Series FPGAs and DDR3 on
+Lattice ECP5.
+"""
+
+import os
+import sys
+import math
+import struct
+import yaml
+import argparse
+
+from migen import *
+from migen.genlib.resetsync import AsyncResetSynchronizer
+
+from litex.build.tools import replace_in_file
+from litex.build.generic_platform import *
+from litex.build.xilinx import XilinxPlatform
+from litex.build.lattice import LatticePlatform
+from litex.boards.platforms import versa_ecp5
+
+from litex.soc.cores.clock import *
+from litex.soc.integration.soc_core import *
+from litex.soc.integration.builder import *
+from litex.soc.interconnect import wishbone
+from litex.soc.cores.uart import *
+
+from gram import modules as litedram_modules
+from gram import phy as litedram_phys
+from gram.phy.ecp5ddrphy import ECP5DDRPHY
+from gram.phy.s7ddrphy import S7DDRPHY
+from gram.core.controller import ControllerSettings
+from gram.frontend.axi import *
+from gram.frontend.wishbone import *
+from gram.frontend.bist import LiteDRAMBISTGenerator
+from gram.frontend.bist import LiteDRAMBISTChecker
+from gram.frontend.fifo import LiteDRAMFIFO
+
+# IOs/Interfaces -----------------------------------------------------------------------------------
+
+def get_common_ios():
+    return [
+        # clk / rst
+        ("clk", 0, Pins(1)),
+        ("rst", 0, Pins(1)),
+
+        # serial
+        ("serial", 0,
+            Subsignal("tx", Pins(1)),
+            Subsignal("rx", Pins(1))
+        ),
+
+        # crg status
+        ("pll_locked", 0, Pins(1)),
+
+        # init status
+        ("init_done",  0, Pins(1)),
+        ("init_error", 0, Pins(1)),
+
+        # iodelay clk / rst
+        ("clk_iodelay", 0, Pins(1)),
+        ("rst_iodelay", 0, Pins(1)),
+
+        # user clk / rst
+        ("user_clk", 0, Pins(1)),
+        ("user_rst", 0, Pins(1))
+    ]
+
+def get_dram_ios(core_config):
+    sdram_module = core_config["sdram_module"]
+    return [
+        ("ddram", 0,
+            Subsignal("a",       Pins(log2_int(core_config["sdram_module"].nrows))),
+            Subsignal("ba",      Pins(log2_int(core_config["sdram_module"].nbanks))),
+            Subsignal("ras_n",   Pins(1)),
+            Subsignal("cas_n",   Pins(1)),
+            Subsignal("we_n",    Pins(1)),
+            Subsignal("cs_n",    Pins(core_config["sdram_rank_nb"])),
+            Subsignal("dm",      Pins(core_config["sdram_module_nb"])),
+            Subsignal("dq",      Pins(8*core_config["sdram_module_nb"])),
+            Subsignal("dqs_p",   Pins(core_config["sdram_module_nb"])),
+            Subsignal("dqs_n",   Pins(core_config["sdram_module_nb"])),
+            Subsignal("clk_p",   Pins(core_config["sdram_rank_nb"])),
+            Subsignal("clk_n",   Pins(core_config["sdram_rank_nb"])),
+            Subsignal("cke",     Pins(core_config["sdram_rank_nb"])),
+            Subsignal("odt",     Pins(core_config["sdram_rank_nb"])),
+            Subsignal("reset_n", Pins(1))
+        ),
+    ]
+
+def get_native_user_port_ios(_id, aw, dw):
+    return [
+        ("user_port_{}".format(_id), 0,
+            # cmd
+            Subsignal("cmd_valid", Pins(1)),
+            Subsignal("cmd_ready", Pins(1)),
+            Subsignal("cmd_we",    Pins(1)),
+            Subsignal("cmd_addr",  Pins(aw)),
+
+            # wdata
+            Subsignal("wdata_valid", Pins(1)),
+            Subsignal("wdata_ready", Pins(1)),
+            Subsignal("wdata_we",    Pins(dw//8)),
+            Subsignal("wdata_data",  Pins(dw)),
+
+            # rdata
+            Subsignal("rdata_valid", Pins(1)),
+            Subsignal("rdata_ready", Pins(1)),
+            Subsignal("rdata_data",  Pins(dw))
+        ),
+    ]
+
+def get_wishbone_user_port_ios(_id, aw, dw):
+    return [
+        ("user_port_{}".format(_id), 0,
+            Subsignal("adr",   Pins(aw)),
+            Subsignal("dat_w", Pins(dw)),
+            Subsignal("dat_r", Pins(dw)),
+            Subsignal("sel",   Pins(dw//8)),
+            Subsignal("cyc",   Pins(1)),
+            Subsignal("stb",   Pins(1)),
+            Subsignal("ack",   Pins(1)),
+            Subsignal("we",    Pins(1)),
+            Subsignal("err",   Pins(1)),
+        ),
+    ]
+
+def get_axi_user_port_ios(_id, aw, dw, iw):
+    return [
+        ("user_port_{}".format(_id), 0,
+            # aw
+            Subsignal("awvalid", Pins(1)),
+            Subsignal("awready", Pins(1)),
+            Subsignal("awaddr",  Pins(aw)),
+            Subsignal("awburst", Pins(2)),
+            Subsignal("awlen",   Pins(8)),
+            Subsignal("awsize",  Pins(4)),
+            Subsignal("awid",    Pins(iw)),
+
+            # w
+            Subsignal("wvalid", Pins(1)),
+            Subsignal("wready", Pins(1)),
+            Subsignal("wlast",  Pins(1)),
+            Subsignal("wstrb",  Pins(dw//8)),
+            Subsignal("wdata",  Pins(dw)),
+
+            # b
+            Subsignal("bvalid", Pins(1)),
+            Subsignal("bready", Pins(1)),
+            Subsignal("bresp",  Pins(2)),
+            Subsignal("bid",    Pins(iw)),
+
+            # ar
+            Subsignal("arvalid", Pins(1)),
+            Subsignal("arready", Pins(1)),
+            Subsignal("araddr",  Pins(aw)),
+            Subsignal("arburst", Pins(2)),
+            Subsignal("arlen",   Pins(8)),
+            Subsignal("arsize",  Pins(4)),
+            Subsignal("arid",    Pins(iw)),
+
+            # r
+            Subsignal("rvalid", Pins(1)),
+            Subsignal("rready", Pins(1)),
+            Subsignal("rlast",  Pins(1)),
+            Subsignal("rresp",  Pins(2)),
+            Subsignal("rdata",  Pins(dw)),
+            Subsignal("rid",    Pins(iw))
+        ),
+    ]
+
+def get_fifo_user_port_ios(_id, dw):
+    return [
+        ("user_fifo_{}".format(_id), 0,
+            # in
+            Subsignal("in_valid", Pins(1)),
+            Subsignal("in_ready", Pins(1)),
+            Subsignal("in_data",  Pins(dw)),
+
+            # out
+            Subsignal("out_valid", Pins(1)),
+            Subsignal("out_ready", Pins(1)),
+            Subsignal("out_data",  Pins(dw)),
+        ),
+    ]
+
+
+class Platform(XilinxPlatform):
+    def __init__(self):
+        XilinxPlatform.__init__(self, "", io=[], toolchain="vivado")
+
+# CRG ----------------------------------------------------------------------------------------------
+
+class LiteDRAMECP5DDRPHYCRG(Module):
+    def __init__(self, platform, core_config):
+        self.clock_domains.cd_init    = ClockDomain()
+        self.clock_domains.cd_por     = ClockDomain(reset_less=True)
+        self.clock_domains.cd_sys     = ClockDomain()
+        self.clock_domains.cd_sys2x   = ClockDomain()
+        self.clock_domains.cd_sys2x_i = ClockDomain(reset_less=True)
+
+        # # #
+
+        self.stop = Signal()
+
+        # clk / rst
+        clk = platform.request("clk")
+        rst = platform.request("rst")
+
+        # power on reset
+        por_count = Signal(16, reset=2**16-1)
+        por_done  = Signal()
+        self.comb += self.cd_por.clk.eq(ClockSignal())
+        self.comb += por_done.eq(por_count == 0)
+        self.sync.por += If(~por_done, por_count.eq(por_count - 1))
+
+        # pll
+        self.submodules.pll = pll = ECP5PLL()
+        pll.register_clkin(clk, core_config["input_clk_freq"])
+        pll.create_clkout(self.cd_sys2x_i, 2*core_config["sys_clk_freq"])
+        pll.create_clkout(self.cd_init, core_config["init_clk_freq"])
+        self.specials += [
+            Instance("ECLKSYNCB",
+                i_ECLKI = self.cd_sys2x_i.clk,
+                i_STOP  = self.stop,
+                o_ECLKO = self.cd_sys2x.clk),
+            Instance("CLKDIVF",
+                p_DIV     = "2.0",
+                i_ALIGNWD = 0,
+                i_CLKI    = self.cd_sys2x.clk,
+                i_RST     = self.cd_sys2x.rst,
+                o_CDIVX   = self.cd_sys.clk),
+            AsyncResetSynchronizer(self.cd_init, ~por_done | ~pll.locked | rst),
+            AsyncResetSynchronizer(self.cd_sys,  ~por_done | ~pll.locked | rst),
+        ]
+
+class LiteDRAMS7DDRPHYCRG(Module):
+    def __init__(self, platform, core_config):
+        self.clock_domains.cd_sys = ClockDomain()
+        if core_config["memtype"] == "DDR3":
+            self.clock_domains.cd_sys4x     = ClockDomain(reset_less=True)
+            self.clock_domains.cd_sys4x_dqs = ClockDomain(reset_less=True)
+        else:
+            self.clock_domains.cd_sys2x     = ClockDomain(reset_less=True)
+            self.clock_domains.cd_sys2x_dqs = ClockDomain(reset_less=True)
+        self.clock_domains.cd_iodelay = ClockDomain()
+
+        # # #
+
+        clk = platform.request("clk")
+        rst = platform.request("rst")
+
+        self.submodules.sys_pll = sys_pll = S7PLL(speedgrade=core_config["speedgrade"])
+        self.comb += sys_pll.reset.eq(rst)
+        sys_pll.register_clkin(clk, core_config["input_clk_freq"])
+        sys_pll.create_clkout(self.cd_sys, core_config["sys_clk_freq"])
+        if core_config["memtype"] == "DDR3":
+            sys_pll.create_clkout(self.cd_sys4x, 4*core_config["sys_clk_freq"])
+            sys_pll.create_clkout(self.cd_sys4x_dqs, 4*core_config["sys_clk_freq"], phase=90)
+        else:
+            sys_pll.create_clkout(self.cd_sys2x, 2*core_config["sys_clk_freq"])
+            sys_pll.create_clkout(self.cd_sys2x_dqs, 2*core_config["sys_clk_freq"], phase=90)
+        self.comb += platform.request("pll_locked").eq(sys_pll.locked)
+
+        self.submodules.iodelay_pll = iodelay_pll = S7PLL(speedgrade=core_config["speedgrade"])
+        self.comb += iodelay_pll.reset.eq(rst)
+        iodelay_pll.register_clkin(clk, core_config["input_clk_freq"])
+        iodelay_pll.create_clkout(self.cd_iodelay, core_config["iodelay_clk_freq"])
+        self.submodules.idelayctrl = S7IDELAYCTRL(self.cd_iodelay)
+
+# LiteDRAMCoreControl ------------------------------------------------------------------------------
+
+class LiteDRAMCoreControl(Module, AutoCSR):
+    def __init__(self):
+        self.init_done  = CSRStorage()
+        self.init_error = CSRStorage()
+
+# LiteDRAMCore -------------------------------------------------------------------------------------
+
+class LiteDRAMCore(SoCCore):
+    def __init__(self, platform, core_config, **kwargs):
+        platform.add_extension(get_common_ios())
+
+        # Parameters -------------------------------------------------------------------------------
+        sys_clk_freq  = core_config["sys_clk_freq"]
+        cpu_type      = core_config["cpu"]
+        cpu_variant   = core_config.get("cpu_variant", "standard")
+        csr_alignment = core_config.get("csr_alignment", 32)
+        if cpu_type is None:
+            kwargs["integrated_rom_size"]  = 0
+            kwargs["integrated_sram_size"] = 0
+            kwargs["with_uart"]            = False
+            kwargs["with_timer"]           = False
+            kwargs["with_ctrl"]            = False
+
+        # SoCCore ----------------------------------------------------------------------------------
+        SoCCore.__init__(self, platform, sys_clk_freq,
+            cpu_type      = cpu_type,
+            cpu_variant   = cpu_variant,
+            csr_alignment = csr_alignment,
+            **kwargs)
+
+        # CRG --------------------------------------------------------------------------------------
+        if core_config["sdram_phy"] in [litedram_phys.ECP5DDRPHY]:
+            self.submodules.crg = crg = LiteDRAMECP5DDRPHYCRG(platform, core_config)
+        if core_config["sdram_phy"] in [litedram_phys.A7DDRPHY, litedram_phys.K7DDRPHY, litedram_phys.V7DDRPHY]:
+            self.submodules.crg = LiteDRAMS7DDRPHYCRG(platform, core_config)
+
+        # DRAM -------------------------------------------------------------------------------------
+        platform.add_extension(get_dram_ios(core_config))
+        # ECP5DDRPHY
+        if core_config["sdram_phy"] in  [litedram_phys.ECP5DDRPHY]:
+            assert core_config["memtype"] in ["DDR3"]
+            self.submodules.ddrphy = core_config["sdram_phy"](
+                pads         = platform.request("ddram"),
+                sys_clk_freq = sys_clk_freq)
+            self.comb += crg.stop.eq(self.ddrphy.init.stop)
+            self.add_constant("ECP5DDRPHY")
+            sdram_module = core_config["sdram_module"](sys_clk_freq, "1:2")
+        # S7DDRPHY
+        if core_config["sdram_phy"] in [litedram_phys.A7DDRPHY, litedram_phys.K7DDRPHY, litedram_phys.V7DDRPHY]:
+            assert core_config["memtype"] in ["DDR2", "DDR3"]
+            self.submodules.ddrphy = core_config["sdram_phy"](
+                pads             = platform.request("ddram"),
+                memtype          = core_config["memtype"],
+                nphases          = 4 if core_config["memtype"] == "DDR3" else 2,
+                sys_clk_freq     = sys_clk_freq,
+                iodelay_clk_freq = core_config["iodelay_clk_freq"],
+                cmd_latency      = core_config["cmd_latency"])
+            self.add_constant("CMD_DELAY", core_config["cmd_delay"])
+            if core_config["memtype"] == "DDR3":
+                self.ddrphy.settings.add_electrical_settings(
+                    rtt_nom = core_config["rtt_nom"],
+                    rtt_wr  = core_config["rtt_wr"],
+                    ron     = core_config["ron"])
+        self.add_csr("ddrphy")
+
+        if core_config["sdram_phy"] in [litedram_phys.A7DDRPHY, litedram_phys.K7DDRPHY, litedram_phys.V7DDRPHY]:
+            sdram_module = core_config["sdram_module"](sys_clk_freq,
+                "1:4" if core_config["memtype"] == "DDR3" else "1:2")
+        controller_settings = ControllerSettings(
+            cmd_buffer_depth=core_config["cmd_buffer_depth"])
+        self.add_sdram("sdram",
+            phy                     = self.ddrphy,
+            module                  = sdram_module,
+            origin                  = self.mem_map["main_ram"],
+            size                    = 0x01000000, # Only expose 16MB to the CPU, enough for Init/Calib.
+            with_soc_interconnect   = cpu_type is not None,
+            l2_cache_size           = 0,
+            l2_cache_min_data_width = 0,
+            controller_settings     = controller_settings,
+        )
+
+        # DRAM Control/Status ----------------------------------------------------------------------
+        # Expose calibration status to user.
+        self.submodules.ddrctrl = LiteDRAMCoreControl()
+        self.add_csr("ddrctrl")
+        self.comb += platform.request("init_done").eq(self.ddrctrl.init_done.storage)
+        self.comb += platform.request("init_error").eq(self.ddrctrl.init_error.storage)
+        # If no CPU, expose a bus control interface to user.
+        if cpu_type is None:
+            wb_bus = wishbone.Interface()
+            self.bus.add_master(master=wb_bus)
+            platform.add_extension(wb_bus.get_ios("wb_ctrl"))
+            wb_pads = platform.request("wb_ctrl")
+            self.comb += wb_bus.connect_to_pads(wb_pads, mode="slave")
+
+        # User ports -------------------------------------------------------------------------------
+        self.comb += [
+            platform.request("user_clk").eq(ClockSignal()),
+            platform.request("user_rst").eq(ResetSignal())
+        ]
+        for name, port in core_config["user_ports"].items():
+            # Native -------------------------------------------------------------------------------
+            if port["type"] == "native":
+                user_port = self.sdram.crossbar.get_port()
+                platform.add_extension(get_native_user_port_ios(name,
+                    user_port.address_width,
+                    user_port.data_width))
+                _user_port_io = platform.request("user_port_{}".format(name))
+                self.comb += [
+                    # cmd
+                    user_port.cmd.valid.eq(_user_port_io.cmd_valid),
+                    _user_port_io.cmd_ready.eq(user_port.cmd.ready),
+                    user_port.cmd.we.eq(_user_port_io.cmd_we),
+                    user_port.cmd.addr.eq(_user_port_io.cmd_addr),
+
+                    # wdata
+                    user_port.wdata.valid.eq(_user_port_io.wdata_valid),
+                    _user_port_io.wdata_ready.eq(user_port.wdata.ready),
+                    user_port.wdata.we.eq(_user_port_io.wdata_we),
+                    user_port.wdata.data.eq(_user_port_io.wdata_data),
+
+                    # rdata
+                    _user_port_io.rdata_valid.eq(user_port.rdata.valid),
+                    user_port.rdata.ready.eq(_user_port_io.rdata_ready),
+                    _user_port_io.rdata_data.eq(user_port.rdata.data),
+                ]
+            # Wishbone -----------------------------------------------------------------------------
+            elif port["type"] == "wishbone":
+                user_port = self.sdram.crossbar.get_port()
+                wb_port = wishbone.Interface(
+                    user_port.data_width,
+                    user_port.address_width)
+                wishbone2native = LiteDRAMWishbone2Native(wb_port, user_port)
+                self.submodules += wishbone2native
+                platform.add_extension(get_wishbone_user_port_ios(name,
+                        len(wb_port.adr),
+                        len(wb_port.dat_w)))
+                _wb_port_io = platform.request("user_port_{}".format(name))
+                self.comb += [
+                    wb_port.adr.eq(_wb_port_io.adr),
+                    wb_port.dat_w.eq(_wb_port_io.dat_w),
+                    _wb_port_io.dat_r.eq(wb_port.dat_r),
+                    wb_port.sel.eq(_wb_port_io.sel),
+                    wb_port.cyc.eq(_wb_port_io.cyc),
+                    wb_port.stb.eq(_wb_port_io.stb),
+                    _wb_port_io.ack.eq(wb_port.ack),
+                    wb_port.we.eq(_wb_port_io.we),
+                    _wb_port_io.err.eq(wb_port.err),
+                ]
+            # AXI ----------------------------------------------------------------------------------
+            elif port["type"] == "axi":
+                user_port = self.sdram.crossbar.get_port()
+                axi_port  = LiteDRAMAXIPort(
+                    user_port.data_width,
+                    user_port.address_width + log2_int(user_port.data_width//8),
+                    port["id_width"])
+                axi2native = LiteDRAMAXI2Native(axi_port, user_port)
+                self.submodules += axi2native
+                platform.add_extension(get_axi_user_port_ios(name,
+                        axi_port.address_width,
+                        axi_port.data_width,
+                        port["id_width"]))
+                _axi_port_io = platform.request("user_port_{}".format(name))
+                self.comb += [
+                    # aw
+                    axi_port.aw.valid.eq(_axi_port_io.awvalid),
+                    _axi_port_io.awready.eq(axi_port.aw.ready),
+                    axi_port.aw.addr.eq(_axi_port_io.awaddr),
+                    axi_port.aw.burst.eq(_axi_port_io.awburst),
+                    axi_port.aw.len.eq(_axi_port_io.awlen),
+                    axi_port.aw.size.eq(_axi_port_io.awsize),
+                    axi_port.aw.id.eq(_axi_port_io.awid),
+
+                    # w
+                    axi_port.w.valid.eq(_axi_port_io.wvalid),
+                    _axi_port_io.wready.eq(axi_port.w.ready),
+                    axi_port.w.last.eq(_axi_port_io.wlast),
+                    axi_port.w.strb.eq(_axi_port_io.wstrb),
+                    axi_port.w.data.eq(_axi_port_io.wdata),
+
+                    # b
+                    _axi_port_io.bvalid.eq(axi_port.b.valid),
+                    axi_port.b.ready.eq(_axi_port_io.bready),
+                    _axi_port_io.bresp.eq(axi_port.b.resp),
+                    _axi_port_io.bid.eq(axi_port.b.id),
+
+                    # ar
+                    axi_port.ar.valid.eq(_axi_port_io.arvalid),
+                    _axi_port_io.arready.eq(axi_port.ar.ready),
+                    axi_port.ar.addr.eq(_axi_port_io.araddr),
+                    axi_port.ar.burst.eq(_axi_port_io.arburst),
+                    axi_port.ar.len.eq(_axi_port_io.arlen),
+                    axi_port.ar.size.eq(_axi_port_io.arsize),
+                    axi_port.ar.id.eq(_axi_port_io.arid),
+
+                    # r
+                    _axi_port_io.rvalid.eq(axi_port.r.valid),
+                    axi_port.r.ready.eq(_axi_port_io.rready),
+                    _axi_port_io.rlast.eq(axi_port.r.last),
+                    _axi_port_io.rresp.eq(axi_port.r.resp),
+                    _axi_port_io.rdata.eq(axi_port.r.data),
+                    _axi_port_io.rid.eq(axi_port.r.id),
+                ]
+            # FIFO ---------------------------------------------------------------------------------
+            elif port["type"] == "fifo":
+                # Request the DRAM-side ports first so their data width can be used for the IOs.
+                fifo_write_port = self.sdram.crossbar.get_port("write")
+                fifo_read_port  = self.sdram.crossbar.get_port("read")
+                platform.add_extension(get_fifo_user_port_ios(name, fifo_write_port.data_width))
+                _user_fifo_io = platform.request("user_fifo_{}".format(name))
+                fifo = LiteDRAMFIFO(
+                    data_width      = fifo_write_port.data_width,
+                    base            = port["base"],
+                    depth           = port["depth"],
+                    write_port      = fifo_write_port,
+                    write_threshold = port["depth"] - 32, # FIXME
+                    read_port       = fifo_read_port,
+                    read_threshold  = 32 # FIXME
+                )
+                self.submodules += fifo
+                self.comb += [
+                    # in
+                    fifo.sink.valid.eq(_user_fifo_io.in_valid),
+                    _user_fifo_io.in_ready.eq(fifo.sink.ready),
+                    fifo.sink.data.eq(_user_fifo_io.in_data),
+
+                    # out
+                    _user_fifo_io.out_valid.eq(fifo.source.valid),
+                    fifo.source.ready.eq(_user_fifo_io.out_ready),
+                    _user_fifo_io.out_data.eq(fifo.source.data),
+                ]
+            else:
+                raise ValueError("Unsupported port type: {}".format(port["type"]))
+
+# Build --------------------------------------------------------------------------------------------
+
+def main():
+    parser = argparse.ArgumentParser(description="LiteDRAM standalone core generator")
+    builder_args(parser)
+    parser.set_defaults(output_dir="build")
+    parser.add_argument("config", help="YAML config file")
+    args = parser.parse_args()
+    with open(args.config) as config_file:
+        core_config = yaml.load(config_file.read(), Loader=yaml.Loader)
+
+    # Convert YAML elements to Python/LiteX --------------------------------------------------------
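+    # The YAML values are plain strings: map "False"/"True"/"None" to their Python equivalents,
+    # coerce *clk_freq entries to floats, and resolve sdram_module/sdram_phy names to the
+    # corresponding classes in gram.modules / gram.phy.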
+    for k, v in core_config.items():
+        replaces = {"False": False, "True": True, "None": None}
+        for r in replaces.keys():
+            if v == r:
+                core_config[k] = replaces[r]
+        if "clk_freq" in k:
+            core_config[k] = float(core_config[k])
+        if k == "sdram_module":
+            core_config[k] = getattr(litedram_modules, core_config[k])
+        if k == "sdram_phy":
+            core_config[k] = getattr(litedram_phys, core_config[k])
+
+    # Generate core --------------------------------------------------------------------------------
+    if core_config["sdram_phy"] in [litedram_phys.ECP5DDRPHY]:
+        platform = LatticePlatform("LFE5UM5G-45F-8BG381C", io=[], toolchain="trellis") # FIXME: allow other devices.
+    elif core_config["sdram_phy"] in [litedram_phys.A7DDRPHY, litedram_phys.K7DDRPHY, litedram_phys.V7DDRPHY]:
+        platform = XilinxPlatform("", io=[], toolchain="vivado")
+    else:
+        raise ValueError("Unsupported SDRAM PHY: {}".format(core_config["sdram_phy"]))
+
+    builder_arguments = builder_argdict(args)
+    builder_arguments["compile_gateware"] = False
+
+    soc      = LiteDRAMCore(platform, core_config, integrated_rom_size=0x6000)
+    builder  = Builder(soc, **builder_arguments)
+    vns      = builder.build(build_name="litedram_core", regular_comb=False)
+
+    if soc.cpu_type is not None:
+        init_filename = "mem.init"
+        os.system("mv {} {}".format(
+            os.path.join(builder.gateware_dir, init_filename),
+            os.path.join(builder.gateware_dir, "litedram_core.init"),
+        ))
+        replace_in_file(os.path.join(builder.gateware_dir, "litedram_core.v"), init_filename, "litedram_core.init")
+
+if __name__ == "__main__":
+    main()
diff --git a/gram/init.py b/gram/init.py
new file mode 100644 (file)
index 0000000..3e94a30
--- /dev/null
@@ -0,0 +1,623 @@
+# This file is Copyright (c) 2013-2014 Sebastien Bourdeauducq <sb@m-labs.hk>
+# This file is Copyright (c) 2013-2020 Florent Kermarrec <florent@enjoy-digital.fr>
+# This file is Copyright (c) 2017 whitequark <whitequark@whitequark.org>
+# This file is Copyright (c) 2014 Yann Sionneau <ys@m-labs.hk>
+# This file is Copyright (c) 2018 bunnie <bunnie@kosagi.com>
+# This file is Copyright (c) 2019 Gabriel L. Somlo <gsomlo@gmail.com>
+# License: BSD
+
+from nmigen.utils import log2_int
+
+cmds = {
+    "PRECHARGE_ALL": "DFII_COMMAND_RAS|DFII_COMMAND_WE|DFII_COMMAND_CS",
+    "MODE_REGISTER": "DFII_COMMAND_RAS|DFII_COMMAND_CAS|DFII_COMMAND_WE|DFII_COMMAND_CS",
+    "AUTO_REFRESH":  "DFII_COMMAND_RAS|DFII_COMMAND_CAS|DFII_COMMAND_CS",
+    "UNRESET":       "DFII_CONTROL_ODT|DFII_CONTROL_RESET_N",
+    "CKE":           "DFII_CONTROL_CKE|DFII_CONTROL_ODT|DFII_CONTROL_RESET_N"
+}
+
+# SDR ----------------------------------------------------------------------------------------------
+
+def get_sdr_phy_init_sequence(phy_settings, timing_settings):
+    cl = phy_settings.cl
+    bl = 1
+    mr = log2_int(bl) + (cl << 4)
+    reset_dll = 1 << 8
+
+    init_sequence = [
+        ("Bring CKE high", 0x0000, 0, cmds["CKE"], 20000),
+        ("Precharge All",  0x0400, 0, cmds["PRECHARGE_ALL"], 0),
+        ("Load Mode Register / Reset DLL, CL={0:d}, BL={1:d}".format(cl, bl), mr + reset_dll, 0, cmds["MODE_REGISTER"], 200),
+        ("Precharge All", 0x0400, 0, cmds["PRECHARGE_ALL"], 0),
+        ("Auto Refresh", 0x0, 0, cmds["AUTO_REFRESH"], 4),
+        ("Auto Refresh", 0x0, 0, cmds["AUTO_REFRESH"], 4),
+        ("Load Mode Register / CL={0:d}, BL={1:d}".format(cl, bl), mr, 0, cmds["MODE_REGISTER"], 200)
+    ]
+
+    return init_sequence, None
+
+# DDR ----------------------------------------------------------------------------------------------
+
+def get_ddr_phy_init_sequence(phy_settings, timing_settings):
+    cl  = phy_settings.cl
+    bl  = 4
+    mr  = log2_int(bl) + (cl << 4)
+    emr = 0
+    reset_dll = 1 << 8
+
+    init_sequence = [
+        ("Bring CKE high", 0x0000, 0, cmds["CKE"], 20000),
+        ("Precharge All",  0x0400, 0, cmds["PRECHARGE_ALL"], 0),
+        ("Load Extended Mode Register", emr, 1, cmds["MODE_REGISTER"], 0),
+        ("Load Mode Register / Reset DLL, CL={0:d}, BL={1:d}".format(cl, bl), mr + reset_dll, 0, cmds["MODE_REGISTER"], 200),
+        ("Precharge All", 0x0400, 0, cmds["PRECHARGE_ALL"], 0),
+        ("Auto Refresh", 0x0, 0, cmds["AUTO_REFRESH"], 4),
+        ("Auto Refresh", 0x0, 0, cmds["AUTO_REFRESH"], 4),
+        ("Load Mode Register / CL={0:d}, BL={1:d}".format(cl, bl), mr, 0, cmds["MODE_REGISTER"], 200)
+    ]
+
+    return init_sequence, None
+
+# LPDDR --------------------------------------------------------------------------------------------
+
+def get_lpddr_phy_init_sequence(phy_settings, timing_settings):
+    cl  = phy_settings.cl
+    bl  = 4
+    mr  = log2_int(bl) + (cl << 4)
+    emr = 0
+    reset_dll = 1 << 8
+
+    init_sequence = [
+        ("Bring CKE high", 0x0000, 0, cmds["CKE"], 20000),
+        ("Precharge All",  0x0400, 0, cmds["PRECHARGE_ALL"], 0),
+        ("Load Extended Mode Register", emr, 2, cmds["MODE_REGISTER"], 0),
+        ("Load Mode Register / Reset DLL, CL={0:d}, BL={1:d}".format(cl, bl), mr + reset_dll, 0, cmds["MODE_REGISTER"], 200),
+        ("Precharge All", 0x0400, 0, cmds["PRECHARGE_ALL"], 0),
+        ("Auto Refresh", 0x0, 0, cmds["AUTO_REFRESH"], 4),
+        ("Auto Refresh", 0x0, 0, cmds["AUTO_REFRESH"], 4),
+        ("Load Mode Register / CL={0:d}, BL={1:d}".format(cl, bl), mr, 0, cmds["MODE_REGISTER"], 200)
+    ]
+
+    return init_sequence, None
+
+# DDR2 ---------------------------------------------------------------------------------------------
+
+def get_ddr2_phy_init_sequence(phy_settings, timing_settings):
+    cl   = phy_settings.cl
+    bl   = 4
+    wr   = 2
+    mr   = log2_int(bl) + (cl << 4) + (wr << 9)
+    emr  = 0
+    emr2 = 0
+    emr3 = 0
+    ocd  = 7 << 7
+    reset_dll = 1 << 8
+
+    init_sequence = [
+        ("Bring CKE high", 0x0000, 0, cmds["CKE"], 20000),
+        ("Precharge All",  0x0400, 0, cmds["PRECHARGE_ALL"], 0),
+        ("Load Extended Mode Register 3", emr3, 3, cmds["MODE_REGISTER"], 0),
+        ("Load Extended Mode Register 2", emr2, 2, cmds["MODE_REGISTER"], 0),
+        ("Load Extended Mode Register", emr, 1, cmds["MODE_REGISTER"], 0),
+        ("Load Mode Register / Reset DLL, CL={0:d}, BL={1:d}".format(cl, bl), mr + reset_dll, 0, cmds["MODE_REGISTER"], 200),
+        ("Precharge All", 0x0400, 0, cmds["PRECHARGE_ALL"], 0),
+        ("Auto Refresh", 0x0, 0, cmds["AUTO_REFRESH"], 4),
+        ("Auto Refresh", 0x0, 0, cmds["AUTO_REFRESH"], 4),
+        ("Load Mode Register / CL={0:d}, BL={1:d}".format(cl, bl), mr, 0, cmds["MODE_REGISTER"], 200),
+        ("Load Extended Mode Register / OCD Default", emr+ocd, 1, cmds["MODE_REGISTER"], 0),
+        ("Load Extended Mode Register / OCD Exit", emr, 1, cmds["MODE_REGISTER"], 0),
+    ]
+
+    return init_sequence, None
+
+# DDR3 ---------------------------------------------------------------------------------------------
+
+def get_ddr3_phy_init_sequence(phy_settings, timing_settings):
+    cl  = phy_settings.cl
+    bl  = 8
+    cwl = phy_settings.cwl
+
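+    # format_mr0() packs BL/CL/WR/DLL-reset into the DDR3 MR0 layout. As an illustrative sanity
+    # check (values are an assumption, not from the original source): BL=8, CL=6, WR=6 and
+    # dll_reset=1 give mr0 = 0x520, i.e. WR=0b010 in bits 11:9, DLL reset in bit 8,
+    # CL[3:1]=0b010 in bits 6:4, CL[0]=0 in bit 2 and BL=0b00 in bits 1:0.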
+    def format_mr0(bl, cl, wr, dll_reset):
+        bl_to_mr0 = {
+            4: 0b10,
+            8: 0b00
+        }
+        cl_to_mr0 = {
+             5: 0b0010,
+             6: 0b0100,
+             7: 0b0110,
+             8: 0b1000,
+             9: 0b1010,
+            10: 0b1100,
+            11: 0b1110,
+            12: 0b0001,
+            13: 0b0011,
+            14: 0b0101
+        }
+        wr_to_mr0 = {
+            16: 0b000,
+             5: 0b001,
+             6: 0b010,
+             7: 0b011,
+             8: 0b100,
+            10: 0b101,
+            12: 0b110,
+            14: 0b111
+        }
+        mr0 = bl_to_mr0[bl]
+        mr0 |= (cl_to_mr0[cl] & 1) << 2
+        mr0 |= ((cl_to_mr0[cl] >> 1) & 0b111) << 4
+        mr0 |= dll_reset << 8
+        mr0 |= wr_to_mr0[wr] << 9
+        return mr0
+
+    def format_mr1(ron, rtt_nom):
+        mr1 = ((ron >> 0) & 1) << 1
+        mr1 |= ((ron >> 1) & 1) << 5
+        mr1 |= ((rtt_nom >> 0) & 1) << 2
+        mr1 |= ((rtt_nom >> 1) & 1) << 6
+        mr1 |= ((rtt_nom >> 2) & 1) << 9
+        return mr1
+
+    def format_mr2(cwl, rtt_wr):
+        mr2 = (cwl-5) << 3
+        mr2 |= rtt_wr << 9
+        return mr2
+
+    z_to_rtt_nom = {
+        "disabled" : 0,
+        "60ohm"    : 1,
+        "120ohm"   : 2,
+        "40ohm"    : 3,
+        "20ohm"    : 4,
+        "30ohm"    : 5
+    }
+
+    z_to_rtt_wr = {
+        "disabled" : 0,
+        "60ohm"    : 1,
+        "120ohm"   : 2,
+    }
+
+    z_to_ron = {
+        "40ohm" : 0,
+        "34ohm" : 1,
+    }
+
+    # default electrical settings (point to point)
+    rtt_nom = "60ohm"
+    rtt_wr  = "60ohm"
+    ron     = "34ohm"
+
+    # override electrical settings if specified
+    if hasattr(phy_settings, "rtt_nom"):
+        rtt_nom = phy_settings.rtt_nom
+    if hasattr(phy_settings, "rtt_wr"):
+        rtt_wr = phy_settings.rtt_wr
+    if hasattr(phy_settings, "ron"):
+        ron = phy_settings.ron
+
+    wr  = max(timing_settings.tWTR*phy_settings.nphases, 5) # >= ceiling(tWR/tCK)
+    mr0 = format_mr0(bl, cl, wr, 1)
+    mr1 = format_mr1(z_to_ron[ron], z_to_rtt_nom[rtt_nom])
+    mr2 = format_mr2(cwl, z_to_rtt_wr[rtt_wr])
+    mr3 = 0
+
+    init_sequence = [
+        ("Release reset", 0x0000, 0, cmds["UNRESET"], 50000),
+        ("Bring CKE high", 0x0000, 0, cmds["CKE"], 10000),
+        ("Load Mode Register 2, CWL={0:d}".format(cwl), mr2, 2, cmds["MODE_REGISTER"], 0),
+        ("Load Mode Register 3", mr3, 3, cmds["MODE_REGISTER"], 0),
+        ("Load Mode Register 1", mr1, 1, cmds["MODE_REGISTER"], 0),
+        ("Load Mode Register 0, CL={0:d}, BL={1:d}".format(cl, bl), mr0, 0, cmds["MODE_REGISTER"], 200),
+        ("ZQ Calibration", 0x0400, 0, "DFII_COMMAND_WE|DFII_COMMAND_CS", 200),
+    ]
+
+    return init_sequence, mr1
+
+# DDR4 ---------------------------------------------------------------------------------------------
+
+def get_ddr4_phy_init_sequence(phy_settings, timing_settings):
+    cl  = phy_settings.cl
+    bl  = 8
+    cwl = phy_settings.cwl
+
+    def format_mr0(bl, cl, wr, dll_reset):
+        bl_to_mr0 = {
+            4: 0b10,
+            8: 0b00
+        }
+        cl_to_mr0 = {
+             9: 0b00000,
+            10: 0b00001,
+            11: 0b00010,
+            12: 0b00011,
+            13: 0b00100,
+            14: 0b00101,
+            15: 0b00110,
+            16: 0b00111,
+            18: 0b01000,
+            20: 0b01001,
+            22: 0b01010,
+            24: 0b01011,
+            23: 0b01100,
+            17: 0b01101,
+            19: 0b01110,
+            21: 0b01111,
+            25: 0b10000,
+            26: 0b10001,
+            27: 0b10010,
+            28: 0b10011,
+            29: 0b10100,
+            30: 0b10101,
+            31: 0b10110,
+            32: 0b10111,
+        }
+        wr_to_mr0 = {
+            10: 0b0000,
+            12: 0b0001,
+            14: 0b0010,
+            16: 0b0011,
+            18: 0b0100,
+            20: 0b0101,
+            24: 0b0110,
+            22: 0b0111,
+            26: 0b1000,
+            28: 0b1001,
+        }
+        mr0 = bl_to_mr0[bl]
+        mr0 |= (cl_to_mr0[cl] & 0b1) << 2
+        mr0 |= ((cl_to_mr0[cl] >> 1) & 0b111) << 4
+        mr0 |= ((cl_to_mr0[cl] >> 4) & 0b1) << 12
+        mr0 |= dll_reset << 8
+        mr0 |= (wr_to_mr0[wr] & 0b111) << 9
+        mr0 |= (wr_to_mr0[wr] >> 3) << 13
+        return mr0
+
+    def format_mr1(dll_enable, ron, rtt_nom):
+        mr1 = dll_enable
+        mr1 |= ((ron >> 0) & 0b1) << 1
+        mr1 |= ((ron >> 1) & 0b1) << 2
+        mr1 |= ((rtt_nom >> 0) & 0b1) << 8
+        mr1 |= ((rtt_nom >> 1) & 0b1) << 9
+        mr1 |= ((rtt_nom >> 2) & 0b1) << 10
+        return mr1
+
+    def format_mr2(cwl, rtt_wr):
+        cwl_to_mr2 = {
+             9: 0b000,
+            10: 0b001,
+            11: 0b010,
+            12: 0b011,
+            14: 0b100,
+            16: 0b101,
+            18: 0b110,
+            20: 0b111
+        }
+        mr2 = cwl_to_mr2[cwl] << 3
+        mr2 |= rtt_wr << 9
+        return mr2
+
+    def format_mr3(fine_refresh_mode):
+        fine_refresh_mode_to_mr3 = {
+            "1x": 0b000,
+            "2x": 0b001,
+            "4x": 0b010
+        }
+        mr3 = fine_refresh_mode_to_mr3[fine_refresh_mode] << 6
+        return mr3
+
+    def format_mr6(tccd):
+        tccd_to_mr6 = {
+            4: 0b000,
+            5: 0b001,
+            6: 0b010,
+            7: 0b011,
+            8: 0b100
+        }
+        mr6 = tccd_to_mr6[tccd] << 10
+        return mr6
+
+    z_to_rtt_nom = {
+        "disabled" : 0b000,
+        "60ohm"    : 0b001,
+        "120ohm"   : 0b010,
+        "40ohm"    : 0b011,
+        "240ohm"   : 0b100,
+        "48ohm"    : 0b101,
+        "80ohm"    : 0b110,
+        "34ohm"    : 0b111
+    }
+
+    z_to_rtt_wr = {
+        "disabled" : 0b000,
+        "120ohm"   : 0b001,
+        "240ohm"   : 0b010,
+        "high-z"   : 0b011,
+        "80ohm"    : 0b100,
+    }
+
+    z_to_ron = {
+        "34ohm" : 0b00,
+        "48ohm" : 0b01,
+    }
+
+    # default electrical settings (point to point)
+    rtt_nom = "40ohm"
+    rtt_wr  = "120ohm"
+    ron     = "34ohm"
+
+    # override electrical settings if specified
+    if hasattr(phy_settings, "rtt_nom"):
+        rtt_nom = phy_settings.rtt_nom
+    if hasattr(phy_settings, "rtt_wr"):
+        rtt_wr = phy_settings.rtt_wr
+    if hasattr(phy_settings, "ron"):
+        ron = phy_settings.ron
+
+    wr  = max(timing_settings.tWTR*phy_settings.nphases, 10) # >= ceiling(tWR/tCK)
+    mr0 = format_mr0(bl, cl, wr, 1)
+    mr1 = format_mr1(1, z_to_ron[ron], z_to_rtt_nom[rtt_nom])
+    mr2 = format_mr2(cwl, z_to_rtt_wr[rtt_wr])
+    mr3 = format_mr3(timing_settings.fine_refresh_mode)
+    mr4 = 0
+    mr5 = 0
+    mr6 = format_mr6(4) # FIXME: tCCD
+
+    rdimm_init = []
+    if phy_settings.is_rdimm:
+        def get_coarse_speed(tck, pll_bypass):
+            # JESD82-31A page 78
+            f_to_coarse_speed = {
+                1600e6: 0,
+                1866e6: 1,
+                2133e6: 2,
+                2400e6: 3,
+                2666e6: 4,
+                2933e6: 5,
+                3200e6: 6,
+            }
+            if pll_bypass:
+                return 7
+            else:
+                for f, speed in f_to_coarse_speed.items():
+                    if tck >= 2/f:
+                        return speed
+                raise ValueError
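+        # Illustrative trace of get_fine_speed() below (the clock period is an assumption, not
+        # from the original source): tck = 0.75e-9 gives freq = 2/tck ~= 2.67e9, hence
+        # fine_speed = (2.67e9 - 1240e6)//20e6 = 71.0, within the 0..0b1100001 clamp.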
+        def get_fine_speed(tck):
+            # JESD82-31A page 83
+            freq = 2/tck
+            fine_speed = (freq - 1240e6) // 20e6
+            fine_speed = max(fine_speed, 0)
+            fine_speed = min(fine_speed, 0b1100001)
+            return fine_speed
+
+        coarse_speed = get_coarse_speed(phy_settings.tck, phy_settings.rcd_pll_bypass)
+        fine_speed = get_fine_speed(phy_settings.tck)
+
+        rcd_reset = 0x060 | 0x0                          # F0RC06: command space control; 0: reset RCD
+
+        f0rc0f = 0x0F0 | 0x4                             # F0RC0F: 0 nCK latency adder
+
+        f0rc03 = 0x030 | phy_settings.rcd_ca_cs_drive    # F0RC03: CA/CS drive strength
+        f0rc04 = 0x040 | phy_settings.rcd_odt_cke_drive  # F0RC04: ODT/CKE drive strength
+        f0rc05 = 0x050 | phy_settings.rcd_clk_drive      # F0RC05: clock drive strength
+
+        f0rc0a = 0x0A0 | coarse_speed                    # F0RC0A: coarse speed selection and PLL bypass
+        f0rc3x = 0x300 | fine_speed                      # F0RC3x: fine speed selection
+
+        rdimm_init = [
+            ("Reset RCD", rcd_reset, 7, cmds["MODE_REGISTER"], 50000),
+            ("Load RCD F0RC0F", f0rc0f, 7, cmds["MODE_REGISTER"], 100),
+            ("Load RCD F0RC03", f0rc03, 7, cmds["MODE_REGISTER"], 100),
+            ("Load RCD F0RC04", f0rc04, 7, cmds["MODE_REGISTER"], 100),
+            ("Load RCD F0RC05", f0rc05, 7, cmds["MODE_REGISTER"], 100),
+            ("Load RCD F0RC0A", f0rc0a, 7, cmds["MODE_REGISTER"], 100),
+            ("Load RCD F0RC3X", f0rc3x, 7, cmds["MODE_REGISTER"], 100),
+        ]
+
+    init_sequence = [
+        ("Release reset", 0x0000, 0, cmds["UNRESET"], 50000),
+        ("Bring CKE high", 0x0000, 0, cmds["CKE"], 10000),
+    ] + rdimm_init + [
+        ("Load Mode Register 3", mr3, 3, cmds["MODE_REGISTER"], 0),
+        ("Load Mode Register 6", mr6, 6, cmds["MODE_REGISTER"], 0),
+        ("Load Mode Register 5", mr5, 5, cmds["MODE_REGISTER"], 0),
+        ("Load Mode Register 4", mr4, 4, cmds["MODE_REGISTER"], 0),
+        ("Load Mode Register 2, CWL={0:d}".format(cwl), mr2, 2, cmds["MODE_REGISTER"], 0),
+        ("Load Mode Register 1", mr1, 1, cmds["MODE_REGISTER"], 0),
+        ("Load Mode Register 0, CL={0:d}, BL={1:d}".format(cl, bl), mr0, 0, cmds["MODE_REGISTER"], 200),
+        ("ZQ Calibration", 0x0400, 0, "DFII_COMMAND_WE|DFII_COMMAND_CS", 200),
+    ]
+
+    return init_sequence, mr1
+
+# Init Sequence ------------------------------------------------------------------------------------
+
+def get_sdram_phy_init_sequence(phy_settings, timing_settings):
+    return {
+        "SDR"  : get_sdr_phy_init_sequence,
+        "DDR"  : get_ddr_phy_init_sequence,
+        "LPDDR": get_lpddr_phy_init_sequence,
+        "DDR2" : get_ddr2_phy_init_sequence,
+        "DDR3" : get_ddr3_phy_init_sequence,
+        "DDR4" : get_ddr4_phy_init_sequence,
+    }[phy_settings.memtype](phy_settings, timing_settings)
+
+# C Header -----------------------------------------------------------------------------------------
+
+def get_sdram_phy_c_header(phy_settings, timing_settings):
+    r = "#ifndef __GENERATED_SDRAM_PHY_H\n#define __GENERATED_SDRAM_PHY_H\n"
+    r += "#include <hw/common.h>\n"
+    r += "#include <generated/csr.h>\n"
+    r += "\n"
+
+    r += "#define DFII_CONTROL_SEL        0x01\n"
+    r += "#define DFII_CONTROL_CKE        0x02\n"
+    r += "#define DFII_CONTROL_ODT        0x04\n"
+    r += "#define DFII_CONTROL_RESET_N    0x08\n"
+    r += "\n"
+
+    r += "#define DFII_COMMAND_CS         0x01\n"
+    r += "#define DFII_COMMAND_WE         0x02\n"
+    r += "#define DFII_COMMAND_CAS        0x04\n"
+    r += "#define DFII_COMMAND_RAS        0x08\n"
+    r += "#define DFII_COMMAND_WRDATA     0x10\n"
+    r += "#define DFII_COMMAND_RDDATA     0x20\n"
+    r += "\n"
+
+    phytype = phy_settings.phytype.upper()
+    nphases = phy_settings.nphases
+
+    # Define PHY type and number of phases
+    r += "#define SDRAM_PHY_"+phytype+"\n"
+    r += "#define SDRAM_PHY_PHASES "+str(nphases)+"\n"
+
+    # Define Read/Write Leveling capability
+    if phytype in ["USDDRPHY", "USPDDRPHY", "K7DDRPHY", "V7DDRPHY"]:
+        r += "#define SDRAM_PHY_WRITE_LEVELING_CAPABLE\n"
+    if phytype in ["USDDRPHY", "USPDDRPHY"]:
+        r += "#define SDRAM_PHY_WRITE_LEVELING_REINIT\n"
+    if phytype in ["USDDRPHY", "USPDDRPHY", "A7DDRPHY", "K7DDRPHY", "V7DDRPHY", "ECP5DDRPHY"]:
+        r += "#define SDRAM_PHY_READ_LEVELING_CAPABLE\n"
+
+    # Define number of modules/delays/bitslips
+    if phytype in ["USDDRPHY", "USPDDRPHY"]:
+        r += "#define SDRAM_PHY_MODULES DFII_PIX_DATA_BYTES/2\n"
+        r += "#define SDRAM_PHY_DELAYS 512\n"
+        r += "#define SDRAM_PHY_BITSLIPS 16\n"
+    elif phytype in ["A7DDRPHY", "K7DDRPHY", "V7DDRPHY"]:
+        r += "#define SDRAM_PHY_MODULES DFII_PIX_DATA_BYTES/2\n"
+        r += "#define SDRAM_PHY_DELAYS 32\n"
+        r += "#define SDRAM_PHY_BITSLIPS 16\n"
+    elif phytype in ["ECP5DDRPHY"]:
+        r += "#define SDRAM_PHY_MODULES DFII_PIX_DATA_BYTES/4\n"
+        r += "#define SDRAM_PHY_DELAYS 8\n"
+        r += "#define SDRAM_PHY_BITSLIPS 4\n"
+
+    if phy_settings.is_rdimm:
+        assert phy_settings.memtype == "DDR4"
+        r += "#define SDRAM_PHY_DDR4_RDIMM\n"
+
+    r += "\n"
+
+    r += "static void cdelay(int i);\n"
+
+    # commands_px functions
+    for n in range(nphases):
+        r += """
+__attribute__((unused)) static void command_p{n}(int cmd)
+{{
+    sdram_dfii_pi{n}_command_write(cmd);
+    sdram_dfii_pi{n}_command_issue_write(1);
+}}""".format(n=str(n))
+    r += "\n\n"
+
+    # rd/wr access macros
+    r += """
+#define sdram_dfii_pird_address_write(X) sdram_dfii_pi{rdphase}_address_write(X)
+#define sdram_dfii_piwr_address_write(X) sdram_dfii_pi{wrphase}_address_write(X)
+#define sdram_dfii_pird_baddress_write(X) sdram_dfii_pi{rdphase}_baddress_write(X)
+#define sdram_dfii_piwr_baddress_write(X) sdram_dfii_pi{wrphase}_baddress_write(X)
+#define command_prd(X) command_p{rdphase}(X)
+#define command_pwr(X) command_p{wrphase}(X)
+""".format(rdphase=str(phy_settings.rdphase), wrphase=str(phy_settings.wrphase))
+    r += "\n"
+
+    #
+    # sdrrd/sdrwr functions utilities
+    #
+    r += "#define DFII_PIX_DATA_SIZE CSR_SDRAM_DFII_PI0_WRDATA_SIZE\n"
+    sdram_dfii_pix_wrdata_addr = []
+    for n in range(nphases):
+        sdram_dfii_pix_wrdata_addr.append("CSR_SDRAM_DFII_PI{n}_WRDATA_ADDR".format(n=n))
+    r += """
+const unsigned long sdram_dfii_pix_wrdata_addr[SDRAM_PHY_PHASES] = {{
+\t{sdram_dfii_pix_wrdata_addr}
+}};
+""".format(sdram_dfii_pix_wrdata_addr=",\n\t".join(sdram_dfii_pix_wrdata_addr))
+
+    sdram_dfii_pix_rddata_addr = []
+    for n in range(nphases):
+        sdram_dfii_pix_rddata_addr.append("CSR_SDRAM_DFII_PI{n}_RDDATA_ADDR".format(n=n))
+    r += """
+const unsigned long sdram_dfii_pix_rddata_addr[SDRAM_PHY_PHASES] = {{
+\t{sdram_dfii_pix_rddata_addr}
+}};
+""".format(sdram_dfii_pix_rddata_addr=",\n\t".join(sdram_dfii_pix_rddata_addr))
+    r += "\n"
+
+    init_sequence, mr1 = get_sdram_phy_init_sequence(phy_settings, timing_settings)
+
+    if phy_settings.memtype in ["DDR3", "DDR4"]:
+        # the value of MR1 needs to be modified during write leveling
+        r += "#define DDRX_MR1 {}\n\n".format(mr1)
+
+    r += "static void init_sequence(void)\n{\n"
+    for comment, a, ba, cmd, delay in init_sequence:
+        invert_masks = [(0, 0), ]
+        if phy_settings.is_rdimm:
+            assert phy_settings.memtype == "DDR4"
+            # JESD82-31A page 38
+            #
+            # B-side chips have certain usually-inconsequential address and BA
+            # bits inverted by the RCD to reduce SSO current. For mode register
+            # writes, however, we must compensate for this. BG[1] also directs
+            # writes either to the A side (BG[1]=0) or B side (BG[1]=1)
+            #
+            # The 'ba != 7' is because we don't do this to writes to the RCD
+            # itself.
+            if ba != 7:
+                invert_masks.append((0b10101111111000, 0b1111))
+
+        for a_inv, ba_inv in invert_masks:
+            r += "\t/* {0} */\n".format(comment)
+            r += "\tsdram_dfii_pi0_address_write({0:#x});\n".format(a ^ a_inv)
+            r += "\tsdram_dfii_pi0_baddress_write({0:d});\n".format(ba ^ ba_inv)
+            if cmd[:12] == "DFII_CONTROL":
+                r += "\tsdram_dfii_control_write({0});\n".format(cmd)
+            else:
+                r += "\tcommand_p0({0});\n".format(cmd)
+            if delay:
+                r += "\tcdelay({0:d});\n".format(delay)
+            r += "\n"
+    r += "}\n"
+
+    r += "#endif\n"
+
+    return r
+
+# Python Header ------------------------------------------------------------------------------------
+
+def get_sdram_phy_py_header(phy_settings, timing_settings):
+    r = ""
+    r += "dfii_control_sel     = 0x01\n"
+    r += "dfii_control_cke     = 0x02\n"
+    r += "dfii_control_odt     = 0x04\n"
+    r += "dfii_control_reset_n = 0x08\n"
+    r += "\n"
+    r += "dfii_command_cs     = 0x01\n"
+    r += "dfii_command_we     = 0x02\n"
+    r += "dfii_command_cas    = 0x04\n"
+    r += "dfii_command_ras    = 0x08\n"
+    r += "dfii_command_wrdata = 0x10\n"
+    r += "dfii_command_rddata = 0x20\n"
+    r += "\n"
+
+    init_sequence, mr1 = get_sdram_phy_init_sequence(phy_settings, timing_settings)
+
+    if mr1 is not None:
+        r += "ddrx_mr1 = 0x{:x}\n".format(mr1)
+        r += "\n"
+
+    r += "init_sequence = [\n"
+    for comment, a, ba, cmd, delay in init_sequence:
+        r += "    "
+        r += "(\"" + comment + "\", "
+        r += str(a) + ", "
+        r += str(ba) + ", "
+        r += cmd.lower() + ", "
+        r += str(delay) + "),"
+        r += "\n"
+    r += "]\n"
+    return r
diff --git a/gram/modules.py b/gram/modules.py
new file mode 100644 (file)
index 0000000..fa909f4
--- /dev/null
@@ -0,0 +1,816 @@
+# This file is Copyright (c) 2015 Sebastien Bourdeauducq <sb@m-labs.hk>
+# This file is Copyright (c) 2015-2019 Florent Kermarrec <florent@enjoy-digital.fr>
+# This file is Copyright (c) 2018 John Sully <john@csquare.ca>
+# This file is Copyright (c) 2019 Ambroz Bizjak <abizjak.pro@gmail.com>
+# This file is Copyright (c) 2019 Antony Pavlov <antonynpavlov@gmail.com>
+# This file is Copyright (c) 2018 bunnie <bunnie@kosagi.com>
+# This file is Copyright (c) 2018 David Shah <dave@ds0.me>
+# This file is Copyright (c) 2019 Steve Haynal - VSD Engineering
+# This file is Copyright (c) 2018 Tim 'mithro' Ansell <me@mith.ro>
+# This file is Copyright (c) 2018 Daniel Kucera <daniel.kucera@gmail.com>
+# This file is Copyright (c) 2018 MikoÅ‚aj SowiÅ„ski <mikolaj.sowinski@gmail.com>
+# This file is Copyright (c) 2020 Antmicro <www.antmicro.com>
+# License: BSD
+
+from math import ceil
+from collections import namedtuple
+
+from nmigen import *
+
+from gram.common import Settings, GeomSettings, TimingSettings
+
+# Timings ------------------------------------------------------------------------------------------
+
+_technology_timings = ["tREFI", "tWTR", "tCCD", "tRRD", "tZQCS"]
+
+class _TechnologyTimings(Settings):
+    def __init__(self, tREFI, tWTR, tCCD, tRRD, tZQCS=None):
+        self.set_attributes(locals())
+
+
+_speedgrade_timings = ["tRP", "tRCD", "tWR", "tRFC", "tFAW", "tRAS"]
+
+class _SpeedgradeTimings(Settings):
+    def __init__(self, tRP, tRCD, tWR, tRFC, tFAW, tRAS):
+        self.set_attributes(locals())
+
+# SPD ----------------------------------------------------------------------------------------------
+
+def _read_field(byte, nbits, shift):
+    mask = 2**nbits - 1
+    return (byte & (mask << shift)) >> shift
+
+def _twos_complement(value, nbits):
+    if value & (1 << (nbits - 1)):
+        value -= (1 << nbits)
+    return value
+
+def _word(msb, lsb):
+    return (msb << 8) | lsb
+
+
+class DDR3SPDData:
+    memtype = "DDR3"
+
+    def __init__(self, spd_data):
+        # Geometry ---------------------------------------------------------------------------------
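+        # Bank, row and column address widths are decoded from SPD bytes 4 and 5 (the field
+        # offsets below follow the JEDEC DDR3 SPD layout).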
+        bankbits = {
+            0b000: 3,
+            0b001: 4,
+            0b010: 5,
+            0b011: 6,
+        }[_read_field(spd_data[4], nbits=3, shift=4)]
+        rowbits = {
+            0b000: 12,
+            0b001: 13,
+            0b010: 14,
+            0b011: 15,
+            0b100: 16,
+        }[_read_field(spd_data[5], nbits=3, shift=3)]
+        colbits = {
+            0b000:  9,
+            0b001: 10,
+            0b010: 11,
+            0b011: 12,
+        }[_read_field(spd_data[5], nbits=3, shift=0)]
+
+        self.nbanks = 2**bankbits
+        self.nrows = 2**rowbits
+        self.ncols = 2**colbits
+
+        # Timings ----------------------------------------------------------------------------------
+        self.init_timebase(spd_data)
+
+        # most significant (upper) / least significant (lower) nibble
+        def msn(byte):
+            return _read_field(byte, nbits=4, shift=4)
+
+        def lsn(byte):
+            return _read_field(byte, nbits=4, shift=0)
+
+        b = spd_data
+        tck_min  = self.txx_ns(mtb=b[12], ftb=b[34])
+        taa_min  = self.txx_ns(mtb=b[16], ftb=b[35])
+        twr_min  = self.txx_ns(mtb=b[17])
+        trcd_min = self.txx_ns(mtb=b[18], ftb=b[36])
+        trrd_min = self.txx_ns(mtb=b[19])
+        trp_min  = self.txx_ns(mtb=b[20], ftb=b[37])
+        tras_min = self.txx_ns(mtb=_word(lsn(b[21]), b[22]))
+        trc_min  = self.txx_ns(mtb=_word(msn(b[21]), b[23]), ftb=b[38])
+        trfc_min = self.txx_ns(mtb=_word(b[25], b[24]))
+        twtr_min = self.txx_ns(mtb=b[26])
+        trtp_min = self.txx_ns(mtb=b[27])
+        tfaw_min = self.txx_ns(mtb=_word(lsn(b[28]), b[29]))
+
+        technology_timings = _TechnologyTimings(
+            tREFI = 64e6/8192,      # 64ms/8192ops
+            tWTR  = (4, twtr_min),  # min 4 cycles
+            tCCD  = (4, None),      # min 4 cycles
+            tRRD  = (4, trrd_min),  # min 4 cycles
+            tZQCS = (64, 80),
+        )
+        speedgrade_timings = _SpeedgradeTimings(
+            tRP  = trp_min,
+            tRCD = trcd_min,
+            tWR  = twr_min,
+            tRFC = (None, trfc_min),
+            tFAW = (None, tfaw_min),
+            tRAS = tras_min,
+        )
+
+        self.speedgrade = str(self.speedgrade_freq(tck_min))
+        self.technology_timings = technology_timings
+        self.speedgrade_timings = {
+            self.speedgrade: speedgrade_timings,
+            "default": speedgrade_timings,
+        }
+
+    def init_timebase(self, data):
+        # All DDR3 timings are defined in "timebase" units, combining a medium
+        # timebase (in nanoseconds) and a fine timebase (in picoseconds).
+        fine_timebase_dividend = _read_field(data[9], nbits=4, shift=4)
+        fine_timebase_divisor  = _read_field(data[9], nbits=4, shift=0)
+        fine_timebase_ps = fine_timebase_dividend / fine_timebase_divisor
+        self.fine_timebase_ns = fine_timebase_ps * 1e-3
+        medium_timebase_dividend = data[10]
+        medium_timebase_divisor  = data[11]
+        self.medium_timebase_ns = medium_timebase_dividend / medium_timebase_divisor
+
+    def txx_ns(self, mtb, ftb=0):
+        """Get tXX in nanoseconds from medium and (optional) fine timebase."""
+        # decode FTB encoded in 8-bit two's complement
+        ftb = _twos_complement(ftb, 8)
+        return mtb * self.medium_timebase_ns + ftb * self.fine_timebase_ns
+
+    @staticmethod
+    def speedgrade_freq(tck_ns):
+        # Calculate rounded speedgrade frequency from tck_min
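+        # e.g. tck_ns = 1.25 -> 800 MHz clock -> 1600 MT/s -> speedgrade 1600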
+        freq_mhz = (1 / (tck_ns * 1e-9)) / 1e6
+        freq_mhz *= 2  # clock rate -> transfer rate (DDR)
+        speedgrades = [800, 1066, 1333, 1600, 1866, 2133]
+        for f in speedgrades:
+            # Due to limited tck accuracy of 1ps, calculations may yield higher
+            # frequency than in reality (e.g. for DDR3-1866: tck=1.071 ns ->
+            # -> f=1867.4 MHz, while real is f=1866.6(6) MHz).
+            max_error = 2
+            if abs(freq_mhz - f) < max_error:
+                return f
+        raise ValueError("Transfer rate = {:.2f} does not correspond to any DDR3 speedgrade"
+                         .format(freq_mhz))
+
+def parse_spd_hexdump(filename):
+    """Parse data dumped using the `spdread` command in LiteX BIOS
+
+    This will read files in format:
+        Memory dump:
+        0x00000000  00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e 0f  ................
+        0x00000010  10 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f  ................
+    """
+    data = []
+    last_addr = -1
+    with open(filename) as f:
+        for line in f:
+            if line.startswith("0x"):
+                tokens = line.strip().split()
+                addr = int(tokens[0], 16)
+                assert addr > last_addr
+                values = [int(v, 16) for v in tokens[1:17]]
+                data.extend(values)
+                last_addr = addr
+    return data
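+# Typical flow (file name is illustrative): feed the parsed bytes to
+# SDRAMModule.from_spd_data(), defined below.
+#   spd_bytes = parse_spd_hexdump("spd_dump.txt")
+#   module    = SDRAMModule.from_spd_data(spd_bytes, clk_freq=100e6)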
+
+# SDRAMModule --------------------------------------------------------------------------------------
+
+class SDRAMModule:
+    """SDRAM module geometry and timings.
+
+    SDRAM controller has to ensure that all geometry and
+    timings parameters are fulfilled. Timings parameters
+    can be expressed in ns, in SDRAM clock cycles or both
+    and controller needs to use the greater value.
+
+    SDRAM modules with the same geometry exist can have
+    various speedgrades.
+    """
+    registered = False
+    def __init__(self, clk_freq, rate, speedgrade=None, fine_refresh_mode=None):
+        self.clk_freq      = clk_freq
+        self.rate          = rate
+        self.speedgrade    = speedgrade
+        self.geom_settings = GeomSettings(
+            bankbits = log2_int(self.nbanks),
+            rowbits  = log2_int(self.nrows),
+            colbits  = log2_int(self.ncols),
+        )
+        assert not (self.memtype != "DDR4" and fine_refresh_mode is not None)
+        assert fine_refresh_mode in [None, "1x", "2x", "4x"]
+        if (fine_refresh_mode is None) and (self.memtype == "DDR4"):
+            fine_refresh_mode = "1x"
+        self.timing_settings = TimingSettings(
+            tRP   = self.ns_to_cycles(self.get("tRP")),
+            tRCD  = self.ns_to_cycles(self.get("tRCD")),
+            tWR   = self.ns_to_cycles(self.get("tWR")),
+            tREFI = self.ns_to_cycles(self.get("tREFI", fine_refresh_mode), False),
+            tRFC  = self.ck_ns_to_cycles(*self.get("tRFC", fine_refresh_mode)),
+            tWTR  = self.ck_ns_to_cycles(*self.get("tWTR")),
+            tFAW  = None if self.get("tFAW") is None else self.ck_ns_to_cycles(*self.get("tFAW")),
+            tCCD  = None if self.get("tCCD") is None else self.ck_ns_to_cycles(*self.get("tCCD")),
+            tRRD  = None if self.get("tRRD") is None else self.ck_ns_to_cycles(*self.get("tRRD")),
+            tRC   = None if self.get("tRAS") is None else self.ns_to_cycles(self.get("tRP") + self.get("tRAS")),
+            tRAS  = None if self.get("tRAS") is None else self.ns_to_cycles(self.get("tRAS")),
+            tZQCS = None if self.get("tZQCS") is None else self.ck_ns_to_cycles(*self.get("tZQCS"))
+        )
+        self.timing_settings.fine_refresh_mode = fine_refresh_mode
+
+    def get(self, name, key=None):
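+        # Look the timing up in the speedgrade/technology timing tables when
+        # the module defines them, otherwise fall back to plain attributes
+        # (e.g. "tRP_1600" for a speedgrade-specific value, or just "tRP").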
+        r = None
+        if name in _speedgrade_timings:
+            if hasattr(self, "speedgrade_timings"):
+                speedgrade = "default" if self.speedgrade is None else self.speedgrade
+                r = getattr(self.speedgrade_timings[speedgrade], name)
+            else:
+                name = name + "_" + self.speedgrade if self.speedgrade is not None else name
+                try:
+                    r = getattr(self, name)
+                except AttributeError:
+                    pass
+        else:
+            if hasattr(self, "technology_timings"):
+                r = getattr(self.technology_timings, name)
+            else:
+                try:
+                    r = getattr(self, name)
+                except AttributeError:
+                    pass
+        if (r is not None) and (key is not None):
+            r = r[key]
+        return r
+
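+    # Conversion helpers. Worked example (illustrative): with clk_freq=100e6
+    # and rate="1:2", clk_period_ns = 10 and the 1:2 margin is 5 ns, so
+    # ns_to_cycles(13.75) = ceil(18.75/10) = 2 and
+    # ck_ns_to_cycles(4, 7.5) = max(ceil(4/2), ceil(12.5/10)) = 2.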
+    def ns_to_cycles(self, t, margin=True):
+        clk_period_ns = 1e9/self.clk_freq
+        if margin:
+            margins = {
+                "1:1" : 0,
+                "1:2" : clk_period_ns/2,
+                "1:4" : 3*clk_period_ns/4
+            }
+            t += margins[self.rate]
+        return ceil(t/clk_period_ns)
+
+    def ck_to_cycles(self, c):
+        d = {
+            "1:1" : 1,
+            "1:2" : 2,
+            "1:4" : 4
+        }
+        return ceil(c/d[self.rate])
+
+    def ck_ns_to_cycles(self, c, t):
+        c = 0 if c is None else c
+        t = 0 if t is None else t
+        return max(self.ck_to_cycles(c), self.ns_to_cycles(t))
+
+    @classmethod
+    def from_spd_data(cls, spd_data, clk_freq, fine_refresh_mode=None):
+        # set parameters from SPD data based on memory type
+        spd_cls = {
+            0x0b: DDR3SPDData,
+        }[spd_data[2]]
+        spd = spd_cls(spd_data)
+
+        # Create a deriving class to avoid modifying this one
+        class _SDRAMModule(cls):
+            memtype = spd.memtype
+            nbanks = spd.nbanks
+            nrows = spd.nrows
+            ncols = spd.ncols
+            technology_timings = spd.technology_timings
+            speedgrade_timings = spd.speedgrade_timings
+
+        nphases = {
+            "SDR":   1,
+            "DDR":   2,
+            "LPDDR": 2,
+            "DDR2":  2,
+            "DDR3":  4,
+            "DDR4":  4,
+        }[spd.memtype]
+        rate = "1:{}".format(nphases)
+
+        return _SDRAMModule(clk_freq,
+            rate              = rate,
+            speedgrade        = spd.speedgrade,
+            fine_refresh_mode = fine_refresh_mode)
+
+class SDRAMRegisteredModule(SDRAMModule): registered = True
+
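+# Example (illustrative): instantiating one of the modules defined below for a
+# controller running at 100 MHz with a 1:2 clock ratio, using a speedgrade the
+# module actually lists:
+#   module = MT41K256M16(clk_freq=100e6, rate="1:2", speedgrade="1600")
+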
+# SDR ----------------------------------------------------------------------------------------------
+
+class SDRModule(SDRAMModule):                     memtype = "SDR"
+class SDRRegisteredModule(SDRAMRegisteredModule): memtype = "SDR"
+
+class IS42S16160(SDRModule):
+    # geometry
+    nbanks = 4
+    nrows  = 8192
+    ncols  = 512
+    # timings
+    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(2, None), tCCD=(1, None), tRRD=None)
+    speedgrade_timings = {"default": _SpeedgradeTimings(tRP=20, tRCD=20, tWR=20, tRFC=(None, 70), tFAW=None, tRAS=None)}
+
+class IS42S16320(SDRModule):
+    # geometry
+    nbanks = 4
+    nrows  = 8192
+    ncols  = 1024
+    # timings
+    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(2, None), tCCD=(1, None), tRRD=None)
+    speedgrade_timings = {"default": _SpeedgradeTimings(tRP=20, tRCD=20, tWR=20, tRFC=(None, 70), tFAW=None, tRAS=None)}
+
+class MT48LC4M16(SDRModule):
+    # geometry
+    nbanks = 4
+    nrows  = 4096
+    ncols  = 256
+    # timings
+    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(2, None), tCCD=(1, None), tRRD=None)
+    speedgrade_timings = {"default": _SpeedgradeTimings(tRP=15, tRCD=15, tWR=14, tRFC=(None, 66), tFAW=None, tRAS=None)}
+
+class MT48LC16M16(SDRModule):
+    # geometry
+    nbanks = 4
+    nrows  = 8192
+    ncols  = 512
+    # timings
+    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(2, None), tCCD=(1, None), tRRD=(None, 15))
+    speedgrade_timings = {"default": _SpeedgradeTimings(tRP=20, tRCD=20, tWR=15, tRFC=(None, 66), tFAW=None, tRAS=44)}
+
+class AS4C16M16(SDRModule):
+    # geometry
+    nbanks = 4
+    nrows  = 8192
+    ncols  = 512
+    # timings
+    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(2, None), tCCD=(1, None), tRRD=None)
+    speedgrade_timings = {"default": _SpeedgradeTimings(tRP=18, tRCD=18, tWR=12, tRFC=(None, 60), tFAW=None, tRAS=None)}
+
+class AS4C32M16(SDRModule):
+    # geometry
+    nbanks = 4
+    nrows  = 8192
+    ncols  = 1024
+    # timings
+    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(2, None), tCCD=(1, None), tRRD=None)
+    speedgrade_timings = {"default": _SpeedgradeTimings(tRP=18, tRCD=18, tWR=12, tRFC=(None, 60), tFAW=None, tRAS=None)}
+
+class AS4C32M8(SDRModule):
+    # geometry
+    nbanks = 4
+    nrows  = 8192
+    ncols  = 1024
+    # timings
+    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(2, None), tCCD=(1, None), tRRD=(None, 15))
+    speedgrade_timings = {"default": _SpeedgradeTimings(tRP=20, tRCD=20, tWR=15, tRFC=(None, 66), tFAW=None, tRAS=44)}
+
+class M12L64322A(SDRModule):
+    # geometry
+    nbanks = 4
+    nrows  = 2048
+    ncols  = 256
+    # timings
+    technology_timings = _TechnologyTimings(tREFI=64e6/4096, tWTR=(2, None), tCCD=(1, None), tRRD=(None, 10))
+    speedgrade_timings = {"default": _SpeedgradeTimings(tRP=15, tRCD=15, tWR=15, tRFC=(None, 55), tFAW=None, tRAS=40)}
+
+class M12L16161A(SDRModule):
+    # geometry
+    nbanks = 2
+    nrows  = 2048
+    ncols  = 256
+    # timings
+    technology_timings = _TechnologyTimings(tREFI=64e6/4096, tWTR=(2, None), tCCD=(1, None), tRRD=(None, 10))
+    speedgrade_timings = {"default": _SpeedgradeTimings(tRP=15, tRCD=15, tWR=15, tRFC=(None, 55), tFAW=None, tRAS=40)}
+
+# DDR ----------------------------------------------------------------------------------------------
+
+class DDRModule(SDRAMModule):                     memtype = "DDR"
+class DDRRegisteredModule(SDRAMRegisteredModule): memtype = "DDR"
+
+class MT46V32M16(SDRAMModule):
+    memtype = "DDR"
+    # geometry
+    nbanks = 4
+    nrows  = 8192
+    ncols  = 1024
+    # timings
+    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(2, None), tCCD=(1, None), tRRD=None)
+    speedgrade_timings = {"default": _SpeedgradeTimings(tRP=15, tRCD=15, tWR=15, tRFC=(None, 70), tFAW=None, tRAS=None)}
+
+# LPDDR --------------------------------------------------------------------------------------------
+
+class LPDDRModule(SDRAMModule):                     memtype = "LPDDR"
+class LPDDRRegisteredModule(SDRAMRegisteredModule): memtype = "LPDDR"
+
+class MT46H32M16(LPDDRModule):
+    # geometry
+    nbanks = 4
+    nrows  = 8192
+    ncols  = 1024
+    # timings
+    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(2, None), tCCD=(1, None), tRRD=None)
+    speedgrade_timings = {"default": _SpeedgradeTimings(tRP=15, tRCD=15, tWR=15, tRFC=(None, 72), tFAW=None, tRAS=None)}
+
+class MT46H32M32(LPDDRModule):
+    # geometry
+    nbanks = 4
+    nrows  = 8192
+    ncols  = 1024
+    # timings
+    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(2, None), tCCD=(1, None), tRRD=None)
+    speedgrade_timings = {"default": _SpeedgradeTimings(tRP=15, tRCD=15, tWR=15, tRFC=(None, 72), tFAW=None, tRAS=None)}
+
+# DDR2 ---------------------------------------------------------------------------------------------
+
+class DDR2Module(SDRAMModule):                     memtype = "DDR2"
+class DDR2RegisteredModule(SDRAMRegisteredModule): memtype = "DDR2"
+
+class MT47H128M8(DDR2Module):
+    memtype = "DDR2"
+    # geometry
+    nbanks = 8
+    nrows  = 16384
+    ncols  = 1024
+    # timings
+    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(None, 7.5), tCCD=(2, None), tRRD=None)
+    speedgrade_timings = {"default": _SpeedgradeTimings(tRP=15, tRCD=15, tWR=15, tRFC=(None, 127.5), tFAW=None, tRAS=None)}
+
+class MT47H32M16(DDR2Module):
+    memtype = "DDR2"
+    # geometry
+    nbanks = 4
+    nrows  = 8192
+    ncols  = 1024
+    # timings
+    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(None, 7.5), tCCD=(2, None), tRRD=None)
+    speedgrade_timings = {"default": _SpeedgradeTimings(tRP=15, tRCD=15, tWR=15, tRFC=(None, 127.5), tFAW=None, tRAS=None)}
+
+class MT47H64M16(DDR2Module):
+    memtype = "DDR2"
+    # geometry
+    nbanks = 8
+    nrows  = 8192
+    ncols  = 1024
+    # timings
+    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(None, 7.5), tCCD=(2, None), tRRD=None)
+    speedgrade_timings = {"default": _SpeedgradeTimings(tRP=15, tRCD=15, tWR=15, tRFC=(None, 127.5), tFAW=None, tRAS=None)}
+
+class P3R1GE4JGF(DDR2Module):
+    memtype = "DDR2"
+    # geometry
+    nbanks = 8
+    nrows  = 8192
+    ncols  = 1024
+    # timings
+    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(None, 7.5), tCCD=(2, None), tRRD=None)
+    speedgrade_timings = {"default": _SpeedgradeTimings(tRP=12.5, tRCD=12.5, tWR=15, tRFC=(None, 127.5), tFAW=None, tRAS=None)}
+
+# DDR3 (Chips) -------------------------------------------------------------------------------------
+
+class DDR3Module(SDRAMModule):                     memtype = "DDR3"
+class DDR3RegisteredModule(SDRAMRegisteredModule): memtype = "DDR3"
+
+class MT41K64M16(DDR3Module):
+    memtype = "DDR3"
+    # geometry
+    nbanks = 8
+    nrows  = 8192
+    ncols  = 1024
+    # timings
+    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 10), tZQCS=(64, 80))
+    speedgrade_timings = {
+        "800":  _SpeedgradeTimings(tRP=13.1,  tRCD=13.1,  tWR=13.1,  tRFC=(64,  None), tFAW=(None, 50), tRAS=37.5),
+        "1066": _SpeedgradeTimings(tRP=13.1,  tRCD=13.1,  tWR=13.1,  tRFC=(86,  None), tFAW=(None, 50), tRAS=37.5),
+        "1333": _SpeedgradeTimings(tRP=13.5,  tRCD=13.5,  tWR=13.5,  tRFC=(107, None), tFAW=(None, 45), tRAS=36),
+        "1600": _SpeedgradeTimings(tRP=13.75, tRCD=13.75, tWR=13.75, tRFC=(128, None), tFAW=(None, 40), tRAS=35),
+    }
+    speedgrade_timings["default"] = speedgrade_timings["1600"]
+
+class MT41J128M16(DDR3Module):
+    memtype = "DDR3"
+    # geometry
+    nbanks = 8
+    nrows  = 16384
+    ncols  = 1024
+    # timings
+    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 10), tZQCS=(64, 80))
+    speedgrade_timings = {
+        "800":  _SpeedgradeTimings(tRP=13.1,  tRCD=13.1,  tWR=13.1,  tRFC=(64, None),  tFAW=(None, 50), tRAS=37.5),
+        "1066": _SpeedgradeTimings(tRP=13.1,  tRCD=13.1,  tWR=13.1,  tRFC=(86, None),  tFAW=(None, 50), tRAS=37.5),
+        "1333": _SpeedgradeTimings(tRP=13.5,  tRCD=13.5,  tWR=13.5,  tRFC=(107, None), tFAW=(None, 45), tRAS=36),
+        "1600": _SpeedgradeTimings(tRP=13.75, tRCD=13.75, tWR=13.75, tRFC=(128, None), tFAW=(None, 40), tRAS=35),
+    }
+    speedgrade_timings["default"] = speedgrade_timings["1600"]
+
+class MT41K128M16(MT41J128M16): pass
+
+class MT41J256M16(DDR3Module):
+    # geometry
+    nbanks = 8
+    nrows  = 32768
+    ncols  = 1024
+    # timings
+    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 10), tZQCS=(64, 80))
+    speedgrade_timings = {
+        "800":  _SpeedgradeTimings(tRP=13.1,  tRCD=13.1,  tWR=13.1,  tRFC=(139, None), tFAW=(None, 50), tRAS=37.5),
+        "1066": _SpeedgradeTimings(tRP=13.1,  tRCD=13.1,  tWR=13.1,  tRFC=(138, None), tFAW=(None, 50), tRAS=37.5),
+        "1333": _SpeedgradeTimings(tRP=13.5,  tRCD=13.5,  tWR=13.5,  tRFC=(174, None), tFAW=(None, 45), tRAS=36),
+        "1600": _SpeedgradeTimings(tRP=13.75, tRCD=13.75, tWR=13.75, tRFC=(208, None), tFAW=(None, 40), tRAS=35),
+    }
+    speedgrade_timings["default"] = speedgrade_timings["1600"]
+
+class MT41K256M16(MT41J256M16): pass
+
+class MT41J512M16(DDR3Module):
+    # geometry
+    nbanks = 8
+    nrows  = 65536
+    ncols  = 1024
+    # timings
+    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 10), tZQCS=(64, 80))
+    speedgrade_timings = {
+        "1600": _SpeedgradeTimings(tRP=13.75, tRCD=13.75, tWR=13.75, tRFC=(280, None), tFAW=(None, 40), tRAS=39),
+    }
+    speedgrade_timings["default"] = speedgrade_timings["1600"]
+
+class MT41K512M16(MT41J512M16): pass
+
+class K4B1G0446F(DDR3Module):
+    # geometry
+    nbanks = 8
+    nrows  = 16384
+    ncols  = 1024
+    # timings
+    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 10), tZQCS=(64, 80))
+    speedgrade_timings = {
+        "800":  _SpeedgradeTimings(tRP=15,     tRCD=15,     tWR=15, tRFC=(120, None), tFAW=(None, 50), tRAS=37.5),
+        "1066": _SpeedgradeTimings(tRP=13.125, tRCD=13.125, tWR=15, tRFC=(160, None), tFAW=(None, 50), tRAS=37.5),
+        "1333": _SpeedgradeTimings(tRP=13.5,   tRCD=13.5,   tWR=15, tRFC=(200, None), tFAW=(None, 45), tRAS=36),
+        "1600": _SpeedgradeTimings(tRP=13.75,  tRCD=13.75,  tWR=15, tRFC=(240, None), tFAW=(None, 40), tRAS=35),
+    }
+    speedgrade_timings["default"] = speedgrade_timings["1600"]
+
+class K4B2G1646F(DDR3Module):
+    # geometry
+    nbanks = 8
+    nrows  = 16384
+    ncols  = 1024
+    # timings
+    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 10), tZQCS=(64, 80))
+    speedgrade_timings = {
+        "800":  _SpeedgradeTimings(tRP=15,     tRCD=15,     tWR=15, tRFC=(104, None), tFAW=(None, 50), tRAS=37.5),
+        "1066": _SpeedgradeTimings(tRP=13.125, tRCD=13.125, tWR=15, tRFC=(139, None), tFAW=(None, 50), tRAS=37.5),
+        "1333": _SpeedgradeTimings(tRP=13.5,   tRCD=13.5,   tWR=15, tRFC=(174, None), tFAW=(None, 45), tRAS=36),
+        "1600": _SpeedgradeTimings(tRP=13.75,  tRCD=13.75,  tWR=15, tRFC=(208, None), tFAW=(None, 40), tRAS=35),
+    }
+    speedgrade_timings["default"] = speedgrade_timings["1600"]
+
+class H5TC4G63CFR(DDR3Module):
+    # geometry
+    nbanks = 8
+    nrows  = 16384
+    ncols  = 1024
+    # timings
+    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 7.5), tZQCS=(64, 80))
+    speedgrade_timings = {
+        "800":  _SpeedgradeTimings(tRP=15, tRCD=15, tWR=15, tRFC=(260, None), tFAW=(None, 40), tRAS=37.5),
+    }
+    speedgrade_timings["default"] = speedgrade_timings["800"]
+
+class IS43TR16128B(DDR3Module):
+    # geometry
+    nbanks = 8
+    nrows  = 16384
+    ncols  = 1024
+    # timings
+    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 6), tZQCS=(64, 80))
+    speedgrade_timings = {
+        "1600": _SpeedgradeTimings(tRP=13.75, tRCD=13.75, tWR=15, tRFC=(None, 160), tFAW=(None, 40), tRAS=35),
+    }
+    speedgrade_timings["default"] = speedgrade_timings["1600"]
+
+
+# DDR3 (SO-DIMM) -----------------------------------------------------------------------------------
+
+class MT8JTF12864(DDR3Module):
+    # base chip: MT41J128M8
+    # geometry
+    nbanks = 8
+    nrows  = 16384
+    ncols  = 1024
+    # timings
+    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 6), tZQCS=(64, 80))
+    speedgrade_timings = {
+        "1066": _SpeedgradeTimings(tRP=15,     tRCD=15,     tWR=15, tRFC=(None, 110), tFAW=(None, 37.5), tRAS=37.5),
+        "1333": _SpeedgradeTimings(tRP=13.125, tRCD=13.125, tWR=15, tRFC=(None, 110), tFAW=(None, 30),   tRAS=36),
+    }
+    speedgrade_timings["default"] = speedgrade_timings["1333"]
+
+class MT8KTF51264(DDR3Module):
+    # base chip: MT41K512M8
+    # geometry
+    nbanks = 8
+    nrows  = 65536
+    ncols  = 1024
+    # timings
+    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 6), tZQCS=(64, 80))
+    speedgrade_timings = {
+        "800" : _SpeedgradeTimings(tRP=15,     tRCD=15,     tWR=15, tRFC=(None, 260), tFAW=(None, 40), tRAS=37.5),
+        "1066": _SpeedgradeTimings(tRP=15,     tRCD=15,     tWR=15, tRFC=(None, 260), tFAW=(None, 40), tRAS=37.5),
+        "1333": _SpeedgradeTimings(tRP=13.125, tRCD=13.125, tWR=15, tRFC=(None, 260), tFAW=(None, 30), tRAS=36),
+        "1600": _SpeedgradeTimings(tRP=13.125, tRCD=13.125, tWR=15, tRFC=(None, 260), tFAW=(None, 30), tRAS=35),
+        "1866": _SpeedgradeTimings(tRP=13.125, tRCD=13.125, tWR=15, tRFC=(None, 260), tFAW=(None, 27), tRAS=34),
+    }
+    speedgrade_timings["default"] = speedgrade_timings["1866"]
+
+class MT18KSF1G72HZ(DDR3Module):
+    # base chip: MT41K512M8
+    # geometry
+    nbanks = 8
+    nrows  = 65536
+    ncols  = 1024
+    # timings
+    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 6), tZQCS=(64, 80))
+    speedgrade_timings = {
+        "1066": _SpeedgradeTimings(tRP=15,     tRCD=15,     tWR=15, tRFC=(None, 260), tFAW=(None, 40), tRAS=37.5),
+        "1333": _SpeedgradeTimings(tRP=13.125, tRCD=13.125, tWR=15, tRFC=(None, 260), tFAW=(None, 30), tRAS=36),
+        "1600": _SpeedgradeTimings(tRP=13.125, tRCD=13.125, tWR=15, tRFC=(None, 260), tFAW=(None, 30), tRAS=35),
+    }
+    speedgrade_timings["default"] = speedgrade_timings["1600"]
+
+class AS4C256M16D3A(DDR3Module):
+    # geometry
+    nbanks = 8
+    nrows  = 32768
+    ncols  = 1024
+    # timings
+    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 7.5), tZQCS=(64, 80))
+    speedgrade_timings = {
+        "1600": _SpeedgradeTimings(tRP=13.75, tRCD=13.75, tWR=15, tRFC=(None, 260), tFAW=(None, 40), tRAS=35),
+    }
+    speedgrade_timings["default"] = speedgrade_timings["1600"]
+
+class MT16KTF1G64HZ(DDR3Module):
+    # base chip: MT41K512M8
+    # geometry
+    nbanks = 8
+    nrows  = 65536
+    ncols  = 1024
+    # timings
+    technology_timings = _TechnologyTimings(tREFI=64e6/8192, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 6), tZQCS=(64, 80))
+    speedgrade_timings = {
+        "800" : _SpeedgradeTimings(tRP=15,     tRCD=15,     tWR=15, tRFC=(None, 260), tFAW=(None, 40), tRAS=37.5),
+        "1066": _SpeedgradeTimings(tRP=15,     tRCD=15,     tWR=15, tRFC=(None, 260), tFAW=(None, 40), tRAS=37.5),
+        "1333": _SpeedgradeTimings(tRP=15,     tRCD=15,     tWR=15, tRFC=(None, 260), tFAW=(None, 30), tRAS=36),
+        "1600": _SpeedgradeTimings(tRP=13.125, tRCD=13.125, tWR=15, tRFC=(None, 260), tFAW=(None, 30), tRAS=35),
+        "1866": _SpeedgradeTimings(tRP=13.125, tRCD=13.125, tWR=15, tRFC=(None, 260), tFAW=(None, 27), tRAS=34),
+    }
+    speedgrade_timings["default"] = speedgrade_timings["1866"]
+
+
+# DDR4 (Chips) -------------------------------------------------------------------------------------
+
+class DDR4Module(SDRAMModule):                     memtype = "DDR4"
+class DDR4RegisteredModule(SDRAMRegisteredModule): memtype = "DDR4"
+
+class EDY4016A(DDR4Module):
+    # geometry
+    ngroupbanks = 4
+    ngroups     = 2
+    nbanks      = ngroups * ngroupbanks
+    nrows       = 32768
+    ncols       = 1024
+    # timings
+    trefi = {"1x": 64e6/8192,   "2x": (64e6/8192)/2, "4x": (64e6/8192)/4}
+    trfc  = {"1x": (None, 260), "2x": (None, 160),   "4x": (None, 110)}
+    technology_timings = _TechnologyTimings(tREFI=trefi, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 4.9), tZQCS=(128, 80))
+    speedgrade_timings = {
+        "2400": _SpeedgradeTimings(tRP=13.32, tRCD=13.32, tWR=15, tRFC=trfc, tFAW=(28, 30), tRAS=32),
+    }
+    speedgrade_timings["default"] = speedgrade_timings["2400"]
+
+class MT40A1G8(DDR4Module):
+    # geometry
+    ngroupbanks = 4
+    ngroups     = 4
+    nbanks      = ngroups * ngroupbanks
+    nrows       = 65536
+    ncols       = 1024
+    # timings
+    trefi = {"1x": 64e6/8192,   "2x": (64e6/8192)/2, "4x": (64e6/8192)/4}
+    trfc  = {"1x": (None, 350), "2x": (None, 260),   "4x": (None, 160)}
+    technology_timings = _TechnologyTimings(tREFI=trefi, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 6.4), tZQCS=(128, 80))
+    speedgrade_timings = {
+        "2400": _SpeedgradeTimings(tRP=13.32, tRCD=13.32, tWR=15, tRFC=trfc, tFAW=(20, 25), tRAS=32),
+        "2666": _SpeedgradeTimings(tRP=13.50, tRCD=13.50, tWR=15, tRFC=trfc, tFAW=(20, 21), tRAS=32),
+    }
+    speedgrade_timings["default"] = speedgrade_timings["2400"]
+
+class MT40A256M16(DDR4Module):
+    # geometry
+    ngroupbanks = 4
+    ngroups     = 2
+    nbanks      = ngroups * ngroupbanks
+    nrows       = 32768
+    ncols       = 1024
+    # timings
+    trefi = {"1x": 64e6/8192, "2x": (64e6/8192)/2, "4x": (64e6/8192)/4}
+    trfc  = {"1x": (None, 260), "2x": (None, 160), "4x": (None, 110)}
+    technology_timings = _TechnologyTimings(tREFI=trefi, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 4.9), tZQCS=(128, 80))
+    speedgrade_timings = {
+        "2400": _SpeedgradeTimings(tRP=13.32, tRCD=13.32, tWR=15, tRFC=trfc, tFAW=(28, 35), tRAS=32),
+    }
+    speedgrade_timings["default"] = speedgrade_timings["2400"]
+
+class MT40A512M8(DDR4Module):
+    # geometry
+    ngroupbanks = 4
+    ngroups     = 4
+    nbanks      = ngroups * ngroupbanks
+    nrows       = 32768
+    ncols       = 1024
+    # timings
+    trefi = {"1x": 64e6/8192,   "2x": (64e6/8192)/2, "4x": (64e6/8192)/4}
+    trfc  = {"1x": (None, 350), "2x": (None, 260),   "4x": (None, 160)}
+    technology_timings = _TechnologyTimings(tREFI=trefi, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 4.9), tZQCS=(128, 80))
+    speedgrade_timings = {
+        "2400": _SpeedgradeTimings(tRP=13.32, tRCD=13.32, tWR=15, tRFC=trfc, tFAW=(20, 25), tRAS=32),
+        "2666": _SpeedgradeTimings(tRP=13.50, tRCD=13.50, tWR=15, tRFC=trfc, tFAW=(20, 21), tRAS=32),
+    }
+    speedgrade_timings["default"] = speedgrade_timings["2400"]
+
+class MT40A512M16(DDR4Module):
+    # geometry
+    ngroupbanks = 4
+    ngroups     = 2
+    nbanks      = ngroups * ngroupbanks
+    nrows       = 65536
+    ncols       = 1024
+    # timings
+    trefi = {"1x": 64e6/8192,   "2x": (64e6/8192)/2, "4x": (64e6/8192)/4}
+    trfc  = {"1x": (None, 350), "2x": (None, 260),   "4x": (None, 160)}
+    technology_timings = _TechnologyTimings(tREFI=trefi, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 4.9), tZQCS=(128, 80))
+    speedgrade_timings = {
+        "2400": _SpeedgradeTimings(tRP=13.32, tRCD=13.32, tWR=15, tRFC=trfc, tFAW=(20, 25), tRAS=32),
+    }
+    speedgrade_timings["default"] = speedgrade_timings["2400"]
+
+# DDR4 (SO-DIMM) -----------------------------------------------------------------------------------
+
+class KVR21SE15S84(DDR4Module):
+    # geometry
+    ngroupbanks = 4
+    ngroups     = 4
+    nbanks      = ngroups * ngroupbanks
+    nrows       = 32768
+    ncols       = 1024
+    # timings
+    trefi = {"1x": 64e6/8192,   "2x": (64e6/8192)/2, "4x": (64e6/8192)/4}
+    trfc  = {"1x": (None, 350), "2x": (None, 260),   "4x": (None, 160)}
+    technology_timings = _TechnologyTimings(tREFI=trefi, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 4.9), tZQCS=(128, 80))
+    speedgrade_timings = {
+        "2133": _SpeedgradeTimings(tRP=13.5, tRCD=13.5, tWR=15, tRFC=trfc, tFAW=(20, 25), tRAS=33),
+    }
+    speedgrade_timings["default"] = speedgrade_timings["2133"]
+
+class MTA4ATF51264HZ(DDR4Module):
+    # geometry
+    ngroupbanks = 4
+    ngroups     = 2
+    nbanks      = ngroups * ngroupbanks
+    nrows       = 65536
+    ncols       = 1024
+    # timings
+    trefi = {"1x": 64e6/8192,   "2x": (64e6/8192)/2, "4x": (64e6/8192)/4}
+    trfc  = {"1x": (None, 350), "2x": (None, 260),   "4x": (None, 160)}
+    technology_timings = _TechnologyTimings(tREFI=trefi, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 4.9), tZQCS=(128, 80))
+    speedgrade_timings = {
+        "2133": _SpeedgradeTimings(tRP=13.5, tRCD=13.5, tWR=15, tRFC=trfc, tFAW=(20, 25), tRAS=33),
+    }
+    speedgrade_timings["default"] = speedgrade_timings["2133"]
+
+# DDR4 (RDIMM) -------------------------------------------------------------------------------------
+
+class MTA18ASF2G72PZ(DDR4RegisteredModule):
+    # geometry
+    ngroupbanks = 4
+    ngroups     = 4
+    nbanks      = ngroups * ngroupbanks
+    nrows       = 131072
+    ncols       = 1024
+    # timings
+    trefi = {"1x": 64e6/8192,   "2x": (64e6/8192)/2, "4x": (64e6/8192)/4}
+    trfc  = {"1x": (None, 350), "2x": (None, 260),   "4x": (None, 160)}
+    technology_timings = _TechnologyTimings(tREFI=trefi, tWTR=(4, 7.5), tCCD=(4, None), tRRD=(4, 4.9), tZQCS=(128, 80))
+    speedgrade_timings = {
+        "2400": _SpeedgradeTimings(tRP=13.32, tRCD=13.32, tWR=15, tRFC=trfc, tFAW=(20, 25), tRAS=32),
+    }
+    speedgrade_timings["default"] = speedgrade_timings["2400"]
diff --git a/gram/phy/__init__.py b/gram/phy/__init__.py
new file mode 100644 (file)
index 0000000..e541bc9
--- /dev/null
@@ -0,0 +1,11 @@
+from litedram.phy.gensdrphy import GENSDRPHY
+
+from litedram.phy.s6ddrphy import S6HalfRateDDRPHY, S6QuarterRateDDRPHY
+from litedram.phy.s7ddrphy import V7DDRPHY, K7DDRPHY, A7DDRPHY
+from litedram.phy.usddrphy import USDDRPHY, USPDDRPHY
+
+from litedram.phy.ecp5ddrphy import ECP5DDRPHY, ECP5DDRPHYInit
+
+# backward compatibility (remove when no longer needed)
+from litedram.phy import s7ddrphy as a7ddrphy
+from litedram.phy import s7ddrphy as k7ddrphy
diff --git a/gram/phy/dfi.py b/gram/phy/dfi.py
new file mode 100644 (file)
index 0000000..f78bb80
--- /dev/null
@@ -0,0 +1,111 @@
+# This file is Copyright (c) 2015 Sebastien Bourdeauducq <sb@m-labs.hk>
+#              Copyright (c) 2020 LambdaConcept <contact@lambdaconcept.com>
+
+from nmigen import *
+from nmigen.hdl.rec import *
+
+def phase_cmd_description(addressbits, bankbits, nranks):
+    return [
+        ("address", addressbits, DIR_FANOUT),
+        ("bank",       bankbits, DIR_FANOUT),
+        ("cas_n",             1, DIR_FANOUT),
+        ("cs_n",         nranks, DIR_FANOUT),
+        ("ras_n",             1, DIR_FANOUT),
+        ("we_n",              1, DIR_FANOUT),
+        ("cke",          nranks, DIR_FANOUT),
+        ("odt",          nranks, DIR_FANOUT),
+        ("reset_n",           1, DIR_FANOUT),
+        ("act_n",             1, DIR_FANOUT)
+    ]
+
+
+def phase_wrdata_description(databits):
+    return [
+        ("wrdata",         databits, DIR_FANOUT),
+        ("wrdata_en",             1, DIR_FANOUT),
+        ("wrdata_mask", databits//8, DIR_FANOUT)
+    ]
+
+
+def phase_rddata_description(databits):
+    return [
+        ("rddata_en",           1, DIR_FANOUT),
+        ("rddata",       databits, DIR_FANIN),
+        ("rddata_valid",        1, DIR_FANIN)
+    ]
+
+
+def phase_description(addressbits, bankbits, nranks, databits):
+    r = phase_cmd_description(addressbits, bankbits, nranks)
+    r += phase_wrdata_description(databits)
+    r += phase_rddata_description(databits)
+    return r
+
+
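+# A DFI interface groups per-phase command, write-data and read-data records.
+# Illustrative construction (parameter values are examples, not requirements):
+#   dfi = Interface(addressbits=14, bankbits=3, nranks=1, databits=64, nphases=2)
+#   dfi.phases[0].cas_n, dfi.phases[1].wrdata, ... are the per-phase signals.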
+class Interface(Record):
+    def __init__(self, addressbits, bankbits, nranks, databits, nphases=1):
+        layout = [("p"+str(i), phase_description(addressbits, bankbits, nranks, databits)) for i in range(nphases)]
+        Record.__init__(self, layout)
+        self.phases = [getattr(self, "p"+str(i)) for i in range(nphases)]
+        for p in self.phases:
+            p.cas_n.reset = 1
+            p.cs_n.reset = (2**nranks-1)
+            p.ras_n.reset = 1
+            p.we_n.reset = 1
+            p.act_n.reset = 1
+
+    # Returns pairs (DFI-mandated signal name, nMigen signal object)
+    def get_standard_names(self, m2s=True, s2m=True):
+        r = []
+        add_suffix = len(self.phases) > 1
+        for n, phase in enumerate(self.phases):
+            for field, size, direction in phase.layout:
+                if (m2s and direction == DIR_FANOUT) or (s2m and direction == DIR_FANIN):
+                    if add_suffix:
+                        if direction == DIR_FANOUT:
+                            suffix = "_p" + str(n)
+                        else:
+                            suffix = "_w" + str(n)
+                    else:
+                        suffix = ""
+                    r.append(("dfi_" + field + suffix, getattr(phase, field)))
+        return r
+
+
+class Interconnect(Elaboratable):
+    def __init__(self, master, slave):
+        self._master = master
+        self._slave = slave
+
+    def elaborate(self, platform):
+        m = Module()
+        m.d.comb += self._master.connect(self._slave)
+        return m
+
+
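+# Translates commands using the classic RAS_n/CAS_n/WE_n encoding into the
+# DDR4 encoding: on an ACTIVATE (RAS_n low, CAS_n/WE_n high) it drives ACT_n
+# low and places the upper row address bits (A14-A16) on the WE_n/CAS_n/RAS_n
+# pins, which DDR4 multiplexes with the address bus.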
+class DDR4DFIMux(Elaboratable):
+    def __init__(self, dfi_i, dfi_o):
+        self.dfi_i = dfi_i
+        self.dfi_o = dfi_o
+
+    def elaborate(self, platform):
+        m = Module()
+
+        dfi_i = self.dfi_i
+        dfi_o = self.dfi_o
+
+        for i in range(len(dfi_i.phases)):
+            p_i = dfi_i.phases[i]
+            p_o = dfi_o.phases[i]
+            m.d.comb += p_i.connect(p_o)
+            with m.If(~p_i.ras_n & p_i.cas_n & p_i.we_n):
+                m.d.comb += [
+                    p_o.act_n.eq(0),
+                    p_o.we_n.eq(p_i.address[14]),
+                    p_o.cas_n.eq(p_i.address[15]),
+                    p_o.ras_n.eq(p_i.address[16]),
+                ]
+            with m.Else():
+                m.d.comb += p_o.act_n.eq(1)
+
+        return m
diff --git a/gram/phy/ecp5ddrphy.py b/gram/phy/ecp5ddrphy.py
new file mode 100644 (file)
index 0000000..70f1672
--- /dev/null
@@ -0,0 +1,491 @@
+# This file is Copyright (c) 2019 David Shah <dave@ds0.me>
+# This file is Copyright (c) 2019-2020 Florent Kermarrec <florent@enjoy-digital.fr>
+# License: BSD
+
+# 1:2 frequency-ratio DDR3 PHY for Lattice's ECP5
+# DDR3: 800 MT/s
+
+import math
+
+
+from nmigen import *
+from nmigen.lib.cdc import FFSynchronizer
+from nmigen.utils import log2_int
+
+from lambdasoc.periph import Peripheral
+
+import gram.stream as stream
+from gram.common import *
+from gram.phy.dfi import *
+from gram.timeline import Timeline
+
+# Lattice ECP5 DDR PHY Initialization --------------------------------------------------------------
+
+class ECP5DDRPHYInit(Elaboratable):
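+    # Sequences the ECP5 DDR bring-up: once DDRDLLA reports lock, the Timeline
+    # in elaborate() freezes the DLL, stops and resets the ECLK domain, then
+    # pauses DQSBUFM while the DLL delay code is updated.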
+    def __init__(self, eclk_cd):
+        self.pause = Signal()
+        self.stop  = Signal()
+        self.delay = Signal()
+        self._eclk_cd = eclk_cd
+
+    def elaborate(self, platform):
+        m = Module()
+
+        new_lock = Signal()
+        update   = Signal()
+        stop     = Signal()
+        freeze   = Signal()
+        pause    = Signal()
+        reset    = Signal()
+
+        # DDRDLLA instance -------------------------------------------------------------------------
+        _lock = Signal()
+        delay = Signal()
+        m.submodules += Instance("DDRDLLA",
+            i_CLK      = ClockSignal("sys2x"),
+            i_RST      = ResetSignal(),
+            i_UDDCNTLN = ~update,
+            i_FREEZE   = freeze,
+            o_DDRDEL   = delay,
+            o_LOCK     = _lock
+        )
+        lock   = Signal()
+        lock_d = Signal()
+        m.submodules += FFSynchronizer(_lock, lock)
+        m.d.sync += lock_d.eq(lock)
+        m.d.sync += new_lock.eq(lock & ~lock_d)
+
+        # DDRDLLA/DQSBUFM/ECLK initialization sequence ---------------------------------------------
+        t = 8 # in cycles
+        tl = Timeline([
+            (1*t,  [freeze.eq(1)]), # Freeze DDRDLLA
+            (2*t,  [stop.eq(1)]),   # Stop ECLK domain
+            (3*t,  [reset.eq(1)]),  # Reset ECLK domain
+            (4*t,  [reset.eq(0)]),  # Release ECLK domain reset
+            (5*t,  [stop.eq(0)]),   # Release ECLK domain stop
+            (6*t,  [freeze.eq(0)]), # Release DDRDLLA freeze
+            (7*t,  [pause.eq(1)]),  # Pause DQSBUFM
+            (8*t,  [update.eq(1)]), # Update DDRDLLA
+            (9*t,  [update.eq(0)]), # Release DDRDLLA update
+            (10*t, [pause.eq(0)]),  # Release DQSBUFM pause
+        ])
+        m.submodules += tl
+        # Wait DDRDLLA Lock
+        m.d.comb += tl.trigger.eq(new_lock)
+
+        # ------------------------------------------------------------------------------------------
+        m.d.comb += [
+            self.pause.eq(pause),
+            self.stop.eq(stop),
+            self.delay.eq(delay),
+            ResetSignal(self._eclk_cd).eq(reset)
+        ]
+
+        return m
+
+# Lattice ECP5 DDR PHY -----------------------------------------------------------------------------
+
+class ECP5DDRPHY(Peripheral, Elaboratable):
+    def __init__(self, pads, sys_clk_freq=100e6):
+        super().__init__() # Peripheral init
+
+        #self.pads = PHYPadsCombiner(pads)
+        self.pads = pads
+        self._sys_clk_freq = sys_clk_freq
+
+        databits = len(self.pads.dq.o)
+        print("databits = ", databits)
+        assert databits%8 == 0
+
+        # CSR
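+        # dly_sel selects the DQS group(s) targeted by the controls below;
+        # rdly_dq_rst/inc adjust the read DQS delay, rdly_dq_bitslip_rst/
+        # rdly_dq_bitslip the read bitslip, and burstdet_seen latches the
+        # per-group DQSBUFM burst-detect flag (cleared through burstdet_clr).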
+        bank = self.csr_bank()
+        
+        self._dly_sel = bank.csr(databits//8, "rw")
+        
+        self._rdly_dq_rst = bank.csr(1, "rw")
+        self._rdly_dq_inc = bank.csr(1, "rw")
+        self._rdly_dq_bitslip_rst = bank.csr(1, "rw")
+        self._rdly_dq_bitslip = bank.csr(1, "rw")
+
+        self._burstdet_clr = bank.csr(1, "rw")
+        self._burstdet_seen = bank.csr(databits//8, "r")
+
+        self._zero_ev = self.event(mode="rise")
+
+        self._bridge  = self.bridge(data_width=32, granularity=8, alignment=2)
+        self.bus = self._bridge.bus
+        self.irq = self._bridge.irq
+
+    def elaborate(self, platform):
+        m = Module()
+
+        memtype = "DDR3"
+        tck = 2/(2*2*self._sys_clk_freq)
+        addressbits = len(self.pads.a.o)
+        bankbits = len(self.pads.ba.o)
+        nranks = 1 if not hasattr(self.pads, "cs_n") else len(self.pads.cs_n)
+        databits = len(self.pads.dq.oe)
+        nphases = 2
+
+        # Init -------------------------------------------------------------------------------------
+        self.init = init = ECP5DDRPHYInit("sys2x")
+        m.submodules.init = DomainRenamer("init")(init)
+
+        # Parameters -------------------------------------------------------------------------------
+        cl, cwl         = get_cl_cw(memtype, tck)
+        cl_sys_latency  = get_sys_latency(nphases, cl)
+        cwl_sys_latency = get_sys_latency(nphases, cwl)
+
+        # Observation
+        self.datavalid = Signal(databits//8)
+
+        # PHY settings -----------------------------------------------------------------------------
+        rdcmdphase, rdphase = get_sys_phases(nphases, cl_sys_latency, cl)
+        wrcmdphase, wrphase = get_sys_phases(nphases, cwl_sys_latency, cwl)
+        self.settings = PhySettings(
+            phytype       = "ECP5DDRPHY",
+            memtype       = memtype,
+            databits      = databits,
+            dfi_databits  = 4*databits,
+            nranks        = nranks,
+            nphases       = nphases,
+            rdphase       = rdphase,
+            wrphase       = wrphase,
+            rdcmdphase    = rdcmdphase,
+            wrcmdphase    = wrcmdphase,
+            cl            = cl,
+            cwl           = cwl,
+            read_latency  = 2 + cl_sys_latency + 2 + log2_int(4//nphases) + 4,
+            write_latency = cwl_sys_latency
+        )
+
+        # DFI Interface ----------------------------------------------------------------------------
+        self.dfi = dfi = Interface(addressbits, bankbits, nranks, 4*databits, 4)
+
+        # # #
+
+        bl8_chunk   = Signal()
+        rddata_en = Signal(self.settings.read_latency)
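+        # bl8_chunk selects which half of the BL8 burst is driven on DQ/DM on a
+        # given sys_clk cycle (the half-rate PHY spreads a BL8 write over two
+        # cycles); rddata_en is the read-command shift register described in
+        # the Read Control Path section below.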
+
+        # Clock --------------------------------------------------------------------------------
+        for i in range(len(self.pads.clk.o)):
+            sd_clk_se = Signal()
+            m.submodules += Instance("ODDRX2F",
+                i_RST  = ResetSignal("sys2x"),
+                i_ECLK = ClockSignal("sys2x"),
+                i_SCLK = ClockSignal(),
+                i_D0   = 0,
+                i_D1   = 1,
+                i_D2   = 0,
+                i_D3   = 1,
+                o_Q    = self.pads.clk.o[i]
+            )
+            
+
+        # Addresses and Commands ---------------------------------------------------------------
+        for i in range(addressbits):
+            m.submodules += Instance("ODDRX2F",
+                i_RST  = ResetSignal("sys2x"),
+                i_ECLK = ClockSignal("sys2x"),
+                i_SCLK = ClockSignal(),
+                i_D0   = dfi.phases[0].address[i],
+                i_D1   = dfi.phases[0].address[i],
+                i_D2   = dfi.phases[1].address[i],
+                i_D3   = dfi.phases[1].address[i],
+                o_Q    = self.pads.a.o[i]
+            )
+        for i in range(bankbits):
+            m.submodules += Instance("ODDRX2F",
+                i_RST  = ResetSignal("sys2x"),
+                i_ECLK = ClockSignal("sys2x"),
+                i_SCLK = ClockSignal(),
+                i_D0   = dfi.phases[0].bank[i],
+                i_D1   = dfi.phases[0].bank[i],
+                i_D2   = dfi.phases[1].bank[i],
+                i_D3   = dfi.phases[1].bank[i],
+                o_Q    = self.pads.ba.o[i]
+            )
+        controls = ["ras_n", "cas_n", "we_n", "cke", "odt"]
+        if hasattr(self.pads, "reset_n"):
+            controls.append("reset_n")
+        if hasattr(self.pads, "cs_n"):
+            controls.append("cs_n")
+        for name in controls:
+            for i in range(len(getattr(self.pads, name))):
+                m.submodules += Instance("ODDRX2F",
+                    i_RST  = ResetSignal("sys2x"),
+                    i_ECLK = ClockSignal("sys2x"),
+                    i_SCLK = ClockSignal(),
+                    i_D0   = getattr(dfi.phases[0], name)[i],
+                    i_D1   = getattr(dfi.phases[0], name)[i],
+                    i_D2   = getattr(dfi.phases[1], name)[i],
+                    i_D3   = getattr(dfi.phases[1], name)[i],
+                    o_Q    = getattr(self.pads, name)[i]
+                )
+
+        # DQ ---------------------------------------------------------------------------------------
+        dq_oe       = Signal()
+        dqs_oe      = Signal()
+        dqs_pattern = DQSPattern()
+        m.submodules += dqs_pattern
+        for i in range(databits//8):
+            # DQSBUFM
+            dqs_i   = Signal()
+            dqsr90  = Signal()
+            dqsw270 = Signal()
+            dqsw    = Signal()
+            rdpntr  = Signal(3)
+            wrpntr  = Signal(3)
+            rdly    = Signal(7)
+            with m.If(self._dly_sel.storage[i]):
+                with m.If(self._rdly_dq_rst.re):
+                    m.d.sync += rdly.eq(0)
+                with m.Elif(self._rdly_dq_inc.re):
+                    m.d.sync += rdly.eq(rdly + 1)
+            datavalid   = Signal()
+            burstdet    = Signal()
+            dqs_read    = Signal()
+            dqs_bitslip = Signal(2)
+            with m.If(self._dly_sel.storage[i]):
+                with m.If(self._rdly_dq_bitslip_rst.re):
+                    m.d.sync += dqs_bitslip.eq(0)
+                with m.Elif(self._rdly_dq_bitslip.re):
+                    m.d.sync += dqs_bitslip.eq(dqs_bitslip + 1)
+            # nMigen has no free-standing Case statement; use m.Switch instead.
+            with m.Switch(dqs_bitslip):
+                for j, b in enumerate(range(-2, 2)):
+                    with m.Case(j):
+                        m.d.sync += dqs_read.eq(rddata_en[cl_sys_latency + b:cl_sys_latency + b + 2] != 0)
+            m.submodules += Instance("DQSBUFM",
+                p_DQS_LI_DEL_ADJ = "MINUS",
+                p_DQS_LI_DEL_VAL = 1,
+                p_DQS_LO_DEL_ADJ = "MINUS",
+                p_DQS_LO_DEL_VAL = 4,
+                # Clocks / Reset
+                i_SCLK           = ClockSignal("sys"),
+                i_ECLK           = ClockSignal("sys2x"),
+                i_RST            = ResetSignal("sys2x"),
+                i_DDRDEL         = self.init.delay,
+                i_PAUSE          = self.init.pause | self._dly_sel.storage[i],
+
+                # Control
+                # Assert LOADNs to use DDRDEL control
+                i_RDLOADN        = 0,
+                i_RDMOVE         = 0,
+                i_RDDIRECTION    = 1,
+                i_WRLOADN        = 0,
+                i_WRMOVE         = 0,
+                i_WRDIRECTION    = 1,
+
+                # Reads (generate shifted DQS clock for reads)
+                i_READ0          = dqs_read,
+                i_READ1          = dqs_read,
+                i_READCLKSEL0    = rdly[0],
+                i_READCLKSEL1    = rdly[1],
+                i_READCLKSEL2    = rdly[2],
+                i_DQSI           = dqs_i,
+                o_DQSR90         = dqsr90,
+                o_RDPNTR0        = rdpntr[0],
+                o_RDPNTR1        = rdpntr[1],
+                o_RDPNTR2        = rdpntr[2],
+                o_WRPNTR0        = wrpntr[0],
+                o_WRPNTR1        = wrpntr[1],
+                o_WRPNTR2        = wrpntr[2],
+                o_DATAVALID      = self.datavalid[i],
+                o_BURSTDET       = burstdet,
+
+                # Writes (generate shifted ECLK clock for writes)
+                o_DQSW270        = dqsw270,
+                o_DQSW           = dqsw
+            )
+            burstdet_d = Signal()
+            m.d.sync += burstdet_d.eq(burstdet)
+            with m.If(self._burstdet_clr.re):
+                m.d.sync += self._burstdet_seen.status[i].eq(0)
+            with m.If(burstdet & ~burstdet_d):
+                m.d.sync += self._burstdet_seen.status[i].eq(1)
+
+            # DQS and DM ---------------------------------------------------------------------------
+            dm_o_data          = Signal(8)
+            dm_o_data_d        = Signal(8)
+            dm_o_data_muxed    = Signal(4)
+            m.d.comb += dm_o_data.eq(Cat(
+                dfi.phases[0].wrdata_mask[0*databits//8+i],
+                dfi.phases[0].wrdata_mask[1*databits//8+i],
+                dfi.phases[0].wrdata_mask[2*databits//8+i],
+                dfi.phases[0].wrdata_mask[3*databits//8+i],
+
+                dfi.phases[1].wrdata_mask[0*databits//8+i],
+                dfi.phases[1].wrdata_mask[1*databits//8+i],
+                dfi.phases[1].wrdata_mask[2*databits//8+i],
+                dfi.phases[1].wrdata_mask[3*databits//8+i]),
+            )
+            m.d.sync += dm_o_data_d.eq(dm_o_data)
+            with m.If(bl8_chunk):
+                m.d.sync += dm_o_data_muxed.eq(dm_o_data_d[4:])
+            with m.Else():
+                m.d.sync += dm_o_data_muxed.eq(dm_o_data[:4])
+            # FIXME: use m.d.comb?
+            m.submodules += Instance("ODDRX2DQA",
+                i_RST     = ResetSignal("sys2x"),
+                i_ECLK    = ClockSignal("sys2x"),
+                i_SCLK    = ClockSignal(),
+                i_DQSW270 = dqsw270,
+                i_D0      = dm_o_data_muxed[0],
+                i_D1      = dm_o_data_muxed[1],
+                i_D2      = dm_o_data_muxed[2],
+                i_D3      = dm_o_data_muxed[3],
+                o_Q       = self.pads.dm[i]
+            )
+
+            dqs      = Signal()
+            dqs_oe_n = Signal()
+            m.submodules += [
+                Instance("ODDRX2DQSB",
+                    i_RST  = ResetSignal("sys2x"),
+                    i_ECLK = ClockSignal("sys2x"),
+                    i_SCLK = ClockSignal(),
+                    i_DQSW = dqsw,
+                    i_D0   = 0, # FIXME: dqs_pattern.o[3],
+                    i_D1   = 1, # FIXME: dqs_pattern.o[2],
+                    i_D2   = 0, # FIXME: dqs_pattern.o[1],
+                    i_D3   = 1, # FIXME: dqs_pattern.o[0],
+                    o_Q    = dqs
+                ),
+                Instance("TSHX2DQSA",
+                    i_RST  = ResetSignal("sys2x"),
+                    i_ECLK = ClockSignal("sys2x"),
+                    i_SCLK = ClockSignal(),
+                    i_DQSW = dqsw,
+                    i_T0   = ~(dqs_pattern.preamble | dqs_oe | dqs_pattern.postamble),
+                    i_T1   = ~(dqs_pattern.preamble | dqs_oe | dqs_pattern.postamble),
+                    o_Q    = dqs_oe_n
+                ),
+                Tristate(self.pads.dqs_p[i], dqs, ~dqs_oe_n, dqs_i)
+            ]
+
+            for j in range(8*i, 8*(i+1)):
+                dq_o            = Signal()
+                dq_i            = Signal()
+                dq_oe_n         = Signal()
+                dq_i_delayed    = Signal()
+                dq_i_data       = Signal(8)
+                dq_o_data       = Signal(8)
+                dq_o_data_d     = Signal(8)
+                dq_o_data_muxed = Signal(4)
+                m.d.comb += dq_o_data.eq(Cat(
+                    dfi.phases[0].wrdata[0*databits+j],
+                    dfi.phases[0].wrdata[1*databits+j],
+                    dfi.phases[0].wrdata[2*databits+j],
+                    dfi.phases[0].wrdata[3*databits+j],
+
+                    dfi.phases[1].wrdata[0*databits+j],
+                    dfi.phases[1].wrdata[1*databits+j],
+                    dfi.phases[1].wrdata[2*databits+j],
+                    dfi.phases[1].wrdata[3*databits+j])
+                )
+                m.d.sync += dq_o_data_d.eq(dq_o_data)
+                with m.If(bl8_chunk):
+                    m.d.sync += dq_o_data_muxed.eq(dq_o_data_d[4:])
+                with m.Else():
+                    m.d.sync += dq_o_data_muxed.eq(dq_o_data[:4])
+                # FIXME: use m.d.comb?
+                _dq_i_data = Signal(4)
+                m.submodules += [
+                    Instance("ODDRX2DQA",
+                        i_RST     = ResetSignal("sys2x"),
+                        i_ECLK    = ClockSignal("sys2x"),
+                        i_SCLK    = ClockSignal(),
+                        i_DQSW270 = dqsw270,
+                        i_D0      = dq_o_data_muxed[0],
+                        i_D1      = dq_o_data_muxed[1],
+                        i_D2      = dq_o_data_muxed[2],
+                        i_D3      = dq_o_data_muxed[3],
+                        o_Q       = dq_o
+                    ),
+                    Instance("DELAYF",
+                        p_DEL_MODE  = "DQS_ALIGNED_X2",
+                        i_LOADN     = 1,
+                        i_MOVE      = 0,
+                        i_DIRECTION = 0,
+                        i_A         = dq_i,
+                        o_Z         = dq_i_delayed
+                    ),
+                    Instance("IDDRX2DQA",
+                        i_RST     = ResetSignal("sys2x"),
+                        i_ECLK    = ClockSignal("sys2x"),
+                        i_SCLK    = ClockSignal(),
+                        i_DQSR90  = dqsr90,
+                        i_RDPNTR0 = rdpntr[0],
+                        i_RDPNTR1 = rdpntr[1],
+                        i_RDPNTR2 = rdpntr[2],
+                        i_WRPNTR0 = wrpntr[0],
+                        i_WRPNTR1 = wrpntr[1],
+                        i_WRPNTR2 = wrpntr[2],
+                        i_D       = dq_i_delayed,
+                        o_Q0      = _dq_i_data[0],
+                        o_Q1      = _dq_i_data[1],
+                        o_Q2      = _dq_i_data[2],
+                        o_Q3      = _dq_i_data[3],
+                    )
+                ]
+                m.d.sync += dq_i_data[:4].eq(dq_i_data[4:])
+                m.d.sync += dq_i_data[4:].eq(_dq_i_data)
+                m.d.comb += [
+                    dfi.phases[0].rddata[0*databits+j].eq(dq_i_data[0]),
+                    dfi.phases[0].rddata[1*databits+j].eq(dq_i_data[1]),
+                    dfi.phases[0].rddata[2*databits+j].eq(dq_i_data[2]),
+                    dfi.phases[0].rddata[3*databits+j].eq(dq_i_data[3]),
+                    dfi.phases[1].rddata[0*databits+j].eq(dq_i_data[4]),
+                    dfi.phases[1].rddata[1*databits+j].eq(dq_i_data[5]),
+                    dfi.phases[1].rddata[2*databits+j].eq(dq_i_data[6]),
+                    dfi.phases[1].rddata[3*databits+j].eq(dq_i_data[7]),
+                ]
+                m.submodules += [
+                    Instance("TSHX2DQA",
+                        i_RST     = ResetSignal("sys2x"),
+                        i_ECLK    = ClockSignal("sys2x"),
+                        i_SCLK    = ClockSignal(),
+                        i_DQSW270 = dqsw270,
+                        i_T0      = ~(dqs_pattern.preamble | dq_oe | dqs_pattern.postamble),
+                        i_T1      = ~(dqs_pattern.preamble | dq_oe | dqs_pattern.postamble),
+                        o_Q       = dq_oe_n,
+                    ),
+                    Tristate(self.pads.dq[j], dq_o, ~dq_oe_n, dq_i)
+                ]
+
+        # Read Control Path ------------------------------------------------------------------------
+        # Creates a shift register of read commands coming from the DFI interface. This shift register
+        # is used to control DQS read (internal read pulse of the DQSBUF) and to indicate to the
+        # DFI interface that the read data is valid.
+        #
+        # The DQS read must be asserted for 2 sys_clk cycles before the read data comes back from
+        # the DRAM (see 6.2.4 READ Pulse Positioning Optimization of FPGA-TN-02035-1.2).
+        #
+        # The read data valid is asserted for 1 sys_clk cycle when the data is available on the DFI
+        # interface; the latency is the sum of the ODDRX2DQA, CAS and IDDRX2DQA latencies.
+        rddata_en_last = Signal.like(rddata_en)
+        m.d.comb += rddata_en.eq(Cat(dfi.phases[self.settings.rdphase].rddata_en, rddata_en_last))
+        m.d.sync += rddata_en_last.eq(rddata_en)
+        m.d.sync += [phase.rddata_valid.eq(rddata_en[-1]) for phase in dfi.phases]
+
+        # Write Control Path -----------------------------------------------------------------------
+        # Creates a shift register of write commands coming from the DFI interface. This shift register
+        # is used to control DQ/DQS tristates and to select the write data of the DRAM burst from the
+        # DFI interface: the PHY operates in half-rate mode (so it provides 4 data words per sys_clk
+        # cycle: 2x for DDR, 2x for half-rate), but DDR3 requires a burst of 8 data words (BL8) for
+        # best efficiency. Writes are therefore performed over 2 sys_clk cycles and the data has to be
+        # selected for each cycle.
+        # FIXME: understand +2
+        wrdata_en = Signal(cwl_sys_latency + 5)
+        wrdata_en_last = Signal.like(wrdata_en)
+        m.d.comb += wrdata_en.eq(Cat(dfi.phases[self.settings.wrphase].wrdata_en, wrdata_en_last))
+        m.d.sync += wrdata_en_last.eq(wrdata_en)
+        m.d.comb += dq_oe.eq(wrdata_en[cwl_sys_latency + 2] | wrdata_en[cwl_sys_latency + 3])
+        m.d.comb += bl8_chunk.eq(wrdata_en[cwl_sys_latency + 1])
+        m.d.comb += dqs_oe.eq(dq_oe)
+
+        # Write DQS Postamble/Preamble Control Path ------------------------------------------------
+        # Generates DQS Preamble 1 cycle before the first write and Postamble 1 cycle after the last
+        # write. During writes, DQS tristate is configured as output for at least 4 sys_clk cycles:
+        # 1 for Preamble, 2 for the Write and 1 for the Postamble.
+        m.d.comb += dqs_pattern.preamble.eq( wrdata_en[cwl_sys_latency + 1]  & ~wrdata_en[cwl_sys_latency + 2])
+        m.d.comb += dqs_pattern.postamble.eq(wrdata_en[cwl_sys_latency + 4]  & ~wrdata_en[cwl_sys_latency + 3])
+
+        return m
diff --git a/gram/phy/model.py b/gram/phy/model.py
new file mode 100644 (file)
index 0000000..33ca2d8
--- /dev/null
@@ -0,0 +1,607 @@
+# This file is Copyright (c) 2015-2020 Florent Kermarrec <florent@enjoy-digital.fr>
+# This file is Copyright (c) 2020 Antmicro <www.antmicro.com>
+# License: BSD
+
+# SDRAM simulation PHY at DFI level tested with SDR/DDR/DDR2/LPDDR/DDR3
+# TODO:
+# - add multirank support.
+
+from migen import *
+
+from litedram.common import burst_lengths
+from litedram.phy.dfi import *
+from litedram.modules import _speedgrade_timings, _technology_timings
+
+from functools import reduce
+from operator import or_
+
+import struct
+
+
+SDRAM_VERBOSE_OFF = 0
+SDRAM_VERBOSE_STD = 1
+SDRAM_VERBOSE_DBG = 2
+
+# Bank Model ---------------------------------------------------------------------------------------
+
+class BankModel(Module):
+    def __init__(self, data_width, nrows, ncols, burst_length, nphases, we_granularity, init):
+        self.activate     = Signal()
+        self.activate_row = Signal(max=nrows)
+        self.precharge    = Signal()
+
+        self.write        = Signal()
+        self.write_col    = Signal(max=ncols)
+        self.write_data   = Signal(data_width)
+        self.write_mask   = Signal(data_width//8)
+
+        self.read         = Signal()
+        self.read_col     = Signal(max=ncols)
+        self.read_data    = Signal(data_width)
+
+        # # #
+
+        active = Signal()
+        row    = Signal(max=nrows)
+
+        self.sync += \
+            If(self.precharge,
+                active.eq(0),
+            ).Elif(self.activate,
+                active.eq(1),
+                row.eq(self.activate_row)
+            )
+
+        bank_mem_len   = nrows*ncols//(burst_length*nphases)
+        mem            = Memory(data_width, bank_mem_len, init=init)
+        write_port     = mem.get_port(write_capable=True, we_granularity=we_granularity)
+        read_port      = mem.get_port(async_read=True)
+        self.specials += mem, read_port, write_port
+
+        wraddr         = Signal(max=bank_mem_len)
+        rdaddr         = Signal(max=bank_mem_len)
+
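+        # Map (row, col) to a linear word address of the bank memory; the low
+        # log2(burst_length*nphases) column bits address data within a single
+        # memory word and are dropped here.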
+        self.comb += [
+            wraddr.eq((row*ncols | self.write_col)[log2_int(burst_length*nphases):]),
+            rdaddr.eq((row*ncols | self.read_col)[log2_int(burst_length*nphases):]),
+        ]
+
+        self.comb += [
+            If(active,
+                write_port.adr.eq(wraddr),
+                write_port.dat_w.eq(self.write_data),
+                If(we_granularity,
+                    write_port.we.eq(Replicate(self.write, data_width//8) & ~self.write_mask),
+                ).Else(
+                    write_port.we.eq(self.write),
+                ),
+                If(self.read,
+                    read_port.adr.eq(rdaddr),
+                    self.read_data.eq(read_port.dat_r)
+                )
+            )
+        ]
+
+# DFI Phase Model ----------------------------------------------------------------------------------
+
+class DFIPhaseModel(Module):
+    def __init__(self, dfi, n):
+        phase = getattr(dfi, "p"+str(n))
+
+        self.bank         = phase.bank
+        self.address      = phase.address
+
+        self.wrdata       = phase.wrdata
+        self.wrdata_mask  = phase.wrdata_mask
+
+        self.rddata       = phase.rddata
+        self.rddata_valid = phase.rddata_valid
+
+        self.activate     = Signal()
+        self.precharge    = Signal()
+        self.write        = Signal()
+        self.read         = Signal()
+
+        # # #
+
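+        # Decode DFI commands from the active-low control signals: with cs_n low,
+        # ras_n low / cas_n high selects a row command (ACT when we_n is high,
+        # PRE when it is low), while ras_n high / cas_n low selects a column
+        # command (WR when we_n is low, RD when it is high).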
+        self.comb += [
+            If(~phase.cs_n & ~phase.ras_n & phase.cas_n,
+                self.activate.eq(phase.we_n),
+                self.precharge.eq(~phase.we_n)
+            ),
+            If(~phase.cs_n & phase.ras_n & ~phase.cas_n,
+                self.write.eq(~phase.we_n),
+                self.read.eq(phase.we_n)
+            )
+        ]
+
+# DFI Timings Checker ------------------------------------------------------------------------------
+
+class SDRAMCMD:
+    def __init__(self, name: str, enc: int, idx: int):
+        self.name = name
+        self.enc  = enc
+        self.idx  = idx
+
+
+class TimingRule:
+    def __init__(self, prev: str, curr: str, delay: int):
+        self.name  = prev + "->" + curr
+        self.prev  = prev
+        self.curr  = curr
+        self.delay = delay
+
+
+class DFITimingsChecker(Module):
+    CMDS = [
+        # Name, (cs_n, ras_n, cas_n, we_n) pattern
+        ("PRE",  "0010"), # Precharge
+        ("REF",  "0001"), # Refresh
+        ("ACT",  "0011"), # Activate
+        ("RD",   "0101"), # Read
+        ("WR",   "0100"), # Write
+        ("ZQCS", "0110"), # ZQCS
+    ]
+
+    RULES = [
+        # tRP
+        ("PRE",  "ACT", "tRP"),
+        ("PRE",  "REF", "tRP"),
+        # tRCD
+        ("ACT",  "WR",  "tRCD"),
+        ("ACT",  "RD",  "tRCD"),
+        # tRAS
+        ("ACT",  "PRE", "tRAS"),
+        # tRFC
+        ("REF",  "PRE", "tRFC"),
+        ("REF",  "ACT", "tRFC"),
+        # tCCD
+        ("WR",   "RD",  "tCCD"),
+        ("WR",   "WR",  "tCCD"),
+        ("RD",   "RD",  "tCCD"),
+        ("RD",   "WR",  "tCCD"),
+        # tRC
+        ("ACT",  "ACT", "tRC"),
+        # tWR
+        ("WR",   "PRE", "tWR"),
+        # tWTR
+        ("WR",   "RD",  "tWTR"),
+        # tZQCS
+        ("ZQCS", "ACT", "tZQCS"),
+    ]
+
+    def add_cmds(self):
+        self.cmds = {}
+        for idx, (name, pattern) in enumerate(self.CMDS):
+            self.cmds[name] = SDRAMCMD(name, int(pattern, 2), idx)
+
+    def add_rule(self, prev, curr, delay):
+        if not isinstance(delay, int):
+            delay = self.timings[delay]
+        self.rules.append(TimingRule(prev, curr, delay))
+
+    def add_rules(self):
+        self.rules = []
+        for rule in self.RULES:
+            self.add_rule(*rule)
+
+    # Convert ns to ps
+    def ns_to_ps(self, val):
+        return int(val * 1e3)
+
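+    # Convert a (cycles, ns) timing to ps, keeping the more constraining
+    # (i.e. larger) of the two values.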
+    def ck_ns_to_ps(self, val, tck):
+        c, t = val
+        c = 0 if c is None else c * tck
+        t = 0 if t is None else t
+        return self.ns_to_ps(max(c, t))
+
+    def prepare_timings(self, timings, refresh_mode, memtype):
+        CK_NS = ["tRFC", "tWTR", "tFAW", "tCCD", "tRRD", "tZQCS"]
+        REF   = ["tREFI", "tRFC"]
+        self.timings = timings
+        new_timings  = {}
+
+        tck = self.timings["tCK"]
+
+        for key, val in self.timings.items():
+            if refresh_mode is not None and key in REF:
+                val = val[refresh_mode]
+
+            if val is None:
+                val = 0
+            elif key in CK_NS:
+                val = self.ck_ns_to_ps(val, tck)
+            else:
+                val = self.ns_to_ps(val)
+
+            new_timings[key] = val
+
+        new_timings["tRC"] = new_timings["tRAS"] + new_timings["tRP"]
+
+        # Adjust timings relative to write burst - tWR & tWTR
+        wrburst = burst_lengths[memtype] if memtype == "SDR" else burst_lengths[memtype] // 2
+        wrburst = (new_timings["tCK"] * (wrburst - 1))
+        new_timings["tWR"]  = new_timings["tWR"]  + wrburst
+        new_timings["tWTR"] = new_timings["tWTR"] + wrburst
+
+        self.timings = new_timings
+
+    def __init__(self, dfi, nbanks, nphases, timings, refresh_mode, memtype, verbose=False):
+        self.prepare_timings(timings, refresh_mode, memtype)
+        self.add_cmds()
+        self.add_rules()
+
+        cnt = Signal(64)
+        self.sync += cnt.eq(cnt + nphases)
+
+        phases = [getattr(dfi, "p" + str(n)) for n in range(nphases)]
+
+        last_cmd_ps = [[Signal.like(cnt) for _ in range(len(self.cmds))] for _ in range(nbanks)]
+        last_cmd    = [Signal(4) for i in range(nbanks)]
+
+        act_ps   = Array([Signal.like(cnt) for i in range(4)])
+        act_curr = Signal(max=4)
+
+        ref_issued = Signal(nphases)
+
+        for np, phase in enumerate(phases):
+            ps = Signal.like(cnt)
+            self.comb += ps.eq((cnt + np)*self.timings["tCK"])
+            state = Signal(4)
+            self.comb += state.eq(Cat(phase.we_n, phase.cas_n, phase.ras_n, phase.cs_n))
+            all_banks = Signal()
+
+            self.comb += all_banks.eq(
+                (self.cmds["REF"].enc == state) |
+                ((self.cmds["PRE"].enc == state) & phase.address[10])
+            )
+
+            # tREFI
+            self.comb += ref_issued[np].eq(self.cmds["REF"].enc == state)
+
+            # Print debug information
+            if verbose:
+                for _, cmd in self.cmds.items():
+                    self.sync += [
+                        If(state == cmd.enc,
+                            If(all_banks,
+                                Display("[%016dps] P%0d " + cmd.name, ps, np)
+                            ).Else(
+                                Display("[%016dps] P%0d B%0d " + cmd.name, ps, np, phase.bank)
+                            )
+                        )
+                    ]
+
+            # Bank command monitoring
+            for i in range(nbanks):
+                for _, curr in self.cmds.items():
+                    cmd_recv = Signal()
+                    self.comb += cmd_recv.eq(((phase.bank == i) | all_banks) & (state == curr.enc))
+
+                    # Checking rules from self.rules
+                    for _, prev in self.cmds.items():
+                        for rule in self.rules:
+                            if rule.prev == prev.name and rule.curr == curr.name:
+                                self.sync += [
+                                    If(cmd_recv & (last_cmd[i] == prev.enc) &
+                                       (ps < (last_cmd_ps[i][prev.idx] + rule.delay)),
+                                        Display("[%016dps] {} violation on bank %0d".format(rule.name), ps, i)
+                                    )
+                                ]
+
+                    # Save command timestamp in an array
+                    self.sync += If(cmd_recv, last_cmd_ps[i][curr.idx].eq(ps), last_cmd[i].eq(state))
+
+                    # tRRD & tFAW
+                    if curr.name == "ACT":
+                        act_next = Signal.like(act_curr)
+                        self.comb += act_next.eq(act_curr+1)
+
+                        # act_curr points to newest ACT timestamp
+                        self.sync += [
+                            If(cmd_recv & (ps < (act_ps[act_curr] + self.timings["tRRD"])),
+                                Display("[%016dps] tRRD violation on bank %0d", ps, i)
+                            )
+                        ]
+
+                        # act_next points to the oldest ACT timestamp
+                        self.sync += [
+                            If(cmd_recv & (ps < (act_ps[act_next] + self.timings["tFAW"])),
+                                Display("[%016dps] tFAW violation on bank %0d", ps, i)
+                            )
+                        ]
+
+                        # Save ACT timestamp in a circular buffer
+                        self.sync += If(cmd_recv, act_ps[act_next].eq(ps), act_curr.eq(act_next))
+
+        # tREFI
+        ref_ps      = Signal.like(cnt)
+        ref_ps_mod  = Signal.like(cnt)
+        ref_ps_diff = Signal(min=-2**63, max=2**63)
+        curr_diff   = Signal.like(ref_ps_diff)
+
+        self.comb += curr_diff.eq(ps - (ref_ps + self.timings["tREFI"]))
+
+        # Work in 64ms periods
+        self.sync += [
+            If(ref_ps_mod < int(64e9),
+                ref_ps_mod.eq(ref_ps_mod + nphases * self.timings["tCK"])
+            ).Else(
+                ref_ps_mod.eq(0)
+            )
+        ]
+
+        # Update timestamp and difference
+        self.sync += If(ref_issued != 0, ref_ps.eq(ps), ref_ps_diff.eq(ref_ps_diff - curr_diff))
+
+        self.sync += [
+            If((ref_ps_mod == 0) & (ref_ps_diff > 0),
+                Display("[%016dps] tREFI violation (64ms period): %0d", ps, ref_ps_diff)
+            )
+        ]
+
+        # Report any refresh periods longer than tREFI
+        if verbose:
+            ref_done = Signal()
+            self.sync += [
+                If(ref_issued != 0,
+                    ref_done.eq(1),
+                    If(~ref_done,
+                        Display("[%016dps] Late refresh", ps)
+                    )
+                )
+            ]
+
+            self.sync += [
+                If((curr_diff > 0) & ref_done & (ref_issued == 0),
+                    Display("[%016dps] tREFI violation", ps),
+                    ref_done.eq(0)
+                )
+            ]
+
+        # There is a maximum delay between refreshes on >=DDR
+        ref_limit = {"1x": 9, "2x": 17, "4x": 36}
+        if memtype != "SDR":
+            refresh_mode = "1x" if refresh_mode is None else refresh_mode
+            ref_done = Signal()
+            self.sync += If(ref_issued != 0, ref_done.eq(1))
+            self.sync += [
+                If((ref_issued == 0) & ref_done &
+                   (ref_ps > (ps + ref_limit[refresh_mode] * self.timings['tREFI'])),
+                    Display("[%016dps] tREFI violation (too many postponed refreshes)", ps),
+                    ref_done.eq(0)
+                )
+            ]
+
+# SDRAM PHY Model ----------------------------------------------------------------------------------
+
+class SDRAMPHYModel(Module):
+    def __prepare_bank_init_data(self, init, nbanks, nrows, ncols, data_width, address_mapping):
+        mem_size          = (self.settings.databits//8)*(nrows*ncols*nbanks)
+        bank_size         = mem_size // nbanks
+        column_size       = bank_size // nrows
+        model_bank_size   = bank_size // (data_width//8)
+        model_column_size = model_bank_size // nrows
+        model_data_ratio  = data_width // 32
+        data_width_bytes  = data_width // 8
+        bank_init         = [[] for i in range(nbanks)]
+
+        # Pad the init data with zeros so its length is a multiple of data_width_bytes
+        if len(init)%data_width_bytes != 0:
+            init.extend([0]*(data_width_bytes-len(init)%data_width_bytes))
+
+        # Convert init data width from 32-bit to data_width if needed
+        if model_data_ratio > 1:
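+            # data_width is wider than 32-bit: pack model_data_ratio consecutive
+            # 32-bit init words into each memory word.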
+            new_init = [0]*(len(init)//model_data_ratio)
+            for i in range(0, len(init), model_data_ratio):
+                ints = init[i:i+model_data_ratio]
+                strs = "".join("{:08x}".format(x) for x in reversed(ints))
+                new_init[i//model_data_ratio] = int(strs, 16)
+            init = new_init
+        elif model_data_ratio == 0:
+            assert data_width_bytes in [1, 2]
+            model_data_ratio = 4 // data_width_bytes
+            struct_unpack_patterns = {1: "4B", 2: "2H"}
+            new_init = [0]*int(len(init)*model_data_ratio)
+            for i in range(len(init)):
+                new_init[model_data_ratio*i:model_data_ratio*(i+1)] = struct.unpack(
+                    struct_unpack_patterns[data_width_bytes],
+                    struct.pack("I", init[i])
+                )[0:model_data_ratio]
+            init = new_init
+
+        if address_mapping == "ROW_BANK_COL":
+            for row in range(nrows):
+                for bank in range(nbanks):
+                    start = (row*nbanks*model_column_size + bank*model_column_size)
+                    end   = min(start + model_column_size, len(init))
+                    if start > len(init):
+                        break
+                    bank_init[bank].extend(init[start:end])
+        elif address_mapping == "BANK_ROW_COL":
+            for bank in range(nbanks):
+                start = bank*model_bank_size
+                end   = min(start + model_bank_size, len(init))
+                if start > len(init):
+                    break
+                bank_init[bank] = init[start:end]
+
+        return bank_init
+
+    def __init__(self, module, settings, clk_freq=100e6,
+        we_granularity         = 8,
+        init                   = [],
+        address_mapping        = "ROW_BANK_COL",
+        verbosity              = SDRAM_VERBOSE_OFF):
+
+        # Parameters -------------------------------------------------------------------------------
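+        # Number of data words transferred per clock cycle (1 for SDR, 2 for
+        # DDR-type memories).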
+        burst_length = {
+            "SDR":   1,
+            "DDR":   2,
+            "LPDDR": 2,
+            "DDR2":  2,
+            "DDR3":  2,
+            "DDR4":  2,
+            }[settings.memtype]
+
+        addressbits   = module.geom_settings.addressbits
+        bankbits      = module.geom_settings.bankbits
+        rowbits       = module.geom_settings.rowbits
+        colbits       = module.geom_settings.colbits
+
+        self.settings = settings
+        self.module   = module
+
+        # DFI Interface ----------------------------------------------------------------------------
+        self.dfi = Interface(
+            addressbits = addressbits,
+            bankbits    = bankbits,
+            nranks      = self.settings.nranks,
+            databits    = self.settings.dfi_databits,
+            nphases     = self.settings.nphases
+        )
+
+        # # #
+
+        nphases    = self.settings.nphases
+        nbanks     = 2**bankbits
+        nrows      = 2**rowbits
+        ncols      = 2**colbits
+        data_width = self.settings.dfi_databits*self.settings.nphases
+
+        # DFI phases -------------------------------------------------------------------------------
+        phases = [DFIPhaseModel(self.dfi, n) for n in range(self.settings.nphases)]
+        self.submodules += phases
+
+        # DFI timing checker -----------------------------------------------------------------------
+        if verbosity > SDRAM_VERBOSE_OFF:
+            timings = {"tCK": (1e9 / clk_freq) / nphases}
+
+            for name in _speedgrade_timings + _technology_timings:
+                timings[name] = self.module.get(name)
+
+            timing_checker = DFITimingsChecker(
+                dfi          = self.dfi,
+                nbanks       = nbanks,
+                nphases      = nphases,
+                timings      = timings,
+                refresh_mode = self.module.timing_settings.fine_refresh_mode,
+                memtype      = settings.memtype,
+                verbose      = verbosity > SDRAM_VERBOSE_DBG)
+            self.submodules += timing_checker
+
+        # Bank init data ---------------------------------------------------------------------------
+        bank_init  = [[] for i in range(nbanks)]
+
+        if init:
+            bank_init = self.__prepare_bank_init_data(
+                init            = init,
+                nbanks          = nbanks,
+                nrows           = nrows,
+                ncols           = ncols,
+                data_width      = data_width,
+                address_mapping = address_mapping
+            )
+
+        # Banks ------------------------------------------------------------------------------------
+        banks = [BankModel(
+            data_width     = data_width,
+            nrows          = nrows,
+            ncols          = ncols,
+            burst_length   = burst_length,
+            nphases        = nphases,
+            we_granularity = we_granularity,
+            init           = bank_init[i]) for i in range(nbanks)]
+        self.submodules += banks
+
+        # Connect DFI phases to Banks (CMDs, Write datapath) ---------------------------------------
+        for nb, bank in enumerate(banks):
+            # Bank activate
+            activates = Signal(len(phases))
+            cases     = {}
+            for np, phase in enumerate(phases):
+                self.comb += activates[np].eq(phase.activate)
+                cases[2**np] = [
+                    bank.activate.eq(phase.bank == nb),
+                    bank.activate_row.eq(phase.address)
+                ]
+            self.comb += Case(activates, cases)
+
+            # Bank precharge
+            precharges = Signal(len(phases))
+            cases      = {}
+            for np, phase in enumerate(phases):
+                self.comb += precharges[np].eq(phase.precharge)
+                cases[2**np] = [
+                    bank.precharge.eq((phase.bank == nb) | phase.address[10])
+                ]
+            self.comb += Case(precharges, cases)
+
+            # Bank writes
+            bank_write = Signal()
+            bank_write_col = Signal(max=ncols)
+            writes = Signal(len(phases))
+            cases  = {}
+            for np, phase in enumerate(phases):
+                self.comb += writes[np].eq(phase.write)
+                cases[2**np] = [
+                    bank_write.eq(phase.bank == nb),
+                    bank_write_col.eq(phase.address)
+                ]
+            self.comb += Case(writes, cases)
+            self.comb += [
+                bank.write_data.eq(Cat(*[phase.wrdata for phase in phases])),
+                bank.write_mask.eq(Cat(*[phase.wrdata_mask for phase in phases]))
+            ]
+
+            # Simulate write latency
+            for i in range(self.settings.write_latency):
+                new_bank_write     = Signal()
+                new_bank_write_col = Signal(max=ncols)
+                self.sync += [
+                    new_bank_write.eq(bank_write),
+                    new_bank_write_col.eq(bank_write_col)
+                ]
+                bank_write = new_bank_write
+                bank_write_col = new_bank_write_col
+
+            self.comb += [
+                bank.write.eq(bank_write),
+                bank.write_col.eq(bank_write_col)
+            ]
+
+            # Bank reads
+            reads = Signal(len(phases))
+            cases = {}
+            for np, phase in enumerate(phases):
+                self.comb += reads[np].eq(phase.read)
+                cases[2**np] = [
+                    bank.read.eq(phase.bank == nb),
+                    bank.read_col.eq(phase.address)
+                ]
+            self.comb += Case(reads, cases)
+
+        # Connect Banks to DFI phases (CMDs, Read datapath) ----------------------------------------
+        banks_read      = Signal()
+        banks_read_data = Signal(data_width)
+        self.comb += [
+            banks_read.eq(reduce(or_, [bank.read for bank in banks])),
+            banks_read_data.eq(reduce(or_, [bank.read_data for bank in banks]))
+        ]
+
+        # Simulate read latency --------------------------------------------------------------------
+        for i in range(self.settings.read_latency):
+            new_banks_read      = Signal()
+            new_banks_read_data = Signal(data_width)
+            self.sync += [
+                new_banks_read.eq(banks_read),
+                new_banks_read_data.eq(banks_read_data)
+            ]
+            banks_read      = new_banks_read
+            banks_read_data = new_banks_read_data
+
+        self.comb += [
+            Cat(*[phase.rddata_valid for phase in phases]).eq(banks_read),
+            Cat(*[phase.rddata for phase in phases]).eq(banks_read_data)
+        ]
diff --git a/gram/stream.py b/gram/stream.py
new file mode 100644 (file)
index 0000000..bde9c6c
--- /dev/null
@@ -0,0 +1,108 @@
+from nmigen import *
+from nmigen.hdl.rec import *
+from nmigen.lib import fifo
+
+
+__all__ = ["Endpoint", "SyncFIFO", "AsyncFIFO"]
+
+
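+# Mark every payload field as DIR_FANOUT, recursing into nested layouts.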
+def _make_fanout(layout):
+    r = []
+    for f in layout:
+        if isinstance(f[1], (int, tuple)):
+            r.append((f[0], f[1], DIR_FANOUT))
+        else:
+            r.append((f[0], _make_fanout(f[1])))
+    return r
+
+
+class EndpointDescription:
+    def __init__(self, payload_layout):
+        self.payload_layout = payload_layout
+
+    def get_full_layout(self):
+        reserved = {"valid", "ready", "first", "last", "payload"}
+        attributed = set()
+        for f in self.payload_layout:
+            if f[0] in attributed:
+                raise ValueError(f[0] + " already attributed in payload layout")
+            if f[0] in reserved:
+                raise ValueError(f[0] + " cannot be used in endpoint layout")
+            attributed.add(f[0])
+
+        full_layout = [
+            ("valid", 1, DIR_FANOUT),
+            ("ready", 1, DIR_FANIN),
+            ("first", 1, DIR_FANOUT),
+            ("last",  1, DIR_FANOUT),
+            ("payload", _make_fanout(self.payload_layout))
+        ]
+        return full_layout
+
+
+class Endpoint(Record):
+    def __init__(self, layout_or_description, **kwargs):
+        if isinstance(layout_or_description, EndpointDescription):
+            self.description = layout_or_description
+        else:
+            self.description = EndpointDescription(layout_or_description)
+        super().__init__(self.description.get_full_layout(), src_loc_at=1, **kwargs)
+
+    def __getattr__(self, name):
+        try:
+            return super().__getattr__(name)
+        except AttributeError:
+            return self.fields["payload"][name]
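+
+    # Example (illustrative sketch): an endpoint carrying an 8-bit "data"
+    # payload field; __getattr__ above makes it reachable directly as ep.data.
+    #
+    #   ep = Endpoint([("data", 8)])
+    #   m.d.comb += [ep.valid.eq(1), ep.data.eq(0xab)]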
+
+
+class _FIFOWrapper:
+    def __init__(self, payload_layout):
+        self.sink   = Endpoint(payload_layout)
+        self.source = Endpoint(payload_layout)
+
+        self.layout = Layout([
+            ("payload", self.sink.description.payload_layout),
+            ("first",   1, DIR_FANOUT),
+            ("last",    1, DIR_FANOUT)
+        ])
+
+    def elaborate(self, platform):
+        m = Module()
+
+        fifo = m.submodules.fifo = self.fifo
+        fifo_din = Record(self.layout)
+        fifo_dout = Record(self.layout)
+        m.d.comb += [
+            fifo.w_data.eq(fifo_din),
+            fifo_dout.eq(fifo.r_data),
+
+            self.sink.ready.eq(fifo.w_rdy),
+            fifo.w_en.eq(self.sink.valid),
+            fifo_din.first.eq(self.sink.first),
+            fifo_din.last.eq(self.sink.last),
+            fifo_din.payload.eq(self.sink.payload),
+
+            self.source.valid.eq(fifo.r_rdy),
+            self.source.first.eq(fifo_dout.first),
+            self.source.last.eq(fifo_dout.last),
+            self.source.payload.eq(fifo_dout.payload),
+            fifo.r_en.eq(self.source.ready)
+        ]
+
+        return m
+
+
+class SyncFIFO(Elaboratable, _FIFOWrapper):
+    def __init__(self, layout, depth, fwft=True):
+        super().__init__(layout)
+        self.fifo = fifo.SyncFIFO(width=len(Record(self.layout)), depth=depth, fwft=fwft)
+        self.depth = self.fifo.depth
+        self.level = self.fifo.level
+
+
+class AsyncFIFO(Elaboratable, _FIFOWrapper):
+    def __init__(self, layout, depth, r_domain="read", w_domain="write"):
+        super().__init__(layout)
+        self.fifo = fifo.AsyncFIFO(width=len(Record(self.layout)), depth=depth,
+                                   r_domain=r_domain, w_domain=w_domain)
+        self.depth = self.fifo.depth
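+
+
+# Example usage (illustrative sketch): buffer a 32-bit stream through a 4-deep
+# synchronous FIFO; the producer drives buf.sink and the consumer reads
+# buf.source.
+#
+#   m.submodules.buf = buf = SyncFIFO([("data", 32)], depth=4)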
diff --git a/setup.py b/setup.py
new file mode 100755 (executable)
index 0000000..19bf8fd
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,20 @@
+#!/usr/bin/env python3
+
+from setuptools import setup
+from setuptools import find_packages
+
+
+setup(
+    name="gram",
+    description="Small footprint and configurable DRAM core",
+    author="Florent Kermarrec",
+    author_email="florent@enjoy-digital.fr",
+    url="http://enjoy-digital.fr",
+    download_url="https://github.com/enjoy-digital/litedram",
+    test_suite="test",
+    license="BSD",
+    python_requires="~=3.6",
+    install_requires=["pyyaml"],
+    packages=find_packages(exclude=("test*", "sim*", "doc*", "examples*")),
+    include_package_data=True,
+)
diff --git a/test/__init__.py b/test/__init__.py
new file mode 100644 (file)
index 0000000..e69de29
diff --git a/test/access_pattern.csv b/test/access_pattern.csv
new file mode 100644 (file)
index 0000000..12ace39
--- /dev/null
@@ -0,0 +1,1024 @@
+0x0000012e,0x61d2c8d5
+0x000000d9,0x8ec8f8f6
+0x00000135,0xf9f6145f
+0x0000033c,0xe653e338
+0x000002ff,0xbb609ae9
+0x0000037a,0xb42e5587
+0x00000228,0xce227592
+0x00000317,0xbd257969
+0x0000036e,0x7fe31c88
+0x0000011f,0xda6ba38d
+0x000002c7,0x141f40ad
+0x000000ab,0xa7c88a54
+0x000003f1,0xa48ddec4
+0x00000123,0xd800a7fe
+0x00000098,0x45a96b1d
+0x0000003b,0x4a75a15d
+0x0000004f,0x90e04e1a
+0x00000361,0x9af6660b
+0x000000fc,0xa4830b72
+0x00000033,0xb063ccb3
+0x000000c1,0xf60e53ac
+0x000002d2,0x47d9b283
+0x0000013f,0xe19843f7
+0x0000031d,0x30920818
+0x000001c3,0x2a830514
+0x000002c6,0xee6a0e7e
+0x00000104,0x6ab2c91c
+0x00000100,0x2dc79361
+0x000001e7,0xe9bf7c25
+0x00000149,0x29adfad6
+0x00000109,0x4f6c5d4a
+0x00000285,0x9ec316ff
+0x000001c4,0xf7768be4
+0x00000082,0x7478d35c
+0x000001a9,0x1832672d
+0x000001e5,0x12449eee
+0x000002f3,0x90a205e0
+0x00000217,0x266d0830
+0x0000039a,0xe6a207ac
+0x00000265,0x8fd00e38
+0x0000005e,0xc71ce3cc
+0x00000116,0x03808ba0
+0x0000025d,0x9db362a9
+0x000002d8,0x04ab3a95
+0x0000018b,0xd8ccace2
+0x0000007d,0x9443ab73
+0x00000187,0x7933b972
+0x0000015e,0xcf6daccc
+0x000002bc,0xbb9aa01c
+0x00000274,0x790bce70
+0x00000227,0xa2e56a6d
+0x0000002a,0xf2a9f0c7
+0x00000173,0x0cd5e422
+0x00000164,0x96c0513f
+0x000001fb,0x5d766d17
+0x000001a0,0x55ea28bb
+0x000002bd,0x38d65dfa
+0x00000142,0x7212fb34
+0x000000b5,0x3b5fa506
+0x00000354,0x6a89900a
+0x000001ac,0x2ce6be3f
+0x00000342,0xb956396b
+0x00000281,0x118b84ad
+0x00000300,0x6e091a3c
+0x0000029f,0x854141b2
+0x000000c6,0x3aafa04b
+0x00000107,0x907c2cb4
+0x0000000a,0xc965666f
+0x0000024c,0x2792b3a0
+0x00000203,0x1d19c1e1
+0x00000332,0x9f8ff8ce
+0x00000092,0x86c77fe4
+0x00000101,0x5d59d5f3
+0x00000025,0x5dedd914
+0x00000258,0x9c886d9e
+0x00000243,0x90a6e12a
+0x00000247,0x75f3803e
+0x000001ad,0x68843b6d
+0x00000071,0x63b4de04
+0x0000026e,0x622eab11
+0x0000034b,0xb42696c1
+0x00000195,0x7685880a
+0x000002ce,0x008d2d65
+0x000001c5,0xc064168f
+0x00000019,0xf93530a6
+0x000001b3,0xec1b06db
+0x000002f9,0x7ce508d3
+0x00000078,0xae78c8eb
+0x00000134,0x64c63926
+0x00000255,0xd3646539
+0x00000010,0xa158c47d
+0x000000f1,0xf18d4c56
+0x0000001d,0x3a693620
+0x000000e5,0x76b47c71
+0x0000001e,0xa741d479
+0x0000017f,0x84f156b2
+0x000000cb,0x8cc414d8
+0x000001a4,0xcd6cd47f
+0x000003e4,0x37307894
+0x000001a1,0x041c0b6e
+0x00000236,0x67741810
+0x000000b1,0x573222c0
+0x00000248,0x68e8f7f4
+0x00000316,0x16265757
+0x000001ed,0x85df14f1
+0x0000028e,0x54ec8e2f
+0x000002c8,0xfae5e756
+0x000003a4,0x2cd32729
+0x0000035b,0x8ce13b5a
+0x0000022d,0x943ce80f
+0x00000345,0x278ff629
+0x0000008d,0x94a9c2ec
+0x00000111,0xba4e5642
+0x00000011,0x22b3909c
+0x000000f7,0x55338938
+0x00000186,0x9218fd6c
+0x000000e0,0x3c48a497
+0x0000033a,0xda233663
+0x00000048,0x58855816
+0x0000029d,0x4df9feb1
+0x00000382,0x9f0f2502
+0x00000132,0xddaed3fe
+0x000001a7,0xfde6bdba
+0x00000128,0x448fce9a
+0x0000037e,0x42aaaa1e
+0x000001a8,0x4ea6f4df
+0x000000ec,0x912aeb2c
+0x00000056,0xde7d8aeb
+0x000003e6,0x15f855a1
+0x0000001c,0xe0225e1e
+0x00000080,0x3686bfb7
+0x000000da,0x7724b050
+0x000001b2,0xfec50f9a
+0x000003c8,0x68d72fa8
+0x00000225,0x65df6eb6
+0x00000049,0x5b181430
+0x0000027e,0x35ec0cd3
+0x00000348,0xc1f5138a
+0x0000036a,0x47b36f4d
+0x00000178,0x073f863c
+0x000001fc,0xf6ccbf5f
+0x000003b3,0x3729f5ba
+0x00000356,0x7f6d4988
+0x00000378,0x31d541ce
+0x000001bf,0xe26033b2
+0x000002f4,0x4d009701
+0x0000012a,0x383e7b20
+0x000002f6,0x4ecb429b
+0x000002ea,0xac934081
+0x000001d7,0xdf607028
+0x0000005b,0x3d48a4d7
+0x000000dc,0x9b2eed8e
+0x00000089,0xe8170872
+0x0000007c,0x64439e56
+0x0000000e,0x3bb95a3c
+0x00000239,0x36e6a900
+0x000001d8,0xc4c42852
+0x0000002d,0xa3a1a282
+0x0000000f,0x4f3c81e2
+0x000002c2,0x5d291767
+0x000002fb,0x2e48dcfa
+0x00000119,0xc07a0c3c
+0x000002e8,0x1cce5cbd
+0x00000333,0x3a3a7e63
+0x00000110,0x4b501b3a
+0x000001af,0xb1287fe3
+0x000001db,0xee4a7258
+0x0000018c,0xe9276e99
+0x0000013c,0x5bf99b5f
+0x0000010b,0x2b1a09ff
+0x0000031b,0xd9f78b82
+0x000003d9,0x505c7a02
+0x0000000d,0x9c568cbf
+0x00000330,0x6f043b79
+0x000001f1,0x994dda6a
+0x0000000b,0xd09629fa
+0x00000136,0x41991be6
+0x0000004d,0x5e6a2d20
+0x00000198,0x0e0474d7
+0x000000ac,0x951f892d
+0x00000022,0xc9ec5c7c
+0x0000000c,0x95301582
+0x000002ef,0x08da98c7
+0x000003fb,0x43b4fd33
+0x000001eb,0x9e9b01ba
+0x000000a3,0x325f8442
+0x00000209,0x74878d2f
+0x00000159,0x4f1ec90f
+0x00000172,0x6089924d
+0x0000021a,0xaedaaeb8
+0x0000029a,0x7107bf13
+0x000001e1,0x584d3369
+0x00000106,0x75579cb1
+0x00000148,0x34bb1175
+0x00000398,0xb5d2e270
+0x000001ff,0x5a4cd7c1
+0x000003df,0x5ab6637c
+0x0000032f,0xa588b2ab
+0x0000033d,0x893d9f9c
+0x00000183,0x6c833e74
+0x0000019d,0x9584ba23
+0x000000d4,0x7499251f
+0x00000384,0x38c04aee
+0x000002d7,0x2f780083
+0x0000003c,0x25ea371f
+0x0000014d,0xa422c342
+0x000000a5,0xbb8ca7b1
+0x000000b4,0x600bfbb7
+0x000003a2,0x7536026b
+0x0000019f,0xf3056983
+0x000003b5,0x14819baa
+0x0000025b,0x22bfc5b4
+0x000001f4,0xf3dcc407
+0x000003d4,0xdd204ebc
+0x000000e8,0x19371324
+0x0000031a,0x388f82b1
+0x000000b8,0xaa3c9d73
+0x00000370,0xdf89cab0
+0x00000251,0x149453d0
+0x00000177,0x060e6abb
+0x000001d3,0x64c800bf
+0x000002e0,0x197fe27c
+0x000001bc,0x112c0c05
+0x00000339,0x5c1d0545
+0x0000011d,0xa9cb28e3
+0x00000020,0x9d16cec1
+0x0000037b,0xf46e9e3d
+0x00000182,0x99e052fd
+0x00000389,0xbd93a71e
+0x000002b4,0x4bdf2842
+0x000002fc,0xa93be104
+0x00000192,0x1fd37c83
+0x0000020e,0x542b2943
+0x00000001,0x6a1469d3
+0x00000146,0xbfca75e9
+0x0000010e,0x8e00b2ee
+0x00000141,0x4a5c4324
+0x00000023,0x7cfd7c93
+0x00000235,0xce9539ab
+0x000002a0,0xe31e7f99
+0x00000231,0xbb3e3d23
+0x00000214,0x6eaa5dfc
+0x00000319,0x9f719f0d
+0x00000175,0x5ae51df8
+0x000002b0,0xa993d4c0
+0x000002da,0x9bb2a247
+0x00000053,0xd8d2dec4
+0x000003e0,0xaae00f7b
+0x00000309,0x3f866f74
+0x00000276,0x4cf2e344
+0x00000055,0x4ba9a459
+0x00000216,0xee2ceffb
+0x00000139,0xc9e720fb
+0x00000338,0xc6204e59
+0x00000271,0xc98ed7f6
+0x000002a4,0xfd31d029
+0x00000286,0xdbcc04cb
+0x0000037f,0x19bfb426
+0x00000064,0x0def53ae
+0x00000335,0xbd34fdea
+0x00000205,0x331768f4
+0x0000035c,0xa0e11fea
+0x00000375,0x43bd9bcf
+0x00000246,0xf1f27dd0
+0x00000018,0x3a703637
+0x00000388,0x4ca11d1c
+0x00000169,0x7a808c4a
+0x00000143,0xde360dec
+0x00000088,0xf788f2f8
+0x000002c3,0x20fc77bb
+0x0000018a,0x171ffa1c
+0x000003bd,0xc1285057
+0x0000032b,0x47a437ab
+0x000003ce,0xab34d83e
+0x00000066,0x9c21cc65
+0x00000163,0x16b89209
+0x000001a5,0x39ed216e
+0x000000d7,0x24e89199
+0x00000125,0xcd425b63
+0x000002e7,0xa3d10901
+0x00000219,0x16d97533
+0x00000363,0xceb7cdc8
+0x000002c9,0xb7ca8bb3
+0x000002a1,0xc9a18b20
+0x00000027,0xaf3a93d3
+0x00000343,0x724cf43c
+0x0000016d,0x709cdc2b
+0x000003fa,0x0bb57a27
+0x000003d0,0x50ebce7d
+0x000003b4,0xcca0fcbd
+0x000001d2,0xd47c15f9
+0x00000171,0x23e44721
+0x0000014b,0xfa40a59c
+0x000002bf,0x2669def1
+0x000003de,0x029d6b69
+0x00000390,0xd6467cb3
+0x00000013,0x2fa1f2fe
+0x00000329,0x39b5b021
+0x00000391,0x7155d9e6
+0x000000a4,0xe65f6a8b
+0x0000010d,0xeb081ac3
+0x000001d9,0xc3333042
+0x0000001a,0xa7b3a7a1
+0x0000005c,0x15cc2a0b
+0x000001df,0x58349e33
+0x00000081,0xbce62295
+0x00000129,0x4d34e0a5
+0x0000037d,0xae0fb82d
+0x0000033f,0xbab64099
+0x000000ae,0x19cf9f0c
+0x00000085,0xf65025dc
+0x0000030d,0x156184eb
+0x000000f3,0xd44874cb
+0x000003c4,0x234a08dc
+0x00000073,0x89be0b06
+0x0000008c,0x9c2f3f36
+0x0000006a,0x6dbd974e
+0x000003ae,0xb1c0fac8
+0x0000036c,0x0158019e
+0x0000003f,0x60d2350b
+0x000000c3,0xe0bf851a
+0x0000005d,0x57f4331e
+0x00000160,0x2d01f727
+0x00000074,0xe7420f16
+0x0000001b,0x65193a81
+0x000003b6,0x5404408b
+0x000000fe,0x4a912205
+0x0000020f,0xaf1ec7e3
+0x000003f2,0x65284d10
+0x00000038,0xc38902f9
+0x0000006b,0x93f02e10
+0x00000029,0xb73538b1
+0x000002ab,0x3832c214
+0x000001f3,0x7f04e0fe
+0x000002e2,0x863662da
+0x0000010f,0x6a7b7b52
+0x00000360,0x313fa92d
+0x000002e1,0x29574441
+0x00000133,0x8503d7a1
+0x00000072,0xdf037127
+0x00000318,0x1f0a5242
+0x00000154,0x5d171f74
+0x00000197,0xa236ca17
+0x000000c5,0x344b386f
+0x0000023b,0xbbfca537
+0x000002dc,0x8ecdbd48
+0x000000d3,0x68417535
+0x000002c4,0xa87d737e
+0x00000302,0x5b4ac57d
+0x000003b1,0x7208c47a
+0x00000264,0xe967870e
+0x00000230,0x6b8a64a4
+0x000001c0,0xa90e1999
+0x000000d0,0xfe6134c9
+0x000000e4,0x0d1e3e4f
+0x000003ed,0x2765a290
+0x0000038a,0xadefbc7a
+0x00000290,0x2253d279
+0x000001da,0xef8c14da
+0x00000204,0xfa11139c
+0x00000024,0x51773a21
+0x0000022c,0x82e40079
+0x000003e1,0x0d25dc31
+0x000002fa,0xae982f52
+0x000000aa,0xbdf45ea4
+0x0000026f,0x4441d881
+0x00000362,0x9129843f
+0x000000d8,0x34175578
+0x000001f6,0x01663b92
+0x000003cc,0x341926d0
+0x00000352,0x4dc5fcc0
+0x00000036,0x1566a8eb
+0x00000273,0xfdc7e15c
+0x000000af,0xbaed2374
+0x00000041,0x1a0b8317
+0x000001dd,0xd18f699a
+0x000000a8,0x90cb209a
+0x00000257,0xbf67c32e
+0x0000003d,0x0cbb59c4
+0x00000093,0xfe60eac3
+0x00000336,0xcf5be5dc
+0x00000084,0x660900fc
+0x00000320,0xaf5e2207
+0x0000032e,0x397069f3
+0x000001b7,0x9ed88604
+0x0000005a,0xd9e0f4a5
+0x000001cb,0xf2081a29
+0x00000270,0xe42a12a4
+0x0000027b,0x48fb577a
+0x000003c3,0x239ff442
+0x0000015f,0xcef290ee
+0x0000002b,0xe894ca29
+0x000002e6,0x00605a5d
+0x000003d8,0x8a3309b1
+0x000003ba,0x715efaae
+0x00000113,0xa0d1f6a8
+0x000001e8,0x744fd2ca
+0x00000065,0x326f3881
+0x00000305,0xf508ccfc
+0x00000215,0x594d1b47
+0x000002d4,0x89b4ec73
+0x000001a2,0xf12e7ff3
+0x000003ab,0x270a33a1
+0x000002d9,0x95c40cc3
+0x000001f0,0xadb2d1e6
+0x00000334,0x1f8b03c2
+0x00000359,0x0c8c22df
+0x00000016,0xfdef90f8
+0x00000321,0x6d16c2b6
+0x0000012d,0x4a4a85aa
+0x00000112,0x1b9612a0
+0x000000c2,0x11b7b67a
+0x000003a8,0x0d2ddd6b
+0x0000028c,0x9bac58e1
+0x0000001f,0x38060223
+0x00000299,0xa69b4430
+0x0000034e,0x92075ea3
+0x00000068,0x24693370
+0x00000091,0xe20db412
+0x0000029e,0x78539957
+0x00000385,0x7a646c06
+0x0000002c,0xd00f800a
+0x00000188,0x0846d6bc
+0x000001e0,0x1702b043
+0x000002cb,0x1f438aea
+0x0000005f,0xa43309be
+0x00000147,0x1609f9c1
+0x000002f7,0x7b851f8e
+0x000002dd,0x6bf738a3
+0x000000ca,0x1a50ad66
+0x000001e9,0x37a9593f
+0x0000006f,0x6de45853
+0x0000015c,0xeb6cb24e
+0x00000030,0xce267ad7
+0x0000002e,0x4ec1453b
+0x000001d1,0xfc14bec4
+0x00000017,0x876d7392
+0x000002f8,0x9ac12c44
+0x000000f6,0xf524d27d
+0x0000035d,0x30a11607
+0x00000394,0xd3b26a18
+0x00000355,0xee8a455a
+0x00000261,0x98c20113
+0x000000d5,0x37fbdd7f
+0x00000325,0x85269178
+0x0000011e,0xf74a97ad
+0x0000028d,0xb144ba63
+0x000000ef,0xec40f103
+0x000002be,0xda839587
+0x00000061,0xe116a87a
+0x000002ae,0xb615d223
+0x000001ab,0x81c40cec
+0x000000cd,0xe644b747
+0x0000006e,0x82bd6a8c
+0x00000000,0xf000849b
+0x0000032d,0xb22bc4c2
+0x00000368,0x7510c7f3
+0x00000067,0xe616e21e
+0x00000358,0xb2b4e853
+0x000002a3,0x1a809df7
+0x000000a0,0xbd9649c3
+0x000003d2,0x93cb8b68
+0x00000046,0xf4aa0c1b
+0x0000026a,0xb84137a0
+0x000000f4,0x5519ed9c
+0x0000025e,0x27785441
+0x000000e2,0xf7c05a5c
+0x00000346,0x99448f63
+0x0000008b,0xf7808cc8
+0x0000036b,0xa20fdf9e
+0x000000bc,0x0d624554
+0x0000017b,0x328aa94c
+0x0000023f,0xab882098
+0x0000033b,0xcb5d14ad
+0x00000114,0x62fca8d3
+0x000003fc,0x80a34a21
+0x000001bd,0x2027292c
+0x00000328,0x6b1ed1f1
+0x000000b6,0x3cd89d38
+0x0000027c,0xf3f04d4e
+0x000003d5,0x482e2cce
+0x00000054,0x5d084c63
+0x000002df,0x9694542a
+0x000003b8,0x823d6559
+0x00000289,0xc3adcb32
+0x0000035f,0x88e8e6db
+0x0000024f,0xc288d6fb
+0x000001cc,0xf5a0c23e
+0x0000015a,0x04c6ac85
+0x000000e6,0xa2c708a6
+0x000000cc,0x214ae76e
+0x0000039e,0x75bf1bc8
+0x000003c9,0x3191a7eb
+0x000000e9,0xec7d07db
+0x00000060,0xe0710b88
+0x000002a7,0x73d0cd4e
+0x000003db,0x1e017f85
+0x000000a2,0x489b1f6c
+0x00000076,0x60529d31
+0x0000004b,0x93b6355e
+0x00000063,0xe9691ee0
+0x000001ea,0xf1d3e628
+0x000000a6,0x3eaf45d5
+0x0000034a,0x079bc1db
+0x00000003,0x2b83ee22
+0x000003cb,0xb38d5007
+0x00000315,0x005c5340
+0x000003f9,0xf61bec1d
+0x000003fe,0x459a3987
+0x000001b4,0x955aa611
+0x000000c9,0x0b8502a7
+0x0000025c,0x919b4b7f
+0x00000323,0x4e0b307c
+0x0000039c,0x25e20b80
+0x00000035,0x15d35def
+0x00000155,0xed7988b9
+0x0000007a,0x0d259437
+0x0000031c,0xc448416c
+0x00000379,0x588b1ea1
+0x000000ee,0xda9033f2
+0x0000017c,0x5d8510dd
+0x0000017d,0x7a845fad
+0x0000009d,0x285e125f
+0x000003ac,0xc3a8f4f8
+0x00000044,0x562f95f9
+0x000001c2,0xcbcbfc47
+0x000001f8,0x8bb3c481
+0x000002f1,0x5eb9554d
+0x0000007e,0x3cd4d757
+0x000003c2,0xf24687c9
+0x00000208,0x22fe40f5
+0x000001c6,0xbd394a93
+0x00000207,0x9f8abb23
+0x0000003e,0x3b084161
+0x00000310,0x5dd566f4
+0x0000038e,0x93cfc737
+0x0000015d,0x248175bd
+0x0000009c,0x06a757d2
+0x00000220,0x6298f764
+0x000001cd,0xb7493bd3
+0x00000340,0xcab3638c
+0x0000016a,0xba6f41df
+0x000000de,0xb583bd95
+0x000000b3,0x55ee3276
+0x00000105,0xe60a6ea0
+0x00000292,0xfa17da23
+0x000001c7,0xc02731ee
+0x0000039b,0x314a1f3f
+0x00000324,0xaa0e0330
+0x000003ef,0x5606084c
+0x000000bb,0x3f139afc
+0x0000027d,0x04af2287
+0x0000025f,0x1ddf8f9e
+0x00000242,0x0f8a411a
+0x000003c1,0xc6518d07
+0x00000303,0x465f710f
+0x0000002f,0xed4d052e
+0x0000024b,0x1343c957
+0x00000393,0x5c0a5fe4
+0x000002c5,0x0465e58d
+0x0000009e,0x2c09e0a8
+0x00000008,0x853a3b86
+0x0000009f,0x0bb6972d
+0x000002d3,0x961b173f
+0x000000f0,0x9756f025
+0x00000245,0x5d446cad
+0x000001b9,0xdd7862a5
+0x000002b5,0xc2d1e49b
+0x00000090,0x5dcb1a93
+0x0000019b,0x17c5f622
+0x0000007f,0x2048e019
+0x000000bf,0xe575efda
+0x00000050,0x29f4ff94
+0x00000326,0x5af3c8fa
+0x0000019e,0xae8ad590
+0x000002ad,0x4325de4c
+0x0000030c,0xd00530a4
+0x000002cc,0xf3f7fcf7
+0x0000032c,0xbc35a67f
+0x0000016c,0x2ad3a928
+0x000000f8,0x4ddf8b47
+0x0000004c,0x349d1982
+0x0000039f,0x8e06d477
+0x000003ee,0x67e0cbc5
+0x0000021b,0xbbbd1879
+0x0000004a,0x2c55a027
+0x000002a2,0x2634f218
+0x000003cf,0x3d73d279
+0x000000eb,0x6e78b973
+0x0000021e,0xdd01d278
+0x00000127,0xd6fd8840
+0x00000059,0xadb7fd4a
+0x000002ec,0x3ced1d8a
+0x0000038f,0x1dad4ff2
+0x000002b3,0xa7b024d8
+0x000002d6,0xead8cd71
+0x00000184,0x1ee19ab9
+0x00000120,0xa362afd8
+0x00000260,0xb2d1429d
+0x00000005,0x6aa3df1a
+0x00000308,0x1ebb5208
+0x000000ff,0xa9df5014
+0x00000087,0x32938571
+0x00000126,0x59d446f1
+0x00000293,0xde9d5490
+0x0000029c,0xa779710c
+0x000002a9,0x62c7737d
+0x000003bc,0xa800328e
+0x000002ca,0xd5b99fa7
+0x00000249,0x06ec5c2f
+0x000003ff,0x588a9bdc
+0x00000371,0x33386477
+0x000000c4,0x2feeb727
+0x000000fd,0xe950d114
+0x0000033e,0xcccfdb62
+0x000003af,0x9cbb4ec8
+0x00000045,0x4e91087f
+0x000000ea,0xfa9c9aac
+0x000000be,0x6af216ea
+0x0000010c,0x632ae74f
+0x000001e2,0xb101bded
+0x0000020a,0x7a41e224
+0x00000238,0x4c4b3a6e
+0x00000158,0xb7328634
+0x000001be,0xe102181b
+0x000003a9,0x5c529dd4
+0x000003a7,0xec370158
+0x00000108,0x889d6ebe
+0x0000025a,0x95906629
+0x00000350,0xcf1cd4e7
+0x000001f5,0x7c295b29
+0x000003bb,0xeae747ee
+0x00000349,0x82ae2057
+0x000003b9,0x7a249f88
+0x000001b0,0x2563614b
+0x00000174,0x325549b4
+0x000000e3,0x6e51ae06
+0x00000006,0xb845331b
+0x000003b0,0xbf77e74d
+0x00000322,0x444d330a
+0x00000191,0x83d91767
+0x000000ad,0x477af9ba
+0x0000021f,0xc071f857
+0x0000008f,0x3e8a5d1a
+0x0000029b,0x9b0431fd
+0x000002a8,0xcebb2ae6
+0x00000165,0x1a2c3cd4
+0x00000157,0x1629e3fe
+0x000003d1,0x9dc7627c
+0x00000121,0xe1ff1567
+0x0000007b,0x51610f6c
+0x000000d1,0x7d5918b4
+0x000002b8,0x74025419
+0x00000395,0x6001080f
+0x000000b7,0xbdfa2c1a
+0x000001b1,0xe0b9b238
+0x0000038d,0xae0f8b86
+0x00000291,0x7d585c4d
+0x0000018f,0x7381b5ac
+0x000002ba,0xc3081096
+0x000000ce,0x7f15786e
+0x0000022e,0x424bc3fc
+0x0000006c,0xee2cc7a7
+0x000001a6,0xb3b219a0
+0x0000034d,0x9a9e73fe
+0x00000118,0x63d7a149
+0x0000026d,0x1d6d3ea4
+0x00000162,0x44b271c9
+0x00000241,0xd6da9f44
+0x0000030b,0x3f8a78d2
+0x000001ba,0xd2c0fe52
+0x000000bd,0x723c8910
+0x00000234,0xbf48bec5
+0x000001b8,0xa7874edb
+0x00000226,0xdd6aa284
+0x00000144,0xe7d2458c
+0x00000012,0xd6d8c04a
+0x0000017a,0xf6ae9915
+0x00000014,0xb00b0422
+0x000002f0,0x88041d25
+0x00000069,0x61278220
+0x0000030f,0xf2c3811d
+0x00000037,0x9d861d63
+0x000002d5,0x6c37dd6c
+0x000000c7,0xc80f3a17
+0x000001d4,0xdf893fdc
+0x000002ed,0xc5ec640d
+0x000002d1,0xa1bdec12
+0x000001ee,0x4e254439
+0x00000311,0x178b04fc
+0x0000019a,0x8da6f6b4
+0x000003e8,0x82797f9b
+0x00000152,0x9b9c0438
+0x0000014c,0x909b71e7
+0x000002b9,0x57eefc02
+0x00000279,0x21f30dd9
+0x0000013b,0x282ecf47
+0x0000012b,0xb8240d6c
+0x00000272,0x56ce2bbb
+0x00000040,0xba39ad57
+0x0000026c,0x35592672
+0x000003e3,0x4eb09c06
+0x000003f3,0x341149c8
+0x000001e3,0xe89b8254
+0x000002c0,0x2dd5663b
+0x0000009a,0x9e6c6b56
+0x00000021,0x12fd7034
+0x00000387,0xa027ea96
+0x000001ce,0x8af8e5c7
+0x000003f0,0x4d341384
+0x00000267,0xd2f19763
+0x0000018d,0x765671c5
+0x000000a1,0xaf382a64
+0x00000221,0x7fd9a647
+0x000002fe,0x7d1b99ca
+0x00000277,0x4db2b052
+0x000003dc,0xd5a05d52
+0x00000058,0x4ccbf2d4
+0x000000c0,0x33761998
+0x00000254,0xa34acbad
+0x000001d5,0xe2064af6
+0x0000021d,0x33c319ae
+0x00000083,0x8af6070c
+0x00000062,0x2c5c3595
+0x000001b5,0x53c1a11a
+0x00000365,0xb7641db3
+0x000000fa,0x8f168750
+0x000003d7,0x62700567
+0x0000035e,0x30cf6a3c
+0x00000297,0xaece2cae
+0x0000030e,0x4a431c09
+0x000001f2,0xb088d216
+0x0000023a,0x96ce06c1
+0x00000176,0x8a9abb34
+0x00000115,0x311a4837
+0x0000031e,0x5e85164b
+0x00000047,0x00a0eeb5
+0x000001a3,0xf84eca18
+0x000000f2,0x2cbee27a
+0x0000014e,0x2f191aee
+0x000000b9,0x3b12a538
+0x0000026b,0x472f6ac7
+0x00000233,0xa4337bf2
+0x00000052,0xf6959222
+0x000002b7,0xfe1c9ccf
+0x0000024a,0x4e6efdf3
+0x00000218,0x5496f22a
+0x0000024d,0xcfdaf597
+0x00000009,0x17453936
+0x0000032a,0x8c570977
+0x00000313,0xa5a96add
+0x00000179,0xfb73e3f9
+0x00000180,0x31c27b51
+0x000002fd,0x2d15a0cd
+0x0000012f,0xc475fc25
+0x000002a5,0xdf36df3c
+0x000003be,0xbccd34d7
+0x00000140,0x3f11ac6c
+0x000002af,0x4f8f60de
+0x00000200,0x6b3ed957
+0x00000372,0x3134dec0
+0x000002bb,0x4677e498
+0x000000a9,0xc657ff78
+0x00000351,0xdaa8f98a
+0x00000170,0x865000f4
+0x000001c1,0x0daad104
+0x000003f6,0x54381f7c
+0x00000282,0xae0de4fd
+0x0000039d,0x930925cc
+0x0000020b,0xc04f5b79
+0x00000007,0xe60b3af3
+0x0000027f,0xcb230d4d
+0x0000034f,0x6e94cf18
+0x00000026,0x979b5acb
+0x000002b2,0xd69292e1
+0x00000137,0x298e7abb
+0x000001dc,0xaab2730d
+0x0000020d,0x50b57ca0
+0x00000298,0x5f9276c5
+0x0000022a,0x63391f81
+0x00000314,0x00f61207
+0x0000030a,0x1d94bc11
+0x0000017e,0xece45f2a
+0x00000196,0x3376ea4f
+0x00000212,0x394c781e
+0x0000015b,0xaa45c8e4
+0x00000262,0xee189cde
+0x000000b2,0x0cf81cad
+0x000003e5,0x6a469268
+0x00000122,0x0fe0bf0f
+0x000003ca,0x299b8fa4
+0x0000035a,0x18a3db69
+0x00000266,0x0387ddf8
+0x000001c8,0x76d2e831
+0x000001d0,0xd3132dbd
+0x00000206,0xfa3a0f73
+0x00000373,0x837f807f
+0x00000015,0x8f88a8ab
+0x00000386,0x3c2ee566
+0x000003f7,0x56d51358
+0x00000223,0xe644ada5
+0x000002f5,0x8f20bddd
+0x0000011b,0xa72467ed
+0x000001ef,0x0eeb3073
+0x000000d2,0xfceccbcc
+0x000002f2,0x722f9ca7
+0x000002ac,0x134a4fbb
+0x00000301,0x3cc98027
+0x0000014f,0xfbec5bf5
+0x000003d3,0x628b4ab9
+0x000001f7,0xf8e6291c
+0x000002d0,0x326e58d8
+0x000001ae,0x78256d64
+0x00000057,0x403e8c61
+0x0000008a,0x1d7338bc
+0x0000013e,0x1e11634f
+0x000002de,0xaf0f3eb1
+0x00000199,0xae5c4f27
+0x00000102,0xb0bac110
+0x000003c6,0x485052b8
+0x0000016e,0x5a38a789
+0x00000252,0x54542916
+0x00000002,0x39b180bd
+0x00000250,0x230f1e18
+0x000002db,0x1da31dad
+0x0000018e,0xbf914fb7
+0x000003c0,0x63f13c95
+0x00000369,0xbc276edd
+0x00000288,0xf2e5c78c
+0x00000211,0x8be09b81
+0x00000193,0xa9dd3901
+0x000003d6,0x564698ff
+0x000000dd,0x654586cd
+0x0000013d,0x17f8cdf8
+0x000003dd,0xf55532d7
+0x000000e1,0xd21d0301
+0x00000344,0x8e5c90b7
+0x00000347,0xce2ea106
+0x000002b6,0xe1456d48
+0x00000096,0x1690a90b
+0x000002e4,0xbfcf1ee5
+0x00000284,0x9bbd41e7
+0x00000376,0x0a1b239d
+0x00000194,0xb8c0425c
+0x000001cf,0x5a5f67b8
+0x0000019c,0x8c1365bf
+0x00000269,0x7ccc3095
+0x000001fe,0xeff19023
+0x00000028,0x4c6c96e1
+0x00000075,0x6031fafc
+0x000000e7,0x6d066ff7
+0x00000213,0x83a12826
+0x000003a1,0xdff42be1
+0x000000d6,0x74721815
+0x00000094,0x5e9436ba
+0x000003ec,0x0f91f2dd
+0x00000167,0x2f5d9a5c
+0x000001bb,0x41b606be
+0x00000222,0x455c3f18
+0x0000038c,0x41938755
+0x00000034,0xf3e676b0
+0x00000031,0xb3b105f0
+0x00000043,0xaebbd49c
+0x00000079,0x32e9f285
+0x00000130,0x90514309
+0x00000253,0xda947617
+0x000003c7,0x3a454ada
+0x00000131,0x6d9bb5fa
+0x00000278,0x5c094e3b
+0x00000256,0x19969979
+0x00000357,0x8f966e1a
+0x00000185,0xba65d2f8
+0x00000263,0x19417509
+0x000000db,0x1eaadeab
+0x00000232,0xef46402c
+0x00000240,0x215c8ec5
+0x000003b7,0x1d7489dd
+0x00000086,0xa9d49edc
+0x000000f5,0xb85554dc
+0x00000099,0x5d964de7
+0x000003fd,0x6722db64
+0x00000312,0x829a6b53
+0x000001fd,0xf5abdab3
+0x000002c1,0xe5662f67
+0x000003c5,0x2a5f11dd
+0x000002ee,0x69769d62
+0x000001d6,0x76155587
+0x0000016f,0xf474b542
+0x000003b2,0x0e03d080
+0x0000006d,0xcce03f59
+0x00000244,0xae332051
+0x00000366,0xa5d17a7b
+0x0000022b,0x0bb8f348
+0x00000150,0xd9d81433
+0x000000f9,0x06f2b1ca
+0x000000ed,0x10f3cbb5
+0x0000028a,0xb92f3a08
+0x0000038b,0x2e7839fc
+0x0000036d,0x8a55891d
+0x00000306,0x004434c7
+0x0000027a,0x2c2fe04e
+0x0000010a,0x23317858
+0x000003cd,0xb2e47f17
+0x000003a0,0xed86f8e0
+0x0000013a,0xf295d26e
+0x00000042,0xbd71be15
+0x0000016b,0xf52fcd29
+0x000003a3,0x80b123f1
+0x000003f4,0xc7b11df1
+0x000003ad,0x93a20006
+0x000003eb,0x52d781ac
+0x000002b1,0x6a5e69a3
+0x0000011a,0x0f63315c
+0x00000117,0x33659391
+0x00000051,0x1a05f763
+0x0000009b,0xde4ef3eb
+0x00000161,0xcd7d1638
+0x000001ec,0xee053da6
+0x000001ca,0x42044d3f
+0x000001de,0xcd1d0123
+0x0000028f,0x51f7b994
+0x000003bf,0x108f008e
+0x00000032,0x1ec7d1cb
+0x00000124,0xab39af81
+0x00000189,0x1678f062
+0x000002cd,0x510dc040
+0x00000331,0x4d33628a
+0x00000229,0x944e008a
+0x00000341,0x5a7a5372
+0x00000166,0xa92e5b7d
+0x00000337,0x174610f0
+0x000002e3,0x38695e89
+0x0000023e,0x96675159
+0x000003da,0x64ab9558
+0x00000097,0x3f86cd0f
+0x000000fb,0x107b81b4
+0x000002eb,0x7fd9a144
+0x00000039,0x30a2b470
+0x00000145,0x5f730f7d
+0x0000011c,0x966cf066
+0x00000275,0x21d87efc
+0x00000202,0xa470e81e
+0x00000380,0x7f1c9cfe
+0x000002a6,0x83ef8a4a
+0x00000392,0x7f080fa5
+0x00000304,0x24a98eb9
+0x000003f8,0xd1a6e7cb
+0x00000224,0xc8497258
+0x000003a6,0x6b304020
+0x00000201,0x30733eea
+0x0000004e,0xe5996b9a
+0x00000383,0x11421776
+0x000000cf,0x35b5d61d
+0x00000367,0x630ff9ae
+0x000001aa,0xbee7db59
+0x0000021c,0x341d6960
+0x00000283,0x89c0976d
+0x00000070,0x599deb7b
+0x000003a5,0xb79a547e
+0x000003aa,0x990a9aeb
+0x00000399,0xd867d08c
+0x000001b6,0x9c822c9e
+0x00000153,0x05f7124a
+0x00000296,0x1014c48d
+0x000002e5,0xc948f761
+0x00000190,0x74483f9b
+0x00000077,0x805f10a1
+0x00000280,0xb6fdd0fe
+0x000000c8,0xb7147ac4
+0x000003e9,0x39daa1ed
+0x00000259,0xc3d82fe5
+0x000002e9,0x1781b102
+0x000002aa,0xfb0674fd
+0x00000396,0xa2f79ac4
+0x00000353,0x796131a3
+0x00000294,0x3a4f253c
+0x00000268,0x8819efeb
+0x00000364,0xbbac1595
+0x000003f5,0x6468a5f3
+0x00000103,0xd39a7cf1
+0x00000004,0x2d311b72
+0x000002cf,0xbeda8a15
+0x0000031f,0x92221439
+0x00000374,0xb790d8a9
+0x0000020c,0x45db5760
+0x000003e2,0x6332d00f
+0x0000028b,0xcfb7b189
+0x000001c9,0x6380385f
+0x0000037c,0x443714c5
+0x00000397,0x3a84c720
+0x00000377,0x6cd3807c
+0x000000a7,0xeed1718a
+0x00000168,0xb2d6b00f
+0x000000ba,0xd4143f6b
+0x000001e6,0x70a5cba8
+0x0000003a,0x6db46c23
+0x00000138,0x8b1cbc57
+0x00000327,0x1417447b
+0x0000023d,0xf824f9fe
+0x000000b0,0xa4afe544
+0x0000024e,0xcdeefb90
+0x00000156,0x9116659d
+0x00000095,0xbec1d9ff
+0x000003ea,0x418440ad
+0x000001e4,0x60b154d8
+0x00000381,0x05bee376
+0x0000023c,0x56313370
+0x000000df,0x2271ed24
+0x00000237,0x5e79fb1a
+0x0000034c,0x00a28d23
+0x00000307,0x9a60280a
+0x00000287,0x4aebb908
+0x0000036f,0x15b250b7
+0x000003e7,0xc03cd972
+0x0000022f,0x07b8b4f6
+0x00000295,0xba38ebb6
+0x00000210,0xba34a72a
+0x000001f9,0x3f3d8c6d
+0x000001fa,0xeec12a22
+0x00000151,0xcf258683
+0x0000012c,0x52c63dee
+0x00000181,0x7a1b33cb
+0x0000014a,0x87b6f8b2
+0x0000008e,0x18c0f3a6
diff --git a/test/benchmark.py b/test/benchmark.py
new file mode 100755 (executable)
index 0000000..2a71126
--- /dev/null
@@ -0,0 +1,267 @@
+#!/usr/bin/env python3
+
+# This file is Copyright (c) 2020 Florent Kermarrec <florent@enjoy-digital.fr>
+# License: BSD
+
+import csv
+import logging
+import argparse
+from operator import and_
+from functools import reduce
+from itertools import zip_longest
+
+from migen import *
+from migen.genlib.misc import WaitTimer
+
+from litex.build.sim.config import SimConfig
+
+from litex.soc.interconnect.csr import *
+from litex.soc.integration.soc_sdram import *
+from litex.soc.integration.builder import *
+
+from litex.tools.litex_sim import SimSoC
+
+from litedram.frontend.bist import _LiteDRAMBISTGenerator, _LiteDRAMBISTChecker
+from litedram.frontend.bist import _LiteDRAMPatternGenerator, _LiteDRAMPatternChecker
+
+# LiteDRAM Benchmark SoC ---------------------------------------------------------------------------
+
+class LiteDRAMBenchmarkSoC(SimSoC):
+    def __init__(self,
+        mode             = "bist",
+        sdram_module     = "MT48LC16M16",
+        sdram_data_width = 32,
+        bist_base        = 0x0000000,
+        bist_end         = 0x0100000,
+        bist_length      = 1024,
+        bist_random      = False,
+        bist_alternating = False,
+        num_generators   = 1,
+        num_checkers     = 1,
+        access_pattern   = None,
+        **kwargs):
+        assert mode in ["bist", "pattern"]
+        assert not (mode == "pattern" and access_pattern is None)
+
+        # SimSoC -----------------------------------------------------------------------------------
+        SimSoC.__init__(self,
+            with_sdram       = True,
+            sdram_module     = sdram_module,
+            sdram_data_width = sdram_data_width,
+            **kwargs
+        )
+
+        # BIST/Pattern Generator / Checker ---------------------------------------------------------
+        if mode == "pattern":
+            make_generator = lambda: _LiteDRAMPatternGenerator(self.sdram.crossbar.get_port(), init=access_pattern)
+            make_checker   = lambda: _LiteDRAMPatternChecker(self.sdram.crossbar.get_port(),   init=access_pattern)
+        if mode == "bist":
+            make_generator = lambda: _LiteDRAMBISTGenerator(self.sdram.crossbar.get_port())
+            make_checker   = lambda: _LiteDRAMBISTChecker(self.sdram.crossbar.get_port())
+
+        generators = [make_generator() for _ in range(num_generators)]
+        checkers   = [make_checker()   for _ in range(num_checkers)]
+        self.submodules += generators + checkers
+
+        if mode == "pattern":
+            def bist_config(module):
+                return []
+
+            if not bist_alternating:
+                address_set = set()
+                for addr, _ in access_pattern:
+                    assert addr not in address_set, \
+                        "Duplicate address 0x%08x in access_pattern, write will overwrite previous value!" % addr
+                    address_set.add(addr)
+        if mode == "bist":
+            # Make sure that we perform at least one access
+            bist_length = max(bist_length, self.sdram.controller.interface.data_width // 8)
+            def bist_config(module):
+                return [
+                    module.base.eq(bist_base),
+                    module.end.eq(bist_end),
+                    module.length.eq(bist_length),
+                    module.random_addr.eq(bist_random),
+                ]
+
+            assert not (bist_random and not bist_alternating), \
+                "Write to random address may overwrite previously written data before reading!"
+
+            # Check address correctness
+            assert bist_end > bist_base
+            assert bist_end <= 2**(len(generators[0].end)) - 1, "End address outside of range"
+            bist_addr_range = bist_end - bist_base
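+            # Power-of-2 check via the usual bit trick: for n > 0, n & (n - 1) == 0
+            # only when a single bit is set (e.g. 0x0100000 & 0x00fffff == 0).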
+            assert bist_addr_range > 0 and bist_addr_range & (bist_addr_range - 1) == 0, \
+                "Length of the address range must be a power of 2"
+
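+        # Helper fan-in/fan-out: combined_read returns e.g. the AND of all "done"
+        # signals, while combined_write drives e.g. "start" on all modules from one signal.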
+        def combined_read(modules, signal, operator):
+            sig = Signal()
+            self.comb += sig.eq(reduce(operator, (getattr(m, signal) for m in modules)))
+            return sig
+
+        def combined_write(modules, signal):
+            sig = Signal()
+            self.comb += [getattr(m, signal).eq(sig) for m in modules]
+            return sig
+
+        # Sequencer --------------------------------------------------------------------------------
+        class LiteDRAMCoreControl(Module, AutoCSR):
+            def __init__(self):
+                self.init_done  = CSRStorage()
+                self.init_error = CSRStorage()
+        self.submodules.ddrctrl = ddrctrl = LiteDRAMCoreControl()
+        self.add_csr("ddrctrl")
+
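+        # Sequencer flow: WAIT-INIT -> BIST-GENERATOR [-> BIST-CHECKER] -> DISPLAY -> FINISH.
+        # With bist_alternating, generators and checkers run together in a single state;
+        # otherwise all writes complete before any reads start.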
+        display = Signal()
+        finish  = Signal()
+        self.submodules.fsm = fsm = FSM(reset_state="WAIT-INIT")
+        fsm.act("WAIT-INIT",
+            If(self.ddrctrl.init_done.storage, # Written by CPU when initialization is done
+                NextState("BIST-GENERATOR")
+            )
+        )
+        if bist_alternating:
+            # Force generators to wait for checkers and vice versa: connect them in pairs,
+            # with any unpaired module connected to the first module of the other group.
+            bist_connections = []
+            for generator, checker in zip_longest(generators, checkers):
+                g = generator or generators[0]
+                c = checker   or checkers[0]
+                bist_connections += [
+                    g.run_cascade_in.eq(c.run_cascade_out),
+                    c.run_cascade_in.eq(g.run_cascade_out),
+                ]
+
+            fsm.act("BIST-GENERATOR",
+                combined_write(generators + checkers, "start").eq(1),
+                *bist_connections,
+                *map(bist_config, generators + checkers),
+                If(combined_read(checkers, "done", and_),
+                    NextState("DISPLAY")
+                )
+            )
+        else:
+            fsm.act("BIST-GENERATOR",
+                combined_write(generators, "start").eq(1),
+                *map(bist_config, generators),
+                If(combined_read(generators, "done", and_),
+                    NextState("BIST-CHECKER")
+                )
+            )
+            fsm.act("BIST-CHECKER",
+                combined_write(checkers, "start").eq(1),
+                *map(bist_config, checkers),
+                If(combined_read(checkers, "done", and_),
+                    NextState("DISPLAY")
+                )
+            )
+        fsm.act("DISPLAY",
+            display.eq(1),
+            NextState("FINISH")
+        )
+        fsm.act("FINISH",
+            finish.eq(1)
+        )
+
+        # Simulation Results -----------------------------------------------------------------------
+        # Combinational running maximum over an iterable of signals (used to report
+        # the worst case across all generators/checkers).
+        def max_signal(signals):
+            signals = iter(signals)
+            s       = next(signals)
+            out     = Signal(len(s))
+            self.comb += out.eq(s)
+            for curr in signals:
+                prev = out
+                out = Signal(max(len(prev), len(curr)))
+                self.comb += If(prev > curr, out.eq(prev)).Else(out.eq(curr))
+            return out
+
+        generator_ticks = max_signal((g.ticks  for g in generators))
+        checker_errors  = max_signal((c.errors for c in checkers))
+        checker_ticks   = max_signal((c.ticks  for c in checkers))
+
+        self.sync += [
+            If(display,
+                Display("BIST-GENERATOR ticks:  %08d", generator_ticks),
+                Display("BIST-CHECKER errors:   %08d", checker_errors),
+                Display("BIST-CHECKER ticks:    %08d", checker_ticks),
+            )
+        ]
+
+        # Simulation End ---------------------------------------------------------------------------
+        end_timer = WaitTimer(2**16)
+        self.submodules += end_timer
+        self.comb += end_timer.wait.eq(finish)
+        self.sync += If(end_timer.done, Finish())
+
+# Build --------------------------------------------------------------------------------------------
+
+def load_access_pattern(filename):
+    with open(filename, newline="") as f:
+        reader = csv.reader(f)
+        access_pattern = [(int(addr, 0), int(data, 0)) for addr, data in reader]
+    return access_pattern
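+
+# The access pattern CSV holds one "address,data" pair per line, parsed with
+# automatic base detection, e.g. (illustrative values):
+#   0x00000000,0xdeadbeef
+#   0x00000004,0xcafebabe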
+
+def main():
+    parser = argparse.ArgumentParser(description="LiteDRAM Benchmark SoC Simulation")
+    builder_args(parser)
+    soc_sdram_args(parser)
+    parser.add_argument("--threads",          default=1,              help="Set number of threads (default=1)")
+    parser.add_argument("--sdram-module",     default="MT48LC16M16",  help="Select SDRAM chip")
+    parser.add_argument("--sdram-data-width", default=32,             help="Set SDRAM chip data width")
+    parser.add_argument("--sdram-verbosity",  default=0,              help="Set SDRAM checker verbosity")
+    parser.add_argument("--trace",            action="store_true",    help="Enable VCD tracing")
+    parser.add_argument("--trace-start",      default=0,              help="Cycle to start VCD tracing")
+    parser.add_argument("--trace-end",        default=-1,             help="Cycle to end VCD tracing")
+    parser.add_argument("--opt-level",        default="O0",           help="Compilation optimization level")
+    parser.add_argument("--bist-base",        default="0x00000000",   help="Base address of the test (default=0)")
+    parser.add_argument("--bist-length",      default="1024",         help="Length of the test (default=1024)")
+    parser.add_argument("--bist-random",      action="store_true",    help="Use random data during the test")
+    parser.add_argument("--bist-alternating", action="store_true",    help="Perform alternating writes/reads (WRWRWR... instead of WWW...RRR...)")
+    parser.add_argument("--num-generators",   default=1,              help="Number of BIST generators")
+    parser.add_argument("--num-checkers",     default=1,              help="Number of BIST checkers")
+    parser.add_argument("--access-pattern",                           help="Load access pattern (address, data) from CSV (ignores --bist-*)")
+    parser.add_argument("--log-level",        default="info",         help="Set logging verbosity",
+        choices=["critical", "error", "warning", "info", "debug"])
+    args = parser.parse_args()
+
+    root_logger = logging.getLogger()
+    root_logger.setLevel(getattr(logging, args.log_level.upper()))
+
+    soc_kwargs     = soc_sdram_argdict(args)
+    builder_kwargs = builder_argdict(args)
+
+    sim_config = SimConfig(default_clk="sys_clk")
+    sim_config.add_module("serial2console", "serial")
+
+    # Configuration --------------------------------------------------------------------------------
+    soc_kwargs["uart_name"]        = "sim"
+    soc_kwargs["sdram_module"]     = args.sdram_module
+    soc_kwargs["sdram_data_width"] = int(args.sdram_data_width)
+    soc_kwargs["sdram_verbosity"]  = int(args.sdram_verbosity)
+    soc_kwargs["bist_base"]        = int(args.bist_base, 0)
+    soc_kwargs["bist_length"]      = int(args.bist_length, 0)
+    soc_kwargs["bist_random"]      = args.bist_random
+    soc_kwargs["bist_alternating"] = args.bist_alternating
+    soc_kwargs["num_generators"]   = int(args.num_generators)
+    soc_kwargs["num_checkers"]     = int(args.num_checkers)
+
+    if args.access_pattern:
+        soc_kwargs["access_pattern"] = load_access_pattern(args.access_pattern)
+
+    # SoC ------------------------------------------------------------------------------------------
+    soc = LiteDRAMBenchmarkSoC(mode="pattern" if args.access_pattern else "bist", **soc_kwargs)
+
+    # Build/Run ------------------------------------------------------------------------------------
+    builder_kwargs["csr_csv"] = "csr.csv"
+    builder = Builder(soc, **builder_kwargs)
+    vns = builder.build(
+        threads     = args.threads,
+        sim_config  = sim_config,
+        opt_level   = args.opt_level,
+        trace       = args.trace,
+        trace_start = int(args.trace_start),
+        trace_end   = int(args.trace_end)
+    )
+
+if __name__ == "__main__":
+    main()
diff --git a/test/benchmarks.yml b/test/benchmarks.yml
new file mode 100644 (file)
index 0000000..a77421b
--- /dev/null
@@ -0,0 +1,1047 @@
+# ============================================================
+# Auto-generated on 2020-02-12 15:41:38 by ./gen_config.py
+# ------------------------------------------------------------
+# {'name_format': 'test_%d',
+#  'sdram_module': ['MT41K128M16', 'MT46V32M16', 'MT48LC16M16'],
+#  'sdram_data_width': [32],
+#  'bist_alternating': [True, False],
+#  'bist_length': [1, 1024],
+#  'bist_random': [True, False],
+#  'num_generators': [1, 3],
+#  'num_checkers': [1, 3],
+#  'access_pattern': ['access_pattern.csv']}
+# ============================================================
+{
+    "test_0": {
+        "sdram_module": "MT41K128M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 1,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": true
+        }
+    },
+    "test_1": {
+        "sdram_module": "MT41K128M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 1,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": false
+        }
+    },
+    "test_2": {
+        "sdram_module": "MT41K128M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 1,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": true
+        }
+    },
+    "test_3": {
+        "sdram_module": "MT41K128M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 1,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": false
+        }
+    },
+    "test_4": {
+        "sdram_module": "MT41K128M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 1,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": true
+        }
+    },
+    "test_5": {
+        "sdram_module": "MT41K128M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 1,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": false
+        }
+    },
+    "test_6": {
+        "sdram_module": "MT41K128M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 1,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": true
+        }
+    },
+    "test_7": {
+        "sdram_module": "MT41K128M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 1,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": false
+        }
+    },
+    "test_8": {
+        "sdram_module": "MT41K128M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 3,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": true
+        }
+    },
+    "test_9": {
+        "sdram_module": "MT41K128M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 3,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": false
+        }
+    },
+    "test_10": {
+        "sdram_module": "MT41K128M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 3,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": true
+        }
+    },
+    "test_11": {
+        "sdram_module": "MT41K128M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 3,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": false
+        }
+    },
+    "test_12": {
+        "sdram_module": "MT41K128M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 3,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": true
+        }
+    },
+    "test_13": {
+        "sdram_module": "MT41K128M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 3,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": false
+        }
+    },
+    "test_14": {
+        "sdram_module": "MT41K128M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 3,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": true
+        }
+    },
+    "test_15": {
+        "sdram_module": "MT41K128M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 3,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": false
+        }
+    },
+    "test_16": {
+        "sdram_module": "MT41K128M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 1,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": false
+        }
+    },
+    "test_17": {
+        "sdram_module": "MT41K128M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 1,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": false
+        }
+    },
+    "test_18": {
+        "sdram_module": "MT41K128M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 1,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": false
+        }
+    },
+    "test_19": {
+        "sdram_module": "MT41K128M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 1,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": false
+        }
+    },
+    "test_20": {
+        "sdram_module": "MT41K128M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 3,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": false
+        }
+    },
+    "test_21": {
+        "sdram_module": "MT41K128M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 3,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": false
+        }
+    },
+    "test_22": {
+        "sdram_module": "MT41K128M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 3,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": false
+        }
+    },
+    "test_23": {
+        "sdram_module": "MT41K128M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 3,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": false
+        }
+    },
+    "test_24": {
+        "sdram_module": "MT46V32M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 1,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": true
+        }
+    },
+    "test_25": {
+        "sdram_module": "MT46V32M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 1,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": false
+        }
+    },
+    "test_26": {
+        "sdram_module": "MT46V32M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 1,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": true
+        }
+    },
+    "test_27": {
+        "sdram_module": "MT46V32M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 1,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": false
+        }
+    },
+    "test_28": {
+        "sdram_module": "MT46V32M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 1,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": true
+        }
+    },
+    "test_29": {
+        "sdram_module": "MT46V32M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 1,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": false
+        }
+    },
+    "test_30": {
+        "sdram_module": "MT46V32M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 1,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": true
+        }
+    },
+    "test_31": {
+        "sdram_module": "MT46V32M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 1,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": false
+        }
+    },
+    "test_32": {
+        "sdram_module": "MT46V32M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 3,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": true
+        }
+    },
+    "test_33": {
+        "sdram_module": "MT46V32M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 3,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": false
+        }
+    },
+    "test_34": {
+        "sdram_module": "MT46V32M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 3,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": true
+        }
+    },
+    "test_35": {
+        "sdram_module": "MT46V32M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 3,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": false
+        }
+    },
+    "test_36": {
+        "sdram_module": "MT46V32M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 3,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": true
+        }
+    },
+    "test_37": {
+        "sdram_module": "MT46V32M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 3,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": false
+        }
+    },
+    "test_38": {
+        "sdram_module": "MT46V32M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 3,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": true
+        }
+    },
+    "test_39": {
+        "sdram_module": "MT46V32M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 3,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": false
+        }
+    },
+    "test_40": {
+        "sdram_module": "MT46V32M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 1,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": false
+        }
+    },
+    "test_41": {
+        "sdram_module": "MT46V32M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 1,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": false
+        }
+    },
+    "test_42": {
+        "sdram_module": "MT46V32M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 1,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": false
+        }
+    },
+    "test_43": {
+        "sdram_module": "MT46V32M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 1,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": false
+        }
+    },
+    "test_44": {
+        "sdram_module": "MT46V32M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 3,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": false
+        }
+    },
+    "test_45": {
+        "sdram_module": "MT46V32M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 3,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": false
+        }
+    },
+    "test_46": {
+        "sdram_module": "MT46V32M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 3,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": false
+        }
+    },
+    "test_47": {
+        "sdram_module": "MT46V32M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 3,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": false
+        }
+    },
+    "test_48": {
+        "sdram_module": "MT48LC16M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 1,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": true
+        }
+    },
+    "test_49": {
+        "sdram_module": "MT48LC16M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 1,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": false
+        }
+    },
+    "test_50": {
+        "sdram_module": "MT48LC16M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 1,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": true
+        }
+    },
+    "test_51": {
+        "sdram_module": "MT48LC16M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 1,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": false
+        }
+    },
+    "test_52": {
+        "sdram_module": "MT48LC16M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 1,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": true
+        }
+    },
+    "test_53": {
+        "sdram_module": "MT48LC16M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 1,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": false
+        }
+    },
+    "test_54": {
+        "sdram_module": "MT48LC16M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 1,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": true
+        }
+    },
+    "test_55": {
+        "sdram_module": "MT48LC16M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 1,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": false
+        }
+    },
+    "test_56": {
+        "sdram_module": "MT48LC16M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 3,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": true
+        }
+    },
+    "test_57": {
+        "sdram_module": "MT48LC16M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 3,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": false
+        }
+    },
+    "test_58": {
+        "sdram_module": "MT48LC16M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 3,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": true
+        }
+    },
+    "test_59": {
+        "sdram_module": "MT48LC16M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 3,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": false
+        }
+    },
+    "test_60": {
+        "sdram_module": "MT48LC16M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 3,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": true
+        }
+    },
+    "test_61": {
+        "sdram_module": "MT48LC16M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 3,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": false
+        }
+    },
+    "test_62": {
+        "sdram_module": "MT48LC16M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 3,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": true
+        }
+    },
+    "test_63": {
+        "sdram_module": "MT48LC16M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 3,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": false
+        }
+    },
+    "test_64": {
+        "sdram_module": "MT48LC16M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 1,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": false
+        }
+    },
+    "test_65": {
+        "sdram_module": "MT48LC16M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 1,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": false
+        }
+    },
+    "test_66": {
+        "sdram_module": "MT48LC16M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 1,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": false
+        }
+    },
+    "test_67": {
+        "sdram_module": "MT48LC16M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 1,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": false
+        }
+    },
+    "test_68": {
+        "sdram_module": "MT48LC16M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 3,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": false
+        }
+    },
+    "test_69": {
+        "sdram_module": "MT48LC16M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 3,
+        "num_checkers": 1,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": false
+        }
+    },
+    "test_70": {
+        "sdram_module": "MT48LC16M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 3,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1,
+            "bist_random": false
+        }
+    },
+    "test_71": {
+        "sdram_module": "MT48LC16M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 3,
+        "num_checkers": 3,
+        "access_pattern": {
+            "bist_length": 1024,
+            "bist_random": false
+        }
+    },
+    "test_72": {
+        "sdram_module": "MT41K128M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 1,
+        "num_checkers": 1,
+        "access_pattern": {
+            "pattern_file": "access_pattern.csv"
+        }
+    },
+    "test_73": {
+        "sdram_module": "MT41K128M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 1,
+        "num_checkers": 3,
+        "access_pattern": {
+            "pattern_file": "access_pattern.csv"
+        }
+    },
+    "test_74": {
+        "sdram_module": "MT41K128M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 3,
+        "num_checkers": 1,
+        "access_pattern": {
+            "pattern_file": "access_pattern.csv"
+        }
+    },
+    "test_75": {
+        "sdram_module": "MT41K128M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 3,
+        "num_checkers": 3,
+        "access_pattern": {
+            "pattern_file": "access_pattern.csv"
+        }
+    },
+    "test_76": {
+        "sdram_module": "MT41K128M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 1,
+        "num_checkers": 1,
+        "access_pattern": {
+            "pattern_file": "access_pattern.csv"
+        }
+    },
+    "test_77": {
+        "sdram_module": "MT41K128M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 1,
+        "num_checkers": 3,
+        "access_pattern": {
+            "pattern_file": "access_pattern.csv"
+        }
+    },
+    "test_78": {
+        "sdram_module": "MT41K128M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 3,
+        "num_checkers": 1,
+        "access_pattern": {
+            "pattern_file": "access_pattern.csv"
+        }
+    },
+    "test_79": {
+        "sdram_module": "MT41K128M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 3,
+        "num_checkers": 3,
+        "access_pattern": {
+            "pattern_file": "access_pattern.csv"
+        }
+    },
+    "test_80": {
+        "sdram_module": "MT46V32M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 1,
+        "num_checkers": 1,
+        "access_pattern": {
+            "pattern_file": "access_pattern.csv"
+        }
+    },
+    "test_81": {
+        "sdram_module": "MT46V32M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 1,
+        "num_checkers": 3,
+        "access_pattern": {
+            "pattern_file": "access_pattern.csv"
+        }
+    },
+    "test_82": {
+        "sdram_module": "MT46V32M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 3,
+        "num_checkers": 1,
+        "access_pattern": {
+            "pattern_file": "access_pattern.csv"
+        }
+    },
+    "test_83": {
+        "sdram_module": "MT46V32M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 3,
+        "num_checkers": 3,
+        "access_pattern": {
+            "pattern_file": "access_pattern.csv"
+        }
+    },
+    "test_84": {
+        "sdram_module": "MT46V32M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 1,
+        "num_checkers": 1,
+        "access_pattern": {
+            "pattern_file": "access_pattern.csv"
+        }
+    },
+    "test_85": {
+        "sdram_module": "MT46V32M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 1,
+        "num_checkers": 3,
+        "access_pattern": {
+            "pattern_file": "access_pattern.csv"
+        }
+    },
+    "test_86": {
+        "sdram_module": "MT46V32M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 3,
+        "num_checkers": 1,
+        "access_pattern": {
+            "pattern_file": "access_pattern.csv"
+        }
+    },
+    "test_87": {
+        "sdram_module": "MT46V32M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 3,
+        "num_checkers": 3,
+        "access_pattern": {
+            "pattern_file": "access_pattern.csv"
+        }
+    },
+    "test_88": {
+        "sdram_module": "MT48LC16M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 1,
+        "num_checkers": 1,
+        "access_pattern": {
+            "pattern_file": "access_pattern.csv"
+        }
+    },
+    "test_89": {
+        "sdram_module": "MT48LC16M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 1,
+        "num_checkers": 3,
+        "access_pattern": {
+            "pattern_file": "access_pattern.csv"
+        }
+    },
+    "test_90": {
+        "sdram_module": "MT48LC16M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 3,
+        "num_checkers": 1,
+        "access_pattern": {
+            "pattern_file": "access_pattern.csv"
+        }
+    },
+    "test_91": {
+        "sdram_module": "MT48LC16M16",
+        "sdram_data_width": 32,
+        "bist_alternating": true,
+        "num_generators": 3,
+        "num_checkers": 3,
+        "access_pattern": {
+            "pattern_file": "access_pattern.csv"
+        }
+    },
+    "test_92": {
+        "sdram_module": "MT48LC16M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 1,
+        "num_checkers": 1,
+        "access_pattern": {
+            "pattern_file": "access_pattern.csv"
+        }
+    },
+    "test_93": {
+        "sdram_module": "MT48LC16M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 1,
+        "num_checkers": 3,
+        "access_pattern": {
+            "pattern_file": "access_pattern.csv"
+        }
+    },
+    "test_94": {
+        "sdram_module": "MT48LC16M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 3,
+        "num_checkers": 1,
+        "access_pattern": {
+            "pattern_file": "access_pattern.csv"
+        }
+    },
+    "test_95": {
+        "sdram_module": "MT48LC16M16",
+        "sdram_data_width": 32,
+        "bist_alternating": false,
+        "num_generators": 3,
+        "num_checkers": 3,
+        "access_pattern": {
+            "pattern_file": "access_pattern.csv"
+        }
+    }
+}
diff --git a/test/common.py b/test/common.py
new file mode 100644 (file)
index 0000000..2fb0416
--- /dev/null
@@ -0,0 +1,647 @@
+# This file is Copyright (c) 2016-2019 Florent Kermarrec <florent@enjoy-digital.fr>
+# This file is Copyright (c) 2016 Tim 'mithro' Ansell <mithro@mithis.com>
+# This file is Copyright (c) 2020 Antmicro <www.antmicro.com>
+# License: BSD
+
+import os
+import random
+import itertools
+from functools import partial
+from operator import or_
+
+from migen import *
+
+
+def seed_to_data(seed, random=True, nbits=32):
+    if nbits == 32:
+        if random:
+            return (seed * 0x31415979 + 1) & 0xffffffff
+        else:
+            return seed
+    else:
+        assert nbits%32 == 0
+        data = 0
+        for i in range(nbits//32):
+            data = data << 32
+            data |= seed_to_data(seed*nbits//32 + i, random, 32)
+        return data
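+
+# Illustrative values (sketch): seed_to_data(1, random=False) == 1, while
+# seed_to_data(1) == (1 * 0x31415979 + 1) & 0xffffffff == 0x3141597a; for
+# nbits=64 two such 32-bit words are concatenated.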
+
+
+@passive
+def timeout_generator(ticks):
+    # Raise an exception after the given number of ticks, effectively stopping the simulation.
+    # Thanks to @passive, the simulation can still end even if this generator is running.
+    for _ in range(ticks):
+        yield
+    raise TimeoutError("Timeout after %d ticks" % ticks)
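+
+# Typical use (sketch; `dut` and `main_generator` are placeholder names): run it
+# alongside the main generator so a hang raises instead of blocking forever:
+#   run_simulation(dut, [main_generator(dut), timeout_generator(5000)])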
+
+
+class NativePortDriver:
+    """Generates sequences for reading from/writing to a LiteDRAMNativePort.
+
+    The write/read variants with wait_data=False are a cheap way to perform a
+    burst while the port is held locked, but all the data is lost this way
+    (handling it would require a separate coroutine).
+    """
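+
+    # A minimal usage sketch (assumed names: `dut`, `port`; not part of the
+    # original file):
+    #
+    #   driver = NativePortDriver(port)
+    #   def gen():
+    #       yield from driver.write(0x10, 0xdeadbeef)
+    #       data = (yield from driver.read(0x10))
+    #       assert data == 0xdeadbeef
+    #   run_simulation(dut, [gen()])
+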
+    def __init__(self, port):
+        self.port = port
+
+    def read(self, address, wait_data=True):
+        yield self.port.cmd.valid.eq(1)
+        yield self.port.cmd.we.eq(0)
+        yield self.port.cmd.addr.eq(address)
+        yield
+        while (yield self.port.cmd.ready) == 0:
+            yield
+        yield self.port.cmd.valid.eq(0)
+        yield
+        if wait_data:
+            while (yield self.port.rdata.valid) == 0:
+                yield
+            data = (yield self.port.rdata.data)
+            yield self.port.rdata.ready.eq(1)
+            yield
+            yield self.port.rdata.ready.eq(0)
+            yield
+            return data
+        else:
+            yield self.port.rdata.ready.eq(1)
+
+    def write(self, address, data, we=None, wait_data=True):
+        if we is None:
+            we = 2**self.port.wdata.we.nbits - 1
+        yield self.port.cmd.valid.eq(1)
+        yield self.port.cmd.we.eq(1)
+        yield self.port.cmd.addr.eq(address)
+        yield
+        while (yield self.port.cmd.ready) == 0:
+            yield
+        yield self.port.cmd.valid.eq(0)
+        yield self.port.wdata.valid.eq(1)
+        yield self.port.wdata.data.eq(data)
+        yield self.port.wdata.we.eq(we)
+        yield
+        if wait_data:
+            while (yield self.port.wdata.ready) == 0:
+                yield
+            yield self.port.wdata.valid.eq(0)
+            yield
+
+
+class CmdRequestRWDriver:
+    """Simple driver for Endpoint(cmd_request_rw_layout())"""
+    def __init__(self, req, i=0, ep_layout=True, rw_layout=True):
+        self.req = req
+        self.rw_layout = rw_layout  # if False, omit is_* signals
+        self.ep_layout = ep_layout  # if False, omit endpoint signals (valid, etc.)
+
+        # used to distinguish commands
+        self.i = self.bank = self.row = self.col = i
+
+    def request(self, char):
+        # convert character to matching command invocation
+        return {
+            "w": self.write,
+            "r": self.read,
+            "W": partial(self.write, auto_precharge=True),
+            "R": partial(self.read, auto_precharge=True),
+            "a": self.activate,
+            "p": self.precharge,
+            "f": self.refresh,
+            "_": self.nop,
+        }[char]()
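+
+    # Example (sketch, not from the original file): replaying the string "awrp_"
+    # issues Activate, Write, Read, Precharge and then a NOP on this endpoint:
+    #
+    #   driver = CmdRequestRWDriver(endpoint)
+    #   def gen():
+    #       for char in "awrp_":
+    #           yield from driver.request(char)
+    #           yield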
+
+    def activate(self):
+        yield from self._drive(valid=1, is_cmd=1, ras=1, a=self.row, ba=self.bank)
+
+    def precharge(self, all_banks=False):
+        a = 0 if not all_banks else (1 << 10)
+        yield from self._drive(valid=1, is_cmd=1, ras=1, we=1, a=a, ba=self.bank)
+
+    def refresh(self):
+        yield from self._drive(valid=1, is_cmd=1, cas=1, ras=1, ba=self.bank)
+
+    def write(self, auto_precharge=False):
+        assert not (self.col & (1 << 10))
+        col = self.col | (1 << 10) if auto_precharge else self.col
+        yield from self._drive(valid=1, is_write=1, cas=1, we=1, a=col, ba=self.bank)
+
+    def read(self, auto_precharge=False):
+        assert not (self.col & (1 << 10))
+        col = self.col | (1 << 10) if auto_precharge else self.col
+        yield from self._drive(valid=1, is_read=1, cas=1, a=col, ba=self.bank)
+
+    def nop(self):
+        yield from self._drive()
+
+    def _drive(self, **kwargs):
+        signals = ["a", "ba", "cas", "ras", "we"]
+        if self.rw_layout:
+            signals += ["is_cmd", "is_read", "is_write"]
+        if self.ep_layout:
+            signals += ["valid", "first", "last"]
+        for s in signals:
+            yield getattr(self.req, s).eq(kwargs.get(s, 0))
+        # Drive ba even for NOP so that bank machines can still be distinguished.
+        if "ba" not in kwargs:
+            yield self.req.ba.eq(self.bank)
+
+
+class DRAMMemory:
+    def __init__(self, width, depth, init=[]):
+        self.width = width
+        self.depth = depth
+        self.mem = []
+        for d in init:
+            self.mem.append(d)
+        for _ in range(depth-len(init)):
+            self.mem.append(0)
+
+        # DRAM_MEM_DEBUG: "W" enables write messages, "R" enables read messages, "1" enables both
+        self._debug = os.environ.get("DRAM_MEM_DEBUG", "0")
+
+    def show_content(self):
+        for addr in range(self.depth):
+            print("0x{:08x}: 0x{:0{dwidth}x}".format(addr, self.mem[addr], dwidth=self.width//4))
+
+    def _warn(self, address):
+        if address > self.depth * self.width:
+            print("! adr > 0x{:08x}".format(
+                self.depth * self.width))
+
+    def _write(self, address, data, we):
+        mask = reduce(or_, [0xff << (8 * bit) for bit in range(self.width//8)
+                            if (we & (1 << bit)) != 0], 0)
+        data = data & mask
+        self.mem[address%self.depth] = data
+        if self._debug in ["1", "W"]:
+            print("W 0x{:08x}: 0x{:0{dwidth}x}".format(address, self.mem[address%self.depth],
+                                                       dwidth=self.width//4))
+            self._warn(address)
+
+    def _read(self, address):
+        if self._debug in ["1", "R"]:
+            print("R 0x{:08x}: 0x{:0{dwidth}x}".format(address, self.mem[address%self.depth],
+                                                       dwidth=self.width//4))
+            self._warn(address)
+        return self.mem[address%self.depth]
+
+    @passive
+    def read_handler(self, dram_port, rdata_valid_random=0):
+        address = 0
+        pending = 0
+        prng = random.Random(42)
+        yield dram_port.cmd.ready.eq(0)
+        while True:
+            yield dram_port.rdata.valid.eq(0)
+            if pending:
+                while prng.randrange(100) < rdata_valid_random:
+                    yield
+                yield dram_port.rdata.valid.eq(1)
+                yield dram_port.rdata.data.eq(self._read(address))
+                yield
+                yield dram_port.rdata.valid.eq(0)
+                yield dram_port.rdata.data.eq(0)
+                pending = 0
+            elif (yield dram_port.cmd.valid):
+                pending = not (yield dram_port.cmd.we)
+                address = (yield dram_port.cmd.addr)
+                if pending:
+                    yield dram_port.cmd.ready.eq(1)
+                    yield
+                    yield dram_port.cmd.ready.eq(0)
+            yield
+
+    @passive
+    def write_handler(self, dram_port, wdata_ready_random=0):
+        address = 0
+        pending = 0
+        prng = random.Random(42)
+        yield dram_port.cmd.ready.eq(0)
+        while True:
+            yield dram_port.wdata.ready.eq(0)
+            if pending:
+                while (yield dram_port.wdata.valid) == 0:
+                    yield
+                while prng.randrange(100) < wdata_ready_random:
+                    yield
+                yield dram_port.wdata.ready.eq(1)
+                yield
+                self._write(address, (yield dram_port.wdata.data), (yield dram_port.wdata.we))
+                yield dram_port.wdata.ready.eq(0)
+                yield
+                pending = 0
+                yield
+            elif (yield dram_port.cmd.valid):
+                pending = (yield dram_port.cmd.we)
+                address = (yield dram_port.cmd.addr)
+                if pending:
+                    yield dram_port.cmd.ready.eq(1)
+                    yield
+                    yield dram_port.cmd.ready.eq(0)
+            yield
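+
+    # Typical hookup in a simulation (sketch; `dut`, `port` and `main_generator`
+    # are placeholder names):
+    #
+    #   mem = DRAMMemory(32, 128)
+    #   run_simulation(dut, [main_generator(dut),
+    #                        mem.write_handler(port),
+    #                        mem.read_handler(port)])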
+
+
+class MemoryTestDataMixin:
+    @property
+    def bist_test_data(self):
+        data = {
+            "8bit": dict(
+                base     = 2,
+                end      = 2 + 8,  # (end - base) must be a power of 2
+                length   = 5,
+                #                       2     3     4     5     6     7=2+5
+                expected = [0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x00],
+            ),
+            "32bit": dict(
+                base     = 0x04,
+                end      = 0x04 + 8,
+                length   = 5 * 4,
+                expected = [
+                    0x00000000,  # 0x00
+                    0x00000000,  # 0x04
+                    0x00000001,  # 0x08
+                    0x00000002,  # 0x0c
+                    0x00000003,  # 0x10
+                    0x00000004,  # 0x14
+                    0x00000000,  # 0x18
+                    0x00000000,  # 0x1c
+                ],
+            ),
+            "64bit": dict(
+                base     = 0x10,
+                end      = 0x10 + 8,
+                length   = 5 * 8,
+                expected = [
+                    0x0000000000000000,  # 0x00
+                    0x0000000000000000,  # 0x08
+                    0x0000000000000000,  # 0x10
+                    0x0000000000000001,  # 0x18
+                    0x0000000000000002,  # 0x20
+                    0x0000000000000003,  # 0x28
+                    0x0000000000000004,  # 0x30
+                    0x0000000000000000,  # 0x38
+                ],
+            ),
+            "32bit_masked": dict(
+                base     = 0x04,
+                end      = 0x04 + 0x04,  # TODO: fix address masking to be consistent
+                length   = 6 * 4,
+                expected = [  # due to masking
+                    0x00000000,  # 0x00
+                    0x00000004,  # 0x04
+                    0x00000005,  # 0x08
+                    0x00000002,  # 0x0c
+                    0x00000003,  # 0x10
+                    0x00000000,  # 0x14
+                    0x00000000,  # 0x18
+                    0x00000000,  # 0x1c
+                ],
+            ),
+        }
+        data["32bit_long_sequential"] = dict(
+            base     = 16,
+            end      = 16 + 128,
+            length   = 64,
+            expected = [0x00000000] * 128
+        )
+        expected = data["32bit_long_sequential"]["expected"]
+        expected[16//4:(16 + 64)//4] = list(range(64//4))
+        return data
+
+    @property
+    def pattern_test_data(self):
+        data = {
+            "8bit": dict(
+                pattern=[
+                    # address, data
+                    (0x00, 0xaa),
+                    (0x05, 0xbb),
+                    (0x02, 0xcc),
+                    (0x07, 0xdd),
+                ],
+                expected=[
+                    # data, address
+                    0xaa,  # 0x00
+                    0x00,  # 0x01
+                    0xcc,  # 0x02
+                    0x00,  # 0x03
+                    0x00,  # 0x04
+                    0xbb,  # 0x05
+                    0x00,  # 0x06
+                    0xdd,  # 0x07
+                ],
+            ),
+            "32bit": dict(
+                pattern=[
+                    # address, data
+                    (0x00, 0xabadcafe),
+                    (0x07, 0xbaadf00d),
+                    (0x02, 0xcafefeed),
+                    (0x01, 0xdeadc0de),
+                ],
+                expected=[
+                    # data, address
+                    0xabadcafe,  # 0x00
+                    0xdeadc0de,  # 0x04
+                    0xcafefeed,  # 0x08
+                    0x00000000,  # 0x0c
+                    0x00000000,  # 0x10
+                    0x00000000,  # 0x14
+                    0x00000000,  # 0x18
+                    0xbaadf00d,  # 0x1c
+                ],
+            ),
+            "64bit": dict(
+                pattern=[
+                    # address, data
+                    (0x00, 0x0ddf00dbadc0ffee),
+                    (0x05, 0xabadcafebaadf00d),
+                    (0x02, 0xcafefeedfeedface),
+                    (0x07, 0xdeadc0debaadbeef),
+                ],
+                expected=[
+                    # data, address
+                    0x0ddf00dbadc0ffee,  # 0x00
+                    0x0000000000000000,  # 0x08
+                    0xcafefeedfeedface,  # 0x10
+                    0x0000000000000000,  # 0x18
+                    0x0000000000000000,  # 0x20
+                    0xabadcafebaadf00d,  # 0x28
+                    0x0000000000000000,  # 0x30
+                    0xdeadc0debaadbeef,  # 0x38
+                ],
+            ),
+            "64bit_to_32bit": dict(
+                pattern=[
+                    # address, data
+                    (0x00, 0x0d15ea5e00facade),
+                    (0x05, 0xabadcafe8badf00d),
+                    (0x01, 0xcafefeedbaadf00d),
+                    (0x02, 0xfee1deaddeadc0de),
+                ],
+                expected=[
+                    # data, word, address
+                    0x00facade,  #  0 0x00
+                    0x0d15ea5e,  #  1 0x04
+                    0xbaadf00d,  #  2 0x08
+                    0xcafefeed,  #  3 0x0c
+                    0xdeadc0de,  #  4 0x10
+                    0xfee1dead,  #  5 0x14
+                    0x00000000,  #  6 0x18
+                    0x00000000,  #  7 0x1c
+                    0x00000000,  #  8 0x20
+                    0x00000000,  #  9 0x24
+                    0x8badf00d,  # 10 0x28
+                    0xabadcafe,  # 11 0x2c
+                    0x00000000,  # 12 0x30
+                ]
+            ),
+            "32bit_to_8bit": dict(
+                pattern=[
+                    # address, data
+                    (0x00, 0x00112233),
+                    (0x05, 0x44556677),
+                    (0x01, 0x8899aabb),
+                    (0x02, 0xccddeeff),
+                ],
+                expected=[
+                    # data, address
+                    0x33,  # 0x00
+                    0x22,  # 0x01
+                    0x11,  # 0x02
+                    0x00,  # 0x03
+                    0xbb,  # 0x04
+                    0xaa,  # 0x05
+                    0x99,  # 0x06
+                    0x88,  # 0x07
+                    0xff,  # 0x08
+                    0xee,  # 0x09
+                    0xdd,  # 0x0a
+                    0xcc,  # 0x0b
+                    0x00,  # 0x0c
+                    0x00,  # 0x0d
+                    0x00,  # 0x0e
+                    0x00,  # 0x0f
+                    0x00,  # 0x10
+                    0x00,  # 0x11
+                    0x00,  # 0x12
+                    0x00,  # 0x13
+                    0x77,  # 0x14
+                    0x66,  # 0x15
+                    0x55,  # 0x16
+                    0x44,  # 0x17
+                    0x00,  # 0x18
+                    0x00,  # 0x19
+                ]
+            ),
+            "8bit_to_32bit": dict(
+                pattern=[
+                    # address, data
+                    (0x00, 0x00),
+                    (0x01, 0x11),
+                    (0x02, 0x22),
+                    (0x03, 0x33),
+                    (0x10, 0x44),
+                    (0x11, 0x55),
+                    (0x12, 0x66),
+                    (0x13, 0x77),
+                    (0x08, 0x88),
+                    (0x09, 0x99),
+                    (0x0a, 0xaa),
+                    (0x0b, 0xbb),
+                    (0x0c, 0xcc),
+                    (0x0d, 0xdd),
+                    (0x0e, 0xee),
+                    (0x0f, 0xff),
+                ],
+                expected=[
+                    # data, address
+                    0x33221100,  # 0x00
+                    0x00000000,  # 0x04
+                    0xbbaa9988,  # 0x08
+                    0xffeeddcc,  # 0x0c
+                    0x77665544,  # 0x10
+                    0x00000000,  # 0x14
+                    0x00000000,  # 0x18
+                    0x00000000,  # 0x1c
+                ]
+            ),
+            "8bit_to_32bit_not_aligned": dict(
+                pattern=[
+                    # address, data
+                    (0x00, 0x00),
+                    (0x05, 0x11),
+                    (0x0a, 0x22),
+                    (0x0f, 0x33),
+                    (0x1d, 0x44),
+                    (0x15, 0x55),
+                    (0x13, 0x66),
+                    (0x18, 0x77),
+                ],
+                expected=[
+                    # data, address
+                    0x00000000,  # 0x00
+                    0x00001100,  # 0x04
+                    0x00220000,  # 0x08
+                    0x33000000,  # 0x0c
+                    0x66000000,  # 0x10
+                    0x00005500,  # 0x14
+                    0x00000077,  # 0x18
+                    0x00440000,  # 0x1c
+                ]
+            ),
+            "32bit_to_256bit":  dict(
+                pattern=[
+                    # address, data
+                    (0x00, 0x00000000),
+                    (0x01, 0x11111111),
+                    (0x02, 0x22222222),
+                    (0x03, 0x33333333),
+                    (0x04, 0x44444444),
+                    (0x05, 0x55555555),
+                    (0x06, 0x66666666),
+                    (0x07, 0x77777777),
+                    (0x10, 0x88888888),
+                    (0x11, 0x99999999),
+                    (0x12, 0xaaaaaaaa),
+                    (0x13, 0xbbbbbbbb),
+                    (0x14, 0xcccccccc),
+                    (0x15, 0xdddddddd),
+                    (0x16, 0xeeeeeeee),
+                    (0x17, 0xffffffff),
+                ],
+                expected=[
+                    # data, address
+                    0x7777777766666666555555554444444433333333222222221111111100000000,  # 0x00
+                    0x0000000000000000000000000000000000000000000000000000000000000000,  # 0x20
+                    0xffffffffeeeeeeeeddddddddccccccccbbbbbbbbaaaaaaaa9999999988888888,  # 0x40
+                    0x0000000000000000000000000000000000000000000000000000000000000000,  # 0x60
+                ]
+            ),
+            "32bit_to_256bit_not_aligned":  dict(
+                pattern=[
+                    # address, data
+                    (0x00, 0x00000000),
+                    (0x01, 0x11111111),
+                    (0x02, 0x22222222),
+                    (0x03, 0x33333333),
+                    (0x04, 0x44444444),
+                    (0x05, 0x55555555),
+                    (0x06, 0x66666666),
+                    (0x07, 0x77777777),
+                    (0x14, 0x88888888),
+                    (0x15, 0x99999999),
+                    (0x16, 0xaaaaaaaa),
+                    (0x17, 0xbbbbbbbb),
+                    (0x18, 0xcccccccc),
+                    (0x19, 0xdddddddd),
+                    (0x1a, 0xeeeeeeee),
+                    (0x1b, 0xffffffff),
+                ],
+                expected=[
+                    # data, address
+                    0x7777777766666666555555554444444433333333222222221111111100000000,  # 0x00
+                    0x0000000000000000000000000000000000000000000000000000000000000000,  # 0x20
+                    0xbbbbbbbbaaaaaaaa999999998888888800000000000000000000000000000000,  # 0x40
+                    0x00000000000000000000000000000000ffffffffeeeeeeeeddddddddcccccccc,  # 0x60
+                ]
+            ),
+            "32bit_not_aligned": dict(
+                pattern=[
+                    # address, data
+                    (0x00, 0xabadcafe),
+                    (0x07, 0xbaadf00d),
+                    (0x02, 0xcafefeed),
+                    (0x01, 0xdeadc0de),
+                ],
+                expected=[
+                    # data, address
+                    0xabadcafe,  # 0x00
+                    0xdeadc0de,  # 0x04
+                    0xcafefeed,  # 0x08
+                    0x00000000,  # 0x0c
+                    0x00000000,  # 0x10
+                    0x00000000,  # 0x14
+                    0x00000000,  # 0x18
+                    0xbaadf00d,  # 0x1c
+                ],
+            ),
+            "32bit_duplicates": dict(
+                pattern=[
+                    # address, data
+                    (0x00, 0xabadcafe),
+                    (0x07, 0xbaadf00d),
+                    (0x00, 0xcafefeed),
+                    (0x07, 0xdeadc0de),
+                ],
+                expected=[
+                    # data, address
+                    0xcafefeed,  # 0x00
+                    0x00000000,  # 0x04
+                    0x00000000,  # 0x08
+                    0x00000000,  # 0x0c
+                    0x00000000,  # 0x10
+                    0x00000000,  # 0x14
+                    0x00000000,  # 0x18
+                    0xdeadc0de,  # 0x1c
+                ],
+            ),
+            "32bit_sequential": dict(
+                pattern=[
+                    # address, data
+                    (0x02, 0xabadcafe),
+                    (0x03, 0xbaadf00d),
+                    (0x04, 0xcafefeed),
+                    (0x05, 0xdeadc0de),
+                ],
+                expected=[
+                    # data, address
+                    0x00000000,  # 0x00
+                    0x00000000,  # 0x04
+                    0xabadcafe,  # 0x08
+                    0xbaadf00d,  # 0x0c
+                    0xcafefeed,  # 0x10
+                    0xdeadc0de,  # 0x14
+                    0x00000000,  # 0x18
+                    0x00000000,  # 0x1c
+                ],
+            ),
+            "32bit_long_sequential": dict(pattern=[], expected=[0] * 64),
+        }
+
+        # 32bit_long_sequential
+        for i in range(32):
+            data["32bit_long_sequential"]["pattern"].append((i, 64 + i))
+            data["32bit_long_sequential"]["expected"][i] = 64 + i
+
+        def half_width(data, from_width):
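+            # Split each word of `data` into (low half, high half) so that expected values
+            # computed for a wide bus can be reused as the expected values at half the width,
+            # e.g. half_width([0x33221100], from_width=32) -> [0x1100, 0x3322].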
+            half_mask = 2**(from_width//2) - 1
+            chunks = [(val & half_mask, (val >> from_width//2) & half_mask) for val in data]
+            return list(itertools.chain.from_iterable(chunks))
+
+        # down conversion
+        data["64bit_to_16bit"] = dict(
+            pattern  = data["64bit_to_32bit"]["pattern"].copy(),
+            expected = half_width(data["64bit_to_32bit"]["expected"], from_width=32),
+        )
+        data["64bit_to_8bit"] = dict(
+            pattern  = data["64bit_to_16bit"]["pattern"].copy(),
+            expected = half_width(data["64bit_to_16bit"]["expected"], from_width=16),
+        )
+
+        # up conversion
+        data["8bit_to_16bit"] = dict(
+            pattern  = data["8bit_to_32bit"]["pattern"].copy(),
+            expected = half_width(data["8bit_to_32bit"]["expected"], from_width=32),
+        )
+        data["32bit_to_128bit"] = dict(
+            pattern  = data["32bit_to_256bit"]["pattern"].copy(),
+            expected = half_width(data["32bit_to_256bit"]["expected"], from_width=256),
+        )
+        data["32bit_to_64bit"] = dict(
+            pattern  = data["32bit_to_128bit"]["pattern"].copy(),
+            expected = half_width(data["32bit_to_128bit"]["expected"], from_width=128),
+        )
+
+        return data
diff --git a/test/gen_access_pattern.py b/test/gen_access_pattern.py
new file mode 100755 (executable)
index 0000000..ba52145
--- /dev/null
@@ -0,0 +1,35 @@
+#!/usr/bin/env python3
+
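+# Example invocation (hypothetical values):
+#   ./gen_access_pattern.py 0x0 1024 32 --seed 42 > access_pattern.csv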
+import random
+import argparse
+
+def main():
+    description = """
+    Generate random access pattern for LiteDRAM Pattern Generator/Checker.
+
+    Each address in range [base, base+length) will be accessed only once, but in random order.
+    This ensures that no data will be overwritten.
+    """
+    parser = argparse.ArgumentParser(description=description)
+    parser.add_argument("base",       help="Base address")
+    parser.add_argument("length",     help="Number of (address, data) pairs")
+    parser.add_argument("data_width", help="Width of data (used to determine max value)")
+    parser.add_argument("--seed",     help="Use given random seed (int)")
+    args = parser.parse_args()
+
+    if args.seed:
+        random.seed(int(args.seed, 0))
+
+    base       = int(args.base, 0)
+    length     = int(args.length, 0)
+    data_width = int(args.data_width, 0)
+
+    address = list(range(base, base + length))
+    random.shuffle(address)
+    data = [random.randrange(0, 2**data_width) for _ in range(length)]
+
+    for a, d in zip(address, data):
+        print("0x{:08x}, 0x{:08x}".format(a, d))
+
+if __name__ == "__main__":
+    main()
diff --git a/test/gen_config.py b/test/gen_config.py
new file mode 100755 (executable)
index 0000000..b778c1f
--- /dev/null
@@ -0,0 +1,138 @@
+#!/usr/bin/env python3
+
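+# Example invocation (hypothetical values); the output is a commented JSON document
+# (also readable as YAML) that can serve as the configuration file for run_benchmarks.py:
+#   ./gen_config.py --sdram-module MT41K128M16 --bist-length 1 4096 > benchmarks.yml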
+import sys
+import json
+import pprint
+import argparse
+import datetime
+import itertools
+
+
+defaults = {
+    "--sdram-module": [
+        "IS42S16160",
+        "IS42S16320",
+        "MT48LC4M16",
+        "MT48LC16M16",
+        "AS4C16M16",
+        "AS4C32M16",
+        "AS4C32M8",
+        "M12L64322A",
+        "M12L16161A",
+        "MT46V32M16",
+        "MT46H32M16",
+        "MT46H32M32",
+        "MT47H128M8",
+        "MT47H32M16",
+        "MT47H64M16",
+        "P3R1GE4JGF",
+        "MT41K64M16",
+        "MT41J128M16",
+        "MT41K128M16",
+        "MT41J256M16",
+        "MT41K256M16",
+        "K4B1G0446F",
+        "K4B2G1646F",
+        "H5TC4G63CFR",
+        "IS43TR16128B",
+        "MT8JTF12864",
+        "MT8KTF51264",
+        #"MT18KSF1G72HZ",
+        #"AS4C256M16D3A",
+        #"MT16KTF1G64HZ",
+        #"EDY4016A",
+        #"MT40A1G8",
+        #"MT40A512M16",
+    ],
+    "--sdram-data-width": [32],
+    "--bist-alternating": [True, False],
+    "--bist-length":      [1, 4096],
+    "--bist-random":      [True, False],
+    "--num-generators":   [1],
+    "--num-checkers":     [1],
+    "--access-pattern":   ["access_pattern.csv"]
+}
+
+
+def convert_string_arg(args, arg, type):
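+    # Argparse defaults are already typed, but values given on the command line arrive as
+    # strings; convert them in place, accepting "true"/"false" and base-prefixed ints (e.g. 0x10).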
+    map_func = {
+        bool: lambda s: {"false": False, "true": True}[s.lower()],
+        int:  lambda s: int(s, 0),
+    }
+    setattr(args, arg, [map_func[type](val) if not isinstance(val, type) else val for val in getattr(args, arg)])
+
+
+def generate_header(args):
+    header = "Auto-generated on {} by {}".format(
+        datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
+        sys.argv[0],
+    )
+    #args_str = pprint.pformat(vars(args), sort_dicts=False)  # FIXME: sort_dicts requires Python >= 3.8
+    args_str = pprint.pformat(vars(args))
+    arg_lines = args_str.split("\n")
+    lines = [60*"=", header, 60*"-", *arg_lines, 60*"="]
+    return "\n".join("# " + line for line in lines)
+
+
+def main():
+    parser = argparse.ArgumentParser(description="Generate configuration for all possible argument combinations.",
+                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    parser.add_argument("--name-format", default="test_%d", help="Name format for i-th test")
+    for name, default in defaults.items():
+        parser.add_argument(name, nargs="+", default=default, help="%s options" % name)
+    args = parser.parse_args()
+
+    # Make sure not to write those as strings
+    convert_string_arg(args, "sdram_data_width", int)
+    convert_string_arg(args, "bist_alternating", bool)
+    convert_string_arg(args, "bist_length",      int)
+    convert_string_arg(args, "bist_random",      bool)
+    convert_string_arg(args, "num_generators",   int)
+    convert_string_arg(args, "num_checkers",     int)
+
+    common_args            = ("sdram_module", "sdram_data_width", "bist_alternating", "num_generators", "num_checkers")
+    generated_pattern_args = ("bist_length", "bist_random")
+    custom_pattern_args    = ("access_pattern", )
+
+    def generated_pattern_configuration(values):
+        config = dict(zip(common_args + generated_pattern_args, values))
+        # Move access pattern parameters deeper
+        config["access_pattern"] = {
+            "bist_length": config.pop("bist_length"),
+            "bist_random": config.pop("bist_random"),
+        }
+        return config
+
+    def custom_pattern_configuration(values):
+        config = dict(zip(common_args + custom_pattern_args, values))
+        # "rename" --access-pattern to access_pattern.pattern_file due to name difference between
+        # command line args and run_benchmarks.py configuration format
+        config["access_pattern"] = {
+            "pattern_file": config.pop("access_pattern"),
+        }
+        return config
+
+    # Iterator over the product of given command line arguments
+    def args_product(names):
+        return itertools.product(*(getattr(args, name) for name in names))
+
+    generated_pattern_iter = zip(itertools.repeat(generated_pattern_configuration), args_product(common_args + generated_pattern_args))
+    custom_pattern_iter    = zip(itertools.repeat(custom_pattern_configuration), args_product(common_args + custom_pattern_args))
+
+    i = 0
+    configurations = {}
+    for config_generator, values in itertools.chain(generated_pattern_iter, custom_pattern_iter):
+        config = config_generator(values)
+        # Ignore unsupported case: bist_random=True and bist_alternating=False
+        if config["access_pattern"].get("bist_random", False) and not config["bist_alternating"]:
+            continue
+        configurations[args.name_format % i] = config
+        i += 1
+
+    json_str = json.dumps(configurations, indent=4)
+    print(generate_header(args))
+    print(json_str)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/test/reference/ddr3_init.h b/test/reference/ddr3_init.h
new file mode 100644 (file)
index 0000000..e8f2f06
--- /dev/null
@@ -0,0 +1,117 @@
+#ifndef __GENERATED_SDRAM_PHY_H
+#define __GENERATED_SDRAM_PHY_H
+#include <hw/common.h>
+#include <generated/csr.h>
+
+#define DFII_CONTROL_SEL        0x01
+#define DFII_CONTROL_CKE        0x02
+#define DFII_CONTROL_ODT        0x04
+#define DFII_CONTROL_RESET_N    0x08
+
+#define DFII_COMMAND_CS         0x01
+#define DFII_COMMAND_WE         0x02
+#define DFII_COMMAND_CAS        0x04
+#define DFII_COMMAND_RAS        0x08
+#define DFII_COMMAND_WRDATA     0x10
+#define DFII_COMMAND_RDDATA     0x20
+
+#define SDRAM_PHY_K7DDRPHY
+#define SDRAM_PHY_PHASES 4
+#define SDRAM_PHY_WRITE_LEVELING_CAPABLE
+#define SDRAM_PHY_READ_LEVELING_CAPABLE
+#define SDRAM_PHY_MODULES DFII_PIX_DATA_BYTES/2
+#define SDRAM_PHY_DELAYS 32
+#define SDRAM_PHY_BITSLIPS 16
+
+static void cdelay(int i);
+
+__attribute__((unused)) static void command_p0(int cmd)
+{
+    sdram_dfii_pi0_command_write(cmd);
+    sdram_dfii_pi0_command_issue_write(1);
+}
+__attribute__((unused)) static void command_p1(int cmd)
+{
+    sdram_dfii_pi1_command_write(cmd);
+    sdram_dfii_pi1_command_issue_write(1);
+}
+__attribute__((unused)) static void command_p2(int cmd)
+{
+    sdram_dfii_pi2_command_write(cmd);
+    sdram_dfii_pi2_command_issue_write(1);
+}
+__attribute__((unused)) static void command_p3(int cmd)
+{
+    sdram_dfii_pi3_command_write(cmd);
+    sdram_dfii_pi3_command_issue_write(1);
+}
+
+
+#define sdram_dfii_pird_address_write(X) sdram_dfii_pi1_address_write(X)
+#define sdram_dfii_piwr_address_write(X) sdram_dfii_pi1_address_write(X)
+#define sdram_dfii_pird_baddress_write(X) sdram_dfii_pi1_baddress_write(X)
+#define sdram_dfii_piwr_baddress_write(X) sdram_dfii_pi1_baddress_write(X)
+#define command_prd(X) command_p1(X)
+#define command_pwr(X) command_p1(X)
+
+#define DFII_PIX_DATA_SIZE CSR_SDRAM_DFII_PI0_WRDATA_SIZE
+
+const unsigned long sdram_dfii_pix_wrdata_addr[SDRAM_PHY_PHASES] = {
+       CSR_SDRAM_DFII_PI0_WRDATA_ADDR,
+       CSR_SDRAM_DFII_PI1_WRDATA_ADDR,
+       CSR_SDRAM_DFII_PI2_WRDATA_ADDR,
+       CSR_SDRAM_DFII_PI3_WRDATA_ADDR
+};
+
+const unsigned long sdram_dfii_pix_rddata_addr[SDRAM_PHY_PHASES] = {
+       CSR_SDRAM_DFII_PI0_RDDATA_ADDR,
+       CSR_SDRAM_DFII_PI1_RDDATA_ADDR,
+       CSR_SDRAM_DFII_PI2_RDDATA_ADDR,
+       CSR_SDRAM_DFII_PI3_RDDATA_ADDR
+};
+
+#define DDRX_MR1 6
+
+static void init_sequence(void)
+{
+       /* Release reset */
+       sdram_dfii_pi0_address_write(0x0);
+       sdram_dfii_pi0_baddress_write(0);
+       sdram_dfii_control_write(DFII_CONTROL_ODT|DFII_CONTROL_RESET_N);
+       cdelay(50000);
+
+       /* Bring CKE high */
+       sdram_dfii_pi0_address_write(0x0);
+       sdram_dfii_pi0_baddress_write(0);
+       sdram_dfii_control_write(DFII_CONTROL_CKE|DFII_CONTROL_ODT|DFII_CONTROL_RESET_N);
+       cdelay(10000);
+
+       /* Load Mode Register 2, CWL=6 */
+       sdram_dfii_pi0_address_write(0x208);
+       sdram_dfii_pi0_baddress_write(2);
+       command_p0(DFII_COMMAND_RAS|DFII_COMMAND_CAS|DFII_COMMAND_WE|DFII_COMMAND_CS);
+
+       /* Load Mode Register 3 */
+       sdram_dfii_pi0_address_write(0x0);
+       sdram_dfii_pi0_baddress_write(3);
+       command_p0(DFII_COMMAND_RAS|DFII_COMMAND_CAS|DFII_COMMAND_WE|DFII_COMMAND_CS);
+
+       /* Load Mode Register 1 */
+       sdram_dfii_pi0_address_write(0x6);
+       sdram_dfii_pi0_baddress_write(1);
+       command_p0(DFII_COMMAND_RAS|DFII_COMMAND_CAS|DFII_COMMAND_WE|DFII_COMMAND_CS);
+
+       /* Load Mode Register 0, CL=7, BL=8 */
+       sdram_dfii_pi0_address_write(0x930);
+       sdram_dfii_pi0_baddress_write(0);
+       command_p0(DFII_COMMAND_RAS|DFII_COMMAND_CAS|DFII_COMMAND_WE|DFII_COMMAND_CS);
+       cdelay(200);
+
+       /* ZQ Calibration */
+       sdram_dfii_pi0_address_write(0x400);
+       sdram_dfii_pi0_baddress_write(0);
+       command_p0(DFII_COMMAND_WE|DFII_COMMAND_CS);
+       cdelay(200);
+
+}
+#endif
diff --git a/test/reference/ddr3_init.py b/test/reference/ddr3_init.py
new file mode 100644 (file)
index 0000000..a530f98
--- /dev/null
@@ -0,0 +1,23 @@
+dfii_control_sel     = 0x01
+dfii_control_cke     = 0x02
+dfii_control_odt     = 0x04
+dfii_control_reset_n = 0x08
+
+dfii_command_cs     = 0x01
+dfii_command_we     = 0x02
+dfii_command_cas    = 0x04
+dfii_command_ras    = 0x08
+dfii_command_wrdata = 0x10
+dfii_command_rddata = 0x20
+
+ddrx_mr1 = 0x6
+
+init_sequence = [
+    ("Release reset", 0, 0, dfii_control_odt|dfii_control_reset_n, 50000),
+    ("Bring CKE high", 0, 0, dfii_control_cke|dfii_control_odt|dfii_control_reset_n, 10000),
+    ("Load Mode Register 2, CWL=6", 520, 2, dfii_command_ras|dfii_command_cas|dfii_command_we|dfii_command_cs, 0),
+    ("Load Mode Register 3", 0, 3, dfii_command_ras|dfii_command_cas|dfii_command_we|dfii_command_cs, 0),
+    ("Load Mode Register 1", 6, 1, dfii_command_ras|dfii_command_cas|dfii_command_we|dfii_command_cs, 0),
+    ("Load Mode Register 0, CL=7, BL=8", 2352, 0, dfii_command_ras|dfii_command_cas|dfii_command_we|dfii_command_cs, 200),
+    ("ZQ Calibration", 1024, 0, dfii_command_we|dfii_command_cs, 200),
+]
diff --git a/test/reference/ddr4_init.h b/test/reference/ddr4_init.h
new file mode 100644 (file)
index 0000000..ba42f19
--- /dev/null
@@ -0,0 +1,133 @@
+#ifndef __GENERATED_SDRAM_PHY_H
+#define __GENERATED_SDRAM_PHY_H
+#include <hw/common.h>
+#include <generated/csr.h>
+
+#define DFII_CONTROL_SEL        0x01
+#define DFII_CONTROL_CKE        0x02
+#define DFII_CONTROL_ODT        0x04
+#define DFII_CONTROL_RESET_N    0x08
+
+#define DFII_COMMAND_CS         0x01
+#define DFII_COMMAND_WE         0x02
+#define DFII_COMMAND_CAS        0x04
+#define DFII_COMMAND_RAS        0x08
+#define DFII_COMMAND_WRDATA     0x10
+#define DFII_COMMAND_RDDATA     0x20
+
+#define SDRAM_PHY_USDDRPHY
+#define SDRAM_PHY_PHASES 4
+#define SDRAM_PHY_WRITE_LEVELING_CAPABLE
+#define SDRAM_PHY_WRITE_LEVELING_REINIT
+#define SDRAM_PHY_READ_LEVELING_CAPABLE
+#define SDRAM_PHY_MODULES DFII_PIX_DATA_BYTES/2
+#define SDRAM_PHY_DELAYS 512
+#define SDRAM_PHY_BITSLIPS 16
+
+static void cdelay(int i);
+
+__attribute__((unused)) static void command_p0(int cmd)
+{
+    sdram_dfii_pi0_command_write(cmd);
+    sdram_dfii_pi0_command_issue_write(1);
+}
+__attribute__((unused)) static void command_p1(int cmd)
+{
+    sdram_dfii_pi1_command_write(cmd);
+    sdram_dfii_pi1_command_issue_write(1);
+}
+__attribute__((unused)) static void command_p2(int cmd)
+{
+    sdram_dfii_pi2_command_write(cmd);
+    sdram_dfii_pi2_command_issue_write(1);
+}
+__attribute__((unused)) static void command_p3(int cmd)
+{
+    sdram_dfii_pi3_command_write(cmd);
+    sdram_dfii_pi3_command_issue_write(1);
+}
+
+
+#define sdram_dfii_pird_address_write(X) sdram_dfii_pi1_address_write(X)
+#define sdram_dfii_piwr_address_write(X) sdram_dfii_pi2_address_write(X)
+#define sdram_dfii_pird_baddress_write(X) sdram_dfii_pi1_baddress_write(X)
+#define sdram_dfii_piwr_baddress_write(X) sdram_dfii_pi2_baddress_write(X)
+#define command_prd(X) command_p1(X)
+#define command_pwr(X) command_p2(X)
+
+#define DFII_PIX_DATA_SIZE CSR_SDRAM_DFII_PI0_WRDATA_SIZE
+
+const unsigned long sdram_dfii_pix_wrdata_addr[SDRAM_PHY_PHASES] = {
+       CSR_SDRAM_DFII_PI0_WRDATA_ADDR,
+       CSR_SDRAM_DFII_PI1_WRDATA_ADDR,
+       CSR_SDRAM_DFII_PI2_WRDATA_ADDR,
+       CSR_SDRAM_DFII_PI3_WRDATA_ADDR
+};
+
+const unsigned long sdram_dfii_pix_rddata_addr[SDRAM_PHY_PHASES] = {
+       CSR_SDRAM_DFII_PI0_RDDATA_ADDR,
+       CSR_SDRAM_DFII_PI1_RDDATA_ADDR,
+       CSR_SDRAM_DFII_PI2_RDDATA_ADDR,
+       CSR_SDRAM_DFII_PI3_RDDATA_ADDR
+};
+
+#define DDRX_MR1 769
+
+static void init_sequence(void)
+{
+       /* Release reset */
+       sdram_dfii_pi0_address_write(0x0);
+       sdram_dfii_pi0_baddress_write(0);
+       sdram_dfii_control_write(DFII_CONTROL_ODT|DFII_CONTROL_RESET_N);
+       cdelay(50000);
+
+       /* Bring CKE high */
+       sdram_dfii_pi0_address_write(0x0);
+       sdram_dfii_pi0_baddress_write(0);
+       sdram_dfii_control_write(DFII_CONTROL_CKE|DFII_CONTROL_ODT|DFII_CONTROL_RESET_N);
+       cdelay(10000);
+
+       /* Load Mode Register 3 */
+       sdram_dfii_pi0_address_write(0x0);
+       sdram_dfii_pi0_baddress_write(3);
+       command_p0(DFII_COMMAND_RAS|DFII_COMMAND_CAS|DFII_COMMAND_WE|DFII_COMMAND_CS);
+
+       /* Load Mode Register 6 */
+       sdram_dfii_pi0_address_write(0x0);
+       sdram_dfii_pi0_baddress_write(6);
+       command_p0(DFII_COMMAND_RAS|DFII_COMMAND_CAS|DFII_COMMAND_WE|DFII_COMMAND_CS);
+
+       /* Load Mode Register 5 */
+       sdram_dfii_pi0_address_write(0x0);
+       sdram_dfii_pi0_baddress_write(5);
+       command_p0(DFII_COMMAND_RAS|DFII_COMMAND_CAS|DFII_COMMAND_WE|DFII_COMMAND_CS);
+
+       /* Load Mode Register 4 */
+       sdram_dfii_pi0_address_write(0x0);
+       sdram_dfii_pi0_baddress_write(4);
+       command_p0(DFII_COMMAND_RAS|DFII_COMMAND_CAS|DFII_COMMAND_WE|DFII_COMMAND_CS);
+
+       /* Load Mode Register 2, CWL=9 */
+       sdram_dfii_pi0_address_write(0x200);
+       sdram_dfii_pi0_baddress_write(2);
+       command_p0(DFII_COMMAND_RAS|DFII_COMMAND_CAS|DFII_COMMAND_WE|DFII_COMMAND_CS);
+
+       /* Load Mode Register 1 */
+       sdram_dfii_pi0_address_write(0x301);
+       sdram_dfii_pi0_baddress_write(1);
+       command_p0(DFII_COMMAND_RAS|DFII_COMMAND_CAS|DFII_COMMAND_WE|DFII_COMMAND_CS);
+
+       /* Load Mode Register 0, CL=11, BL=8 */
+       sdram_dfii_pi0_address_write(0x110);
+       sdram_dfii_pi0_baddress_write(0);
+       command_p0(DFII_COMMAND_RAS|DFII_COMMAND_CAS|DFII_COMMAND_WE|DFII_COMMAND_CS);
+       cdelay(200);
+
+       /* ZQ Calibration */
+       sdram_dfii_pi0_address_write(0x400);
+       sdram_dfii_pi0_baddress_write(0);
+       command_p0(DFII_COMMAND_WE|DFII_COMMAND_CS);
+       cdelay(200);
+
+}
+#endif
diff --git a/test/reference/ddr4_init.py b/test/reference/ddr4_init.py
new file mode 100644 (file)
index 0000000..49051ac
--- /dev/null
@@ -0,0 +1,26 @@
+dfii_control_sel     = 0x01
+dfii_control_cke     = 0x02
+dfii_control_odt     = 0x04
+dfii_control_reset_n = 0x08
+
+dfii_command_cs     = 0x01
+dfii_command_we     = 0x02
+dfii_command_cas    = 0x04
+dfii_command_ras    = 0x08
+dfii_command_wrdata = 0x10
+dfii_command_rddata = 0x20
+
+ddrx_mr1 = 0x301
+
+init_sequence = [
+    ("Release reset", 0, 0, dfii_control_odt|dfii_control_reset_n, 50000),
+    ("Bring CKE high", 0, 0, dfii_control_cke|dfii_control_odt|dfii_control_reset_n, 10000),
+    ("Load Mode Register 3", 0, 3, dfii_command_ras|dfii_command_cas|dfii_command_we|dfii_command_cs, 0),
+    ("Load Mode Register 6", 0, 6, dfii_command_ras|dfii_command_cas|dfii_command_we|dfii_command_cs, 0),
+    ("Load Mode Register 5", 0, 5, dfii_command_ras|dfii_command_cas|dfii_command_we|dfii_command_cs, 0),
+    ("Load Mode Register 4", 0, 4, dfii_command_ras|dfii_command_cas|dfii_command_we|dfii_command_cs, 0),
+    ("Load Mode Register 2, CWL=9", 512, 2, dfii_command_ras|dfii_command_cas|dfii_command_we|dfii_command_cs, 0),
+    ("Load Mode Register 1", 769, 1, dfii_command_ras|dfii_command_cas|dfii_command_we|dfii_command_cs, 0),
+    ("Load Mode Register 0, CL=11, BL=8", 272, 0, dfii_command_ras|dfii_command_cas|dfii_command_we|dfii_command_cs, 200),
+    ("ZQ Calibration", 1024, 0, dfii_command_we|dfii_command_cs, 200),
+]
diff --git a/test/reference/sdr_init.h b/test/reference/sdr_init.h
new file mode 100644 (file)
index 0000000..81f975a
--- /dev/null
@@ -0,0 +1,90 @@
+#ifndef __GENERATED_SDRAM_PHY_H
+#define __GENERATED_SDRAM_PHY_H
+#include <hw/common.h>
+#include <generated/csr.h>
+
+#define DFII_CONTROL_SEL        0x01
+#define DFII_CONTROL_CKE        0x02
+#define DFII_CONTROL_ODT        0x04
+#define DFII_CONTROL_RESET_N    0x08
+
+#define DFII_COMMAND_CS         0x01
+#define DFII_COMMAND_WE         0x02
+#define DFII_COMMAND_CAS        0x04
+#define DFII_COMMAND_RAS        0x08
+#define DFII_COMMAND_WRDATA     0x10
+#define DFII_COMMAND_RDDATA     0x20
+
+#define SDRAM_PHY_GENSDRPHY
+#define SDRAM_PHY_PHASES 1
+
+static void cdelay(int i);
+
+__attribute__((unused)) static void command_p0(int cmd)
+{
+    sdram_dfii_pi0_command_write(cmd);
+    sdram_dfii_pi0_command_issue_write(1);
+}
+
+
+#define sdram_dfii_pird_address_write(X) sdram_dfii_pi0_address_write(X)
+#define sdram_dfii_piwr_address_write(X) sdram_dfii_pi0_address_write(X)
+#define sdram_dfii_pird_baddress_write(X) sdram_dfii_pi0_baddress_write(X)
+#define sdram_dfii_piwr_baddress_write(X) sdram_dfii_pi0_baddress_write(X)
+#define command_prd(X) command_p0(X)
+#define command_pwr(X) command_p0(X)
+
+#define DFII_PIX_DATA_SIZE CSR_SDRAM_DFII_PI0_WRDATA_SIZE
+
+const unsigned long sdram_dfii_pix_wrdata_addr[SDRAM_PHY_PHASES] = {
+       CSR_SDRAM_DFII_PI0_WRDATA_ADDR
+};
+
+const unsigned long sdram_dfii_pix_rddata_addr[SDRAM_PHY_PHASES] = {
+       CSR_SDRAM_DFII_PI0_RDDATA_ADDR
+};
+
+static void init_sequence(void)
+{
+       /* Bring CKE high */
+       sdram_dfii_pi0_address_write(0x0);
+       sdram_dfii_pi0_baddress_write(0);
+       sdram_dfii_control_write(DFII_CONTROL_CKE|DFII_CONTROL_ODT|DFII_CONTROL_RESET_N);
+       cdelay(20000);
+
+       /* Precharge All */
+       sdram_dfii_pi0_address_write(0x400);
+       sdram_dfii_pi0_baddress_write(0);
+       command_p0(DFII_COMMAND_RAS|DFII_COMMAND_WE|DFII_COMMAND_CS);
+
+       /* Load Mode Register / Reset DLL, CL=2, BL=1 */
+       sdram_dfii_pi0_address_write(0x120);
+       sdram_dfii_pi0_baddress_write(0);
+       command_p0(DFII_COMMAND_RAS|DFII_COMMAND_CAS|DFII_COMMAND_WE|DFII_COMMAND_CS);
+       cdelay(200);
+
+       /* Precharge All */
+       sdram_dfii_pi0_address_write(0x400);
+       sdram_dfii_pi0_baddress_write(0);
+       command_p0(DFII_COMMAND_RAS|DFII_COMMAND_WE|DFII_COMMAND_CS);
+
+       /* Auto Refresh */
+       sdram_dfii_pi0_address_write(0x0);
+       sdram_dfii_pi0_baddress_write(0);
+       command_p0(DFII_COMMAND_RAS|DFII_COMMAND_CAS|DFII_COMMAND_CS);
+       cdelay(4);
+
+       /* Auto Refresh */
+       sdram_dfii_pi0_address_write(0x0);
+       sdram_dfii_pi0_baddress_write(0);
+       command_p0(DFII_COMMAND_RAS|DFII_COMMAND_CAS|DFII_COMMAND_CS);
+       cdelay(4);
+
+       /* Load Mode Register / CL=2, BL=1 */
+       sdram_dfii_pi0_address_write(0x20);
+       sdram_dfii_pi0_baddress_write(0);
+       command_p0(DFII_COMMAND_RAS|DFII_COMMAND_CAS|DFII_COMMAND_WE|DFII_COMMAND_CS);
+       cdelay(200);
+
+}
+#endif
diff --git a/test/reference/sdr_init.py b/test/reference/sdr_init.py
new file mode 100644 (file)
index 0000000..30e464f
--- /dev/null
@@ -0,0 +1,21 @@
+dfii_control_sel     = 0x01
+dfii_control_cke     = 0x02
+dfii_control_odt     = 0x04
+dfii_control_reset_n = 0x08
+
+dfii_command_cs     = 0x01
+dfii_command_we     = 0x02
+dfii_command_cas    = 0x04
+dfii_command_ras    = 0x08
+dfii_command_wrdata = 0x10
+dfii_command_rddata = 0x20
+
+init_sequence = [
+    ("Bring CKE high", 0, 0, dfii_control_cke|dfii_control_odt|dfii_control_reset_n, 20000),
+    ("Precharge All", 1024, 0, dfii_command_ras|dfii_command_we|dfii_command_cs, 0),
+    ("Load Mode Register / Reset DLL, CL=2, BL=1", 288, 0, dfii_command_ras|dfii_command_cas|dfii_command_we|dfii_command_cs, 200),
+    ("Precharge All", 1024, 0, dfii_command_ras|dfii_command_we|dfii_command_cs, 0),
+    ("Auto Refresh", 0, 0, dfii_command_ras|dfii_command_cas|dfii_command_cs, 4),
+    ("Auto Refresh", 0, 0, dfii_command_ras|dfii_command_cas|dfii_command_cs, 4),
+    ("Load Mode Register / CL=2, BL=1", 32, 0, dfii_command_ras|dfii_command_cas|dfii_command_we|dfii_command_cs, 200),
+]
diff --git a/test/run_benchmarks.py b/test/run_benchmarks.py
new file mode 100755 (executable)
index 0000000..8ae80bf
--- /dev/null
@@ -0,0 +1,670 @@
+#!/usr/bin/env python3
+
+# This file is Copyright (c) 2020 Antmicro <www.antmicro.com>
+# License: BSD
+
+# Limitations/TODO
+# - add configurable sdram_clk_freq - using hardcoded value now
+# - sdram_controller_data_width - try to expose the value from litex_sim to avoid duplicated code
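+#
+# Example invocation (hypothetical values):
+#   ./run_benchmarks.py benchmarks.yml --njobs 4 --html --results-cache results.json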
+
+import os
+import re
+import sys
+import json
+import argparse
+import datetime
+import subprocess
+from collections import defaultdict, namedtuple
+
+import yaml
+try:
+    import numpy as np
+    import pandas as pd
+    import matplotlib
+    from matplotlib.ticker import FuncFormatter, PercentFormatter, ScalarFormatter
+    _summary = True
+except ImportError as e:
+    _summary = False
+    print("[WARNING] Results summary not available:", e, file=sys.stderr)
+
+from litex.tools.litex_sim import get_sdram_phy_settings, sdram_module_nphases
+from litedram import modules as litedram_modules
+from litedram.common import Settings as _Settings
+
+from test import benchmark
+
+# Benchmark configuration --------------------------------------------------------------------------
+
+class Settings(_Settings):
+    def as_dict(self):
+        d = dict()
+        for attr, value in vars(self).items():
+            if attr == "self" or attr.startswith("_"):
+                continue
+            if isinstance(value, Settings):
+                value = value.as_dict()
+            d[attr] = value
+        return d
+
+
+class GeneratedAccess(Settings):
+    def __init__(self, bist_length, bist_random):
+        self.set_attributes(locals())
+
+    @property
+    def length(self):
+        return self.bist_length
+
+    def as_args(self):
+        args = ["--bist-length=%d" % self.bist_length]
+        if self.bist_random:
+            args.append("--bist-random")
+        return args
+
+
+class CustomAccess(Settings):
+    def __init__(self, pattern_file):
+        self.set_attributes(locals())
+
+    @property
+    def pattern(self):
+        # We have to load the file to know the pattern length; cache it when first requested
+        if not hasattr(self, "_pattern"):
+            path = self.pattern_file
+            if not os.path.isabs(path):
+                benchmark_dir = os.path.dirname(benchmark.__file__)
+                path = os.path.join(benchmark_dir, path)
+            self._pattern = benchmark.load_access_pattern(path)
+        return self._pattern
+
+    @property
+    def length(self):
+        return len(self.pattern)
+
+    def as_args(self):
+        return ["--access-pattern=%s" % self.pattern_file]
+
+
+class BenchmarkConfiguration(Settings):
+    def __init__(self, name, sdram_module, sdram_data_width, bist_alternating,
+                 num_generators, num_checkers, access_pattern):
+        self.set_attributes(locals())
+
+    def as_args(self):
+        args = [
+            "--sdram-module=%s" % self.sdram_module,
+            "--sdram-data-width=%d" % self.sdram_data_width,
+            "--num-generators=%d" % self.num_generators,
+            "--num-checkers=%d" % self.num_checkers,
+        ]
+        if self.bist_alternating:
+            args.append("--bist-alternating")
+        args += self.access_pattern.as_args()
+        return args
+
+    def __eq__(self, other):
+        if not isinstance(other, BenchmarkConfiguration):
+            return NotImplemented
+        return self.as_dict() == other.as_dict()
+
+    @property
+    def length(self):
+        return self.access_pattern.length
+
+    @classmethod
+    def from_dict(cls, d):
+        access_cls = CustomAccess if "pattern_file" in d["access_pattern"] else GeneratedAccess
+        d["access_pattern"] = access_cls(**d["access_pattern"])
+        return cls(**d)
+
+    @classmethod
+    def load_yaml(cls, yaml_file):
+        with open(yaml_file) as f:
+            description = yaml.safe_load(f)
+        configs = []
+        for name, desc in description.items():
+            desc["name"] = name
+            configs.append(cls.from_dict(desc))
+        return configs
+
+    def __repr__(self):
+        return "BenchmarkConfiguration(%s)" % self.as_dict()
+
+    @property
+    def sdram_clk_freq(self):
+        return 100e6  # FIXME: Value of 100MHz is hardcoded in litex_sim
+
+    @property
+    def sdram_memtype(self):
+        # Use values from module class (no need to instantiate it)
+        sdram_module_cls = getattr(litedram_modules, self.sdram_module)
+        return sdram_module_cls.memtype
+
+    @property
+    def sdram_controller_data_width(self):
+        nphases = sdram_module_nphases[self.sdram_memtype]
+        dfi_databits = self.sdram_data_width * (1 if self.sdram_memtype == "SDR" else 2)
+        return dfi_databits * nphases
+
+# Benchmark results --------------------------------------------------------------------------------
+
+# Constructs a Python regex named group
+def ng(name, regex):
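+    # e.g. ng("value", "[0-9]+") -> r"(?P<value>[0-9]+)"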
+    return r"(?P<{}>{})".format(name, regex)
+
+
+def _compiled_pattern(stage, var):
+    pattern_fmt = r"{stage}\s+{var}:\s+{value}"
+    pattern = pattern_fmt.format(
+        stage = stage,
+        var   = var,
+        value = ng("value", "[0-9]+"),
+    )
+    return re.compile(pattern)
+
+
+class BenchmarkResult:
+    # Pre-compiled patterns for all benchmarks
+    patterns = {
+        "generator_ticks": _compiled_pattern("BIST-GENERATOR", "ticks"),
+        "checker_errors":  _compiled_pattern("BIST-CHECKER", "errors"),
+        "checker_ticks":   _compiled_pattern("BIST-CHECKER", "ticks"),
+    }
+
+    @staticmethod
+    def find(pattern, output):
+        result = pattern.search(output)
+        assert result is not None, \
+            "Could not find pattern {} in output".format(pattern)
+        return int(result.group("value"))
+
+    def __init__(self, output):
+        self._output = output
+        for attr, pattern in self.patterns.items():
+            setattr(self, attr, self.find(pattern, output))
+
+    def __repr__(self):
+        d = {attr: getattr(self, attr) for attr in self.patterns.keys()}
+        return "BenchmarkResult(%s)" % d
+
+# Results summary ----------------------------------------------------------------------------------
+
+def human_readable(value):
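+    # Return a (multiplier, prefix) pair such that value * multiplier drops below 1024
+    # (up to the "T" prefix), e.g. human_readable(2048) -> (1/1024, "k").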
+    binary_prefixes = ["", "k", "M", "G", "T"]
+    mult = 1.0
+    for prefix in binary_prefixes:
+        if value * mult < 1024:
+            break
+        mult /= 1024
+    return mult, prefix
+
+
+def clocks_fmt(clocks):
+    return "{:d} clk".format(int(clocks))
+
+
+def bandwidth_fmt(bw):
+    mult, prefix = human_readable(bw)
+    return "{:.1f} {}bps".format(bw * mult, prefix)
+
+
+def efficiency_fmt(eff):
+    return "{:.1f} %".format(eff * 100)
+
+
+def get_git_file_path(filename):
+    cmd  = ["git", "ls-files", "--full-name", filename]
+    proc = subprocess.run(cmd, stdout=subprocess.PIPE, cwd=os.path.dirname(__file__))
+    return proc.stdout.decode().strip() if proc.returncode == 0 else ""
+
+
+def get_git_revision_hash(short=False):
+    short = ["--short"] if short else []
+    cmd   = ["git", "rev-parse", *short, "HEAD"]
+    proc  = subprocess.run(cmd, stdout=subprocess.PIPE, cwd=os.path.dirname(__file__))
+    return proc.stdout.decode().strip() if proc.returncode == 0 else ""
+
+
+class ResultsSummary:
+    def __init__(self, run_data, plots_dir="plots"):
+        self.plots_dir = plots_dir
+
+        # Because .sdram_controller_data_width may fail for unimplemented modules
+        def except_none(func):
+            try:
+                return func()
+            except Exception:
+                return None
+
+        # Gather results into tabular data
+        column_mappings = {
+            "name":             lambda d: d.config.name,
+            "sdram_module":     lambda d: d.config.sdram_module,
+            "sdram_data_width": lambda d: d.config.sdram_data_width,
+            "bist_alternating": lambda d: d.config.bist_alternating,
+            "num_generators":   lambda d: d.config.num_generators,
+            "num_checkers":     lambda d: d.config.num_checkers,
+            "bist_length":      lambda d: getattr(d.config.access_pattern, "bist_length", None),
+            "bist_random":      lambda d: getattr(d.config.access_pattern, "bist_random", None),
+            "pattern_file":     lambda d: getattr(d.config.access_pattern, "pattern_file", None),
+            "length":           lambda d: d.config.length,
+            "generator_ticks":  lambda d: getattr(d.result, "generator_ticks", None),  # None means benchmark failure
+            "checker_errors":   lambda d: getattr(d.result, "checker_errors", None),
+            "checker_ticks":    lambda d: getattr(d.result, "checker_ticks", None),
+            "ctrl_data_width":  lambda d: except_none(lambda: d.config.sdram_controller_data_width),
+            "sdram_memtype":    lambda d: except_none(lambda: d.config.sdram_memtype),
+            "clk_freq":         lambda d: d.config.sdram_clk_freq,
+        }
+        columns = {name: [mapping(data) for data in run_data] for name, mapping in column_mappings.items()}
+        self._df = df = pd.DataFrame(columns)
+
+        # Replace None with NaN
+        df.fillna(value=np.nan, inplace=True)
+
+        # Compute other metrics based on ticks and configuration parameters
+        df["clk_period"] = 1 / df["clk_freq"]
+        # Bandwidth is the number of bits transferred per unit of time;
+        # with N generators/checkers we actually process N times more data
+        df["write_bandwidth"] = (8 * df["length"] * df["num_generators"]) / (df["generator_ticks"] * df["clk_period"])
+        df["read_bandwidth"]  = (8 * df["length"] * df["num_checkers"]) / (df["checker_ticks"] * df["clk_period"])
+
+        # Efficiency is the ratio of write/read commands issued to the number of cycles spent writing/reading (ticks);
+        # for multiple generators/checkers, multiply by their number
+        df["cmd_count"]        = df["length"] / (df["ctrl_data_width"] / 8)
+        df["write_efficiency"] = df["cmd_count"] * df["num_generators"] / df["generator_ticks"]
+        df["read_efficiency"]  = df["cmd_count"] * df["num_checkers"] / df["checker_ticks"]
+
+        df["write_latency"] = df[df["bist_length"] == 1]["generator_ticks"]
+        df["read_latency"]  = df[df["bist_length"] == 1]["checker_ticks"]
+
+        # Boolean distinction between latency benchmarks and sequence benchmarks,
+        # as their results differ significantly
+        df["is_latency"] = ~pd.isna(df["write_latency"])
+        assert (df["is_latency"] == ~pd.isna(df["read_latency"])).all(), \
+            "write_latency and read_latency should both have a value or both be NaN"
+
+        # Data formatting for text summary
+        self.text_formatters = {
+            "write_bandwidth":  bandwidth_fmt,
+            "read_bandwidth":   bandwidth_fmt,
+            "write_efficiency": efficiency_fmt,
+            "read_efficiency":  efficiency_fmt,
+            "write_latency":    clocks_fmt,
+            "read_latency":     clocks_fmt,
+        }
+
+        # Data formatting for plot summary
+        self.plot_xticks_formatters = {
+            "write_bandwidth":  FuncFormatter(lambda value, pos: bandwidth_fmt(value)),
+            "read_bandwidth":   FuncFormatter(lambda value, pos: bandwidth_fmt(value)),
+            "write_efficiency": PercentFormatter(1.0),
+            "read_efficiency":  PercentFormatter(1.0),
+            "write_latency":    ScalarFormatter(),
+            "read_latency":     ScalarFormatter(),
+        }
+
+    def df(self, ok=True, failures=False):
+        is_failure = lambda df: pd.isna(df["generator_ticks"]) | pd.isna(df["checker_ticks"]) | pd.isna(df["checker_errors"])
+        df = self._df
+        if not ok:  # remove ok
+            is_ok = ~is_failure(df)
+            df = df[~is_ok]
+        if not failures:  # remove failures
+            df = df[~is_failure(df)]
+        return df
+
+    def header(self, text):
+        return "===> {}".format(text)
+
+    def print_df(self, title, df):
+        # Make sure all data will be shown
+        with pd.option_context("display.max_rows", None, "display.max_columns", None, "display.width", None):
+            print(self.header(title + ":"))
+            print(df)
+
+    def get_summary(self, df, mask=None, columns=None, column_formatting=None, sort_kwargs=None):
+        # Work on a copy
+        df = df.copy()
+
+        if sort_kwargs is not None:
+            df = df.sort_values(**sort_kwargs)
+
+        if column_formatting is not None:
+            for column, mapping in column_formatting.items():
+                old        = "_{}".format(column)
+                df[old]    = df[column].copy()
+                df[column] = df[column].map(lambda value: mapping(value) if not pd.isna(value) else value)
+
+        df = df[mask] if mask is not None else df
+        df = df[columns] if columns is not None else df
+
+        return df
+
+    def text_summary(self):
+        for title, df in self.groupped_results():
+            self.print_df(title, df)
+            print()
+
+    def html_summary(self, output_dir):
+        import jinja2
+
+        tables = {}
+        names  = {}
+        for title, df in self.groupped_results():
+            table_id = title.lower().replace(" ", "_")
+
+            tables[table_id] = df.to_html(table_id=table_id, border=0)
+            names[table_id]  = title
+
+        template_dir = os.path.join(os.path.dirname(__file__), "summary")
+        env          = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dir))
+        template     = env.get_template("summary.html.jinja2")
+
+        os.makedirs(output_dir, exist_ok=True)
+        with open(os.path.join(output_dir, "summary.html"), "w") as f:
+            f.write(template.render(
+                title           = "LiteDRAM benchmarks summary",
+                tables          = tables,
+                names           = names,
+                script_path     = get_git_file_path(__file__),
+                revision        = get_git_revision_hash(),
+                revision_short  = get_git_revision_hash(short=True),
+                generation_date = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
+            ))
+
+    def groupped_results(self, formatters=None):
+        df = self.df()
+
+        if formatters is None:
+            formatters = self.text_formatters
+
+        common_columns = [
+            "name", "sdram_module", "sdram_memtype", "sdram_data_width",
+            "bist_alternating", "num_generators", "num_checkers"
+        ]
+        latency_columns = ["write_latency", "read_latency"]
+        performance_columns = [
+            "write_bandwidth", "read_bandwidth", "write_efficiency", "read_efficiency"
+        ]
+        failure_columns = [
+            "bist_length", "bist_random", "pattern_file", "length",
+            "generator_ticks", "checker_errors", "checker_ticks"
+        ]
+
+        yield "Latency", self.get_summary(df,
+            mask              = df["is_latency"] == True,
+            columns           = common_columns + latency_columns,
+            column_formatting = formatters,
+        )
+        yield "Custom access pattern", self.get_summary(df,
+            mask              = (df["is_latency"] == False) & (~pd.isna(df["pattern_file"])),
+            columns           = common_columns + ["length", "pattern_file"] + performance_columns,
+            column_formatting = formatters,
+        )
+        yield "Sequential access pattern", self.get_summary(df,
+            mask              = (df["is_latency"] == False) & (pd.isna(df["pattern_file"])) & (df["bist_random"] == False),
+            columns           = common_columns + ["bist_length"] + performance_columns, # could be length
+            column_formatting = formatters,
+        )
+        yield "Random access pattern", self.get_summary(df,
+            mask              = (df["is_latency"] == False) & (pd.isna(df["pattern_file"])) & (df["bist_random"] == True),
+            columns           = common_columns + ["bist_length"] + performance_columns,
+            column_formatting = formatters,
+        )
+        yield "Failures", self.get_summary(self.df(ok=False, failures=True),
+            columns           = common_columns + failure_columns,
+            column_formatting = None,
+        )
+
+    def plot_summary(self, plots_dir="plots", backend="Agg", theme="default", save_format="png", **savefig_kw):
+        matplotlib.use(backend)
+        import matplotlib.pyplot as plt
+        plt.style.use(theme)
+
+        for title, df in self.groupped_results(formatters={}):
+            for column in self.plot_xticks_formatters.keys():
+                if column not in df.columns or df[column].empty:
+                    continue
+                axis = self.plot_df(title, df, column)
+
+                # construct path
+                def path_name(name):
+                    return name.lower().replace(" ", "_")
+
+                filename = "{}.{}".format(path_name(column), save_format)
+                path     = os.path.join(plots_dir, path_name(title), filename)
+                os.makedirs(os.path.dirname(path), exist_ok=True)
+
+                # save figure
+                axis.get_figure().savefig(path, **savefig_kw)
+
+        if backend != "Agg":
+            plt.show()
+
+    def plot_df(self, title, df, column, fig_width=6.4, fig_min_height=2.2, save_format="png", save_filename=None):
+        if save_filename is None:
+            save_filename = os.path.join(self.plots_dir, title.lower().replace(" ", "_"))
+
+        axis = df.plot(kind="barh", x="name", y=column, title=title, grid=True, legend=False)
+        fig = axis.get_figure()
+
+        if column in self.plot_xticks_formatters:
+            axis.xaxis.set_major_formatter(self.plot_xticks_formatters[column])
+            axis.xaxis.set_tick_params(rotation=15)
+        axis.spines["top"].set_visible(False)
+        axis.spines["right"].set_visible(False)
+        axis.set_axisbelow(True)
+        axis.set_ylabel("")  # No need for label as we have only one series
+
+        # For a large number of rows the bar labels start overlapping,
+        # so use a fixed ratio between the number of rows and the figure height
+        n_ok = 16
+        new_height = (fig_width / n_ok) * len(df)
+        fig.set_size_inches(fig_width, max(fig_min_height, new_height))
+
+        # Remove empty spaces
+        fig.tight_layout()
+
+        return axis
+
+# Run ----------------------------------------------------------------------------------------------
+
+class RunCache(list):
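+    # List of RunData(config, result) entries that can be dumped to/loaded from JSON,
+    # so that summaries can be regenerated without rerunning the benchmarks (--results-cache).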
+    RunData = namedtuple("RunData", ["config", "result"])
+
+    def dump_json(self, filename):
+        json_data = [{"config": data.config.as_dict(), "output": getattr(data.result, "_output", None) } for data in self]
+        with open(filename, "w") as f:
+            json.dump(json_data, f)
+
+    @classmethod
+    def load_json(cls, filename):
+        with open(filename, "r") as f:
+            json_data = json.load(f)
+        loaded = []
+        for data in json_data:
+            config = BenchmarkConfiguration.from_dict(data["config"])
+            result = BenchmarkResult(data["output"]) if data["output"] is not None else None
+            loaded.append(cls.RunData(config=config, result=result))
+        return loaded
+
+
+def run_python(script, args, **kwargs):
+    command = ["python3", script, *args]
+    proc = subprocess.run(command, stdout=subprocess.PIPE, cwd=os.path.dirname(script), **kwargs)
+    return str(proc.stdout)
+
+
+BenchmarkArgs = namedtuple("BenchmarkArgs", ["config", "output_dir", "ignore_failures", "timeout"])
+
+
+def run_single_benchmark(fargs):
+    # Run as separate process, because else we cannot capture all output from verilator
+    print("  {}: {}".format(fargs.config.name, " ".join(fargs.config.as_args())))
+    try:
+        args   = fargs.config.as_args() + ["--output-dir", fargs.output_dir, "--log-level", "warning"]
+        output = run_python(benchmark.__file__, args, timeout=fargs.timeout)
+        result = BenchmarkResult(output)
+        # Exit if checker had any read error
+        if result.checker_errors != 0:
+            raise RuntimeError("Error during benchmark: checker_errors = {}, args = {}".format(
+                result.checker_errors, fargs.config.as_args()
+            ))
+    except Exception as e:
+        if fargs.ignore_failures:
+            print("  {}: ERROR: {}".format(fargs.config.name, e))
+            return None
+        else:
+            raise
+    print("  {}: ok".format(fargs.config.name))
+    return result
+
+
+InQueueItem = namedtuple("InQueueItem", ["index", "config"])
+OutQueueItem = namedtuple("OutQueueItem", ["index", "result"])
+
+
+def run_parallel(configurations, output_base_dir, njobs, ignore_failures, timeout):
+    from multiprocessing import Process, Queue
+    import queue
+
+    def worker(in_queue, out_queue, out_dir):
+        while True:
+            in_item = in_queue.get()
+            if in_item is None:
+                return
+            fargs  = BenchmarkArgs(in_item.config, out_dir, ignore_failures, timeout)
+            result = run_single_benchmark(fargs)
+            out_queue.put(OutQueueItem(in_item.index, result))
+
+    if njobs == 0:
+        njobs = os.cpu_count()
+    print("Using {:d} parallel jobs".format(njobs))
+
+    # Use one directory per worker, as running each benchmark in a separate directory
+    # takes too much disk space (~2GB per 100 benchmarks)
+    dir_pool = [os.path.join(output_base_dir, "worker_%02d" % i) for i in range(njobs)]
+
+    in_queue, out_queue = Queue(), Queue()
+    workers = [Process(target=worker, args=(in_queue, out_queue, dir)) for dir in dir_pool]
+    for w in workers:
+        w.start()
+
+    # Put all benchmark configurations with index to retrieve them in order
+    for i, config in enumerate(configurations):
+        in_queue.put(InQueueItem(i, config))
+
+    # Send "finish signal" for each worker
+    for _ in workers:
+        in_queue.put(None)
+
+    # Retrieve results in proper order
+    out_items = [out_queue.get() for _ in configurations]
+    results   = [out.result for out in sorted(out_items, key=lambda o: o.index)]
+
+    for p in workers:
+        p.join()
+
+    return results
+
+
+def run_benchmarks(configurations, output_base_dir, njobs, ignore_failures, timeout):
+    print("Running {:d} benchmarks ...".format(len(configurations)))
+    if njobs == 1:
+        results = [run_single_benchmark(BenchmarkArgs(config, output_base_dir, ignore_failures, timeout))
+                   for config in configurations]
+    else:
+        results = run_parallel(configurations, output_base_dir, njobs, ignore_failures, timeout)
+    run_data = [RunCache.RunData(config, result) for config, result in zip(configurations, results)]
+    return run_data
+
+
+def main(argv=None):
+    parser = argparse.ArgumentParser(description="Run LiteDRAM benchmarks and collect the results.")
+    parser.add_argument("config",                                  help="YAML config file")
+    parser.add_argument("--names",            nargs="*",           help="Limit benchmarks to given names")
+    parser.add_argument("--regex",                                 help="Limit benchmarks to names matching the regex")
+    parser.add_argument("--not-regex",                             help="Limit benchmarks to names not matching the regex")
+    parser.add_argument("--html",             action="store_true", help="Generate HTML summary")
+    parser.add_argument("--html-output-dir",  default="html",      help="Output directory for generated HTML")
+    parser.add_argument("--plot",             action="store_true", help="Generate plots with results summary")
+    parser.add_argument("--plot-format",      default="png",       help="Specify plots file format (default=png)")
+    parser.add_argument("--plot-backend",     default="Agg",       help="Optionally specify matplotlib GUI backend")
+    parser.add_argument("--plot-transparent", action="store_true", help="Use transparent background when saving plots")
+    parser.add_argument("--plot-output-dir",  default="plots",     help="Specify where to save the plots")
+    parser.add_argument("--plot-theme",       default="default",   help="Use different matplotlib theme")
+    parser.add_argument("--fail-fast",        action="store_true", help="Exit on any benchmark error, do not continue")
+    parser.add_argument("--output-dir",       default="build",     help="Directory to store benchmark build output")
+    parser.add_argument("--njobs",            default=0, type=int, help="Use N parallel jobs to run benchmarks (default=0, which uses CPU count)")
+    parser.add_argument("--heartbeat",        default=0, type=int, help="Print heartbeat message with given interval (default=0 => never)")
+    parser.add_argument("--timeout",          default=None,        help="Set timeout for a single benchmark")
+    parser.add_argument("--results-cache",                         help="""Use given JSON file as results cache. If the file exists,
+                                                                           it will be loaded instead of running actual benchmarks,
+                                                                           else benchmarks will be run normally, and then saved
+                                                                           to the given file. This makes it easy to rerun the script
+                                                                           to generate a different summary without rerunning the benchmarks.""")
+    args = parser.parse_args(argv)
+
+    if not args.results_cache and not _summary:
+        print("Summary not available and not running with --results-cache - run would not produce any results! Aborting.",
+              file=sys.stderr)
+        sys.exit(1)
+
+    # Load and filter configurations
+    configurations = BenchmarkConfiguration.load_yaml(args.config)
+    filters = {
+        "regex":     lambda config: re.search(args.regex, config.name),
+        "not_regex": lambda config: not re.search(args.not_regex, config.name),
+        "names":     lambda config: config.name in args.names,
+    }
+    for arg, f in filters.items():
+        if getattr(args, arg):
+            configurations = filter(f, configurations)
+    configurations = list(configurations)
+
+    # Load outputs from the cache if it exists
+    cache_exists = args.results_cache and os.path.isfile(args.results_cache)
+    if args.results_cache and cache_exists:
+        cache = RunCache.load_json(args.results_cache)
+
+        # Take only those that match configurations
+        names_to_load = [c.name for c in configurations]
+        run_data = [data for data in cache if data.config.name in names_to_load]
+    else:  # Run all the benchmarks normally
+        if args.heartbeat:
+            heartbeat_cmd = ["/bin/sh", "-c", "while true; do sleep %d; echo Heartbeat...; done" % args.heartbeat]
+            heartbeat = subprocess.Popen(heartbeat_cmd)
+        if args.timeout is not None:
+            args.timeout = int(args.timeout)
+        run_data = run_benchmarks(configurations, args.output_dir, args.njobs, not args.fail_fast, args.timeout)
+        if args.heartbeat:
+            heartbeat.kill()
+
+    # Store outputs in cache
+    if args.results_cache and not cache_exists:
+        cache = RunCache(run_data)
+        cache.dump_json(args.results_cache)
+
+    # Display summary
+    if _summary:
+        summary = ResultsSummary(run_data)
+        summary.text_summary()
+        if args.html:
+            summary.html_summary(args.html_output_dir)
+        if args.plot:
+            summary.plot_summary(
+                plots_dir=args.plot_output_dir,
+                backend=args.plot_backend,
+                theme=args.plot_theme,
+                save_format=args.plot_format,
+                transparent=args.plot_transparent,
+            )
+
+    # Exit with error when there is no single benchmark that succeeded
+    succeeded = sum(1 if d.result is not None else 0 for d in run_data)
+    if succeeded == 0:
+        sys.exit(1)
+
+if __name__ == "__main__":
+    main()
diff --git a/test/spd_data/MT16KTF1G64HZ-1G6P1.csv b/test/spd_data/MT16KTF1G64HZ-1G6P1.csv
new file mode 100644 (file)
index 0000000..08eb780
--- /dev/null
@@ -0,0 +1,76 @@
+Part Number,Byte Number,Byte Description,Byte Value\r
+MT16KTF1G64HZ-1G6P1,0,DDR3-CRC RANGE; EEPROM BYTES; BYTES USED,92\r
+MT16KTF1G64HZ-1G6P1,1,DDR3-SPD REVISON,13\r
+MT16KTF1G64HZ-1G6P1,2,DDR3-DRAM DEVICE TYPE,0B\r
+MT16KTF1G64HZ-1G6P1,3,DDR3-MODULE TYPE (FORM FACTOR),03\r
+MT16KTF1G64HZ-1G6P1,4,DDR3-SDRAM DEVICE DENSITY  BANKS,04\r
+MT16KTF1G64HZ-1G6P1,5,DDR3-SDRAM DEVICE ROW  COLUMN COUNT,21\r
+MT16KTF1G64HZ-1G6P1,6,DDR3-MODULE NOMINAL VDD,02\r
+MT16KTF1G64HZ-1G6P1,7,DDR3-MODULE RANKS   DEVICE DQ COUNT,09\r
+MT16KTF1G64HZ-1G6P1,8,DDR3-ECC TAG  MODULE MEMORY BUS WIDTH,03\r
+MT16KTF1G64HZ-1G6P1,9,DDR3-FINE TIMEBASE DIVIDEND/DIVISOR,11\r
+MT16KTF1G64HZ-1G6P1,10,DDR3-MEDIUM TIMEBASE DIVIDEND,01\r
+MT16KTF1G64HZ-1G6P1,11,DDR3-MEDIUM TIMEBASE DIVISOR,08\r
+MT16KTF1G64HZ-1G6P1,12,DDR3-MIN SDRAM CYCLE TIME (TCKMIN),0A\r
+MT16KTF1G64HZ-1G6P1,13,DDR3-BYTE 13 RESERVED,00\r
+MT16KTF1G64HZ-1G6P1,14,DDR3-CAS LATENCIES SUPPORTED (CL4 => CL11),FE\r
+MT16KTF1G64HZ-1G6P1,15,DDR3-CAS LATENCIES SUPPORTED (CL12 => CL18),00\r
+MT16KTF1G64HZ-1G6P1,16,DDR3-MIN CAS LATENCY TIME (TAAMIN),69\r
+MT16KTF1G64HZ-1G6P1,17,DDR3-MIN WRITE RECOVERY TIME (TWRMIN),78\r
+MT16KTF1G64HZ-1G6P1,18,DDR3-MIN RAS# TO CAS# DELAY (TRCDMIN),69\r
+MT16KTF1G64HZ-1G6P1,19,DDR3-MIN ROW ACTIVE TO ROW ACTIVE DELAY (TRRDMIN),30\r
+MT16KTF1G64HZ-1G6P1,20,DDR3-MIN ROW PRECHARGE DELAY (TRPMIN),69\r
+MT16KTF1G64HZ-1G6P1,21,DDR3-UPPER NIBBLE FOR TRAS  TRC,11\r
+MT16KTF1G64HZ-1G6P1,22,DDR3-MIN ACTIVE TO PRECHARGE DELAY (TRASMIN),18\r
+MT16KTF1G64HZ-1G6P1,23,DDR3-MIN ACTIVE TO ACTIVE/REFRESH DELAY (TRCMIN),81\r
+MT16KTF1G64HZ-1G6P1,24,DDR3-MIN REFRESH RECOVERY DELAY (TRFCMIN) LSB,20\r
+MT16KTF1G64HZ-1G6P1,25,DDR3-MIN REFRESH RECOVERY DELAY (TRFCMIN) MSB,08\r
+MT16KTF1G64HZ-1G6P1,26,DDR3-MIN INTERNAL WRITE TO READ CMD DELAY (TWTRMIN),3C\r
+MT16KTF1G64HZ-1G6P1,27,DDR3-MIN INTERNAL READ TO PRECHARGE CMD DELAY (TRTPMIN),3C\r
+MT16KTF1G64HZ-1G6P1,28,DDR3-MIN FOUR ACTIVE WINDOW DELAY (TFAWMIN) MSB,00\r
+MT16KTF1G64HZ-1G6P1,29,DDR3-MIN FOUR ACTIVE WINDOW DELAY (TFAWMIN) LSB,F0\r
+MT16KTF1G64HZ-1G6P1,30,DDR3-SDRAM DEVICE OUTPUT DRIVERS SUPPORTED,83\r
+MT16KTF1G64HZ-1G6P1,31,DDR3-SDRAM DEVICE THERMAL  REFRESH OPTIONS,05\r
+MT16KTF1G64HZ-1G6P1,32,DDR3-MODULE THERMAL SENSOR,00\r
+MT16KTF1G64HZ-1G6P1,33,DDR3-SDRAM DEVICE TYPE,00\r
+MT16KTF1G64HZ-1G6P1,34,DDR3-FINE OFFSET FOR TCKMIN,00\r
+MT16KTF1G64HZ-1G6P1,35,DDR3-FINE OFFSET FOR TAAMIN,00\r
+MT16KTF1G64HZ-1G6P1,36,DDR3-FINE OFFSET FOR TRCDMIN,00\r
+MT16KTF1G64HZ-1G6P1,37,DDR3-FINE OFFSET FOR TRPMIN,00\r
+MT16KTF1G64HZ-1G6P1,38,DDR3-FINE OFFSET FOR TRCMIN,00\r
+MT16KTF1G64HZ-1G6P1,39,DDR3-BYTE 39 RESERVED,00\r
+MT16KTF1G64HZ-1G6P1,40,DDR3-BYTE 40 RESERVED,00\r
+MT16KTF1G64HZ-1G6P1,41,DDR3-PTRR TMAW  MAC,88\r
+MT16KTF1G64HZ-1G6P1,42-59,DDR3-RESERVED BYTES 42-59,000000000000000000000000000000000000\r
+MT16KTF1G64HZ-1G6P1,60,DDR3-RC REV  NOM MODULE HEIGHT,0F\r
+MT16KTF1G64HZ-1G6P1,61,DDR3-MODULE THICKNESS (MAX),11\r
+MT16KTF1G64HZ-1G6P1,62,DDR3-REFERENCE RAW CARD ID,65\r
+MT16KTF1G64HZ-1G6P1,63,DDR3 - ADDRESS MAPPING/MODULE ATTRIBUTES,00\r
+MT16KTF1G64HZ-1G6P1,64,DDR3-HEATSPREADER SOLUTION,00\r
+MT16KTF1G64HZ-1G6P1,65,DDR3-REGISTER VENDOR ID (LSB),00\r
+MT16KTF1G64HZ-1G6P1,66,DDR3-REGISTER VENDOR ID (MSB),00\r
+MT16KTF1G64HZ-1G6P1,67,DDR3-REGISTER REVISON NUMBER,00\r
+MT16KTF1G64HZ-1G6P1,68,DDR3-REGISTER TYPE,00\r
+MT16KTF1G64HZ-1G6P1,69,DDR3-REG CTRL WORDS 1 AND ZERO,00\r
+MT16KTF1G64HZ-1G6P1,70,DDR3-REG CTRL WORDS 3 AND 2,00\r
+MT16KTF1G64HZ-1G6P1,71,DDR3-REG CTRL WORDS 5 AND 4,00\r
+MT16KTF1G64HZ-1G6P1,72,DDR3-REG CTRL WORDS 7 AND 6,00\r
+MT16KTF1G64HZ-1G6P1,73,DDR3-REG CTRL WORDS 9 AND 8,00\r
+MT16KTF1G64HZ-1G6P1,74,DDR3-REG CTRL WORDS 11 AND 10,00\r
+MT16KTF1G64HZ-1G6P1,75,DDR3-REG CTRL WORDS 13 AND 12,00\r
+MT16KTF1G64HZ-1G6P1,76,DDR3-REG CTRL WORDS 15 AND 14,00\r
+MT16KTF1G64HZ-1G6P1,77-116,DDR3-RESERVED BYTES 77-116,00000000000000000000000000000000000000000000000000000000000000000000000000000000\r
+MT16KTF1G64HZ-1G6P1,117,DDR3-MODULE MFR ID (LSB),80\r
+MT16KTF1G64HZ-1G6P1,118,DDR3-MODULE MFR ID (MSB),2C\r
+MT16KTF1G64HZ-1G6P1,119,DDR3-MODULE MFR LOCATION ID,00\r
+MT16KTF1G64HZ-1G6P1,120,DDR3-MODULE MFR YEAR,00\r
+MT16KTF1G64HZ-1G6P1,121,DDR3-MODULE MFR WEEK,00\r
+MT16KTF1G64HZ-1G6P1,122-125,DDR3-MODULE SERIAL NUMBER,00000000\r
+MT16KTF1G64HZ-1G6P1,126-127,DDR3-CRC,5759\r
+MT16KTF1G64HZ-1G6P1,128-145,DDR3-MODULE PART NUMBER,16KTF1G64HZ-1G6P1\r
+MT16KTF1G64HZ-1G6P1,146,DDR3-MODULE DIE REV,50\r
+MT16KTF1G64HZ-1G6P1,147,DDR3-MODULE PCB REV,31\r
+MT16KTF1G64HZ-1G6P1,148,DDR3-DRAM DEVICE MFR ID (LSB),80\r
+MT16KTF1G64HZ-1G6P1,149,DDR3-DRAM DEVICE MFR (MSB),2C\r
+MT16KTF1G64HZ-1G6P1,150-175,DDR3-MFR RESERVED BYTES 150-175,0000000000000000000000000000000000000000000000000000\r
+MT16KTF1G64HZ-1G6P1,176-255,DDR3-CUSTOMER RESERVED BYTES 176-255,FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
\ No newline at end of file
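The SPD CSVs in this commit all share the column layout above (Part Number, Byte Number, Byte Description, Byte Value): single bytes are given as one hex pair, and multi-byte rows such as "42-59" carry either concatenated hex pairs or, for the module part number, ASCII text. A minimal sketch (not part of this commit) of expanding one of these dumps into a 256-entry byte array; how the repository's own tooling consumes these files may differ:

import csv

def spd_bytes_from_csv(path):
    """Expand one SPD dump CSV into a 256-entry list of byte values."""
    spd = [0] * 256
    with open(path, newline="") as f:
        for row in csv.DictReader(f):
            index, value = row["Byte Number"], row["Byte Value"]
            if "-" not in index:
                spd[int(index)] = int(value, 16)
                continue
            start, end = (int(x) for x in index.split("-"))
            count = end - start + 1
            try:
                data = bytes.fromhex(value)   # hex pairs, one per byte
            except ValueError:
                data = value.encode("ascii")  # e.g. the module part number row
            data = data.ljust(count, b"\x00")[:count]
            spd[start:start + count] = data
    return spd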
diff --git a/test/spd_data/MT16KTF1G64HZ-1G9E1.csv b/test/spd_data/MT16KTF1G64HZ-1G9E1.csv
new file mode 100644 (file)
index 0000000..ac33154
--- /dev/null
@@ -0,0 +1,76 @@
+Part Number,Byte Number,Byte Description,Byte Value\r
+MT16KTF1G64HZ-1G9E1,0,DDR3-CRC RANGE; EEPROM BYTES; BYTES USED,92\r
+MT16KTF1G64HZ-1G9E1,1,DDR3-SPD REVISON,13\r
+MT16KTF1G64HZ-1G9E1,2,DDR3-DRAM DEVICE TYPE,0B\r
+MT16KTF1G64HZ-1G9E1,3,DDR3-MODULE TYPE (FORM FACTOR),03\r
+MT16KTF1G64HZ-1G9E1,4,DDR3-SDRAM DEVICE DENSITY  BANKS,04\r
+MT16KTF1G64HZ-1G9E1,5,DDR3-SDRAM DEVICE ROW  COLUMN COUNT,21\r
+MT16KTF1G64HZ-1G9E1,6,DDR3-MODULE NOMINAL VDD,02\r
+MT16KTF1G64HZ-1G9E1,7,DDR3-MODULE RANKS   DEVICE DQ COUNT,09\r
+MT16KTF1G64HZ-1G9E1,8,DDR3-ECC TAG  MODULE MEMORY BUS WIDTH,03\r
+MT16KTF1G64HZ-1G9E1,9,DDR3-FINE TIMEBASE DIVIDEND/DIVISOR,11\r
+MT16KTF1G64HZ-1G9E1,10,DDR3-MEDIUM TIMEBASE DIVIDEND,01\r
+MT16KTF1G64HZ-1G9E1,11,DDR3-MEDIUM TIMEBASE DIVISOR,08\r
+MT16KTF1G64HZ-1G9E1,12,DDR3-MIN SDRAM CYCLE TIME (TCKMIN),09\r
+MT16KTF1G64HZ-1G9E1,13,DDR3-BYTE 13 RESERVED,00\r
+MT16KTF1G64HZ-1G9E1,14,DDR3-CAS LATENCIES SUPPORTED (CL4 => CL11),FE\r
+MT16KTF1G64HZ-1G9E1,15,DDR3-CAS LATENCIES SUPPORTED (CL12 => CL18),02\r
+MT16KTF1G64HZ-1G9E1,16,DDR3-MIN CAS LATENCY TIME (TAAMIN),69\r
+MT16KTF1G64HZ-1G9E1,17,DDR3-MIN WRITE RECOVERY TIME (TWRMIN),78\r
+MT16KTF1G64HZ-1G9E1,18,DDR3-MIN RAS# TO CAS# DELAY (TRCDMIN),69\r
+MT16KTF1G64HZ-1G9E1,19,DDR3-MIN ROW ACTIVE TO ROW ACTIVE DELAY (TRRDMIN),28\r
+MT16KTF1G64HZ-1G9E1,20,DDR3-MIN ROW PRECHARGE DELAY (TRPMIN),69\r
+MT16KTF1G64HZ-1G9E1,21,DDR3-UPPER NIBBLE FOR TRAS  TRC,11\r
+MT16KTF1G64HZ-1G9E1,22,DDR3-MIN ACTIVE TO PRECHARGE DELAY (TRASMIN),10\r
+MT16KTF1G64HZ-1G9E1,23,DDR3-MIN ACTIVE TO ACTIVE/REFRESH DELAY (TRCMIN),79\r
+MT16KTF1G64HZ-1G9E1,24,DDR3-MIN REFRESH RECOVERY DELAY (TRFCMIN) LSB,20\r
+MT16KTF1G64HZ-1G9E1,25,DDR3-MIN REFRESH RECOVERY DELAY (TRFCMIN) MSB,08\r
+MT16KTF1G64HZ-1G9E1,26,DDR3-MIN INTERNAL WRITE TO READ CMD DELAY (TWTRMIN),3C\r
+MT16KTF1G64HZ-1G9E1,27,DDR3-MIN INTERNAL READ TO PRECHARGE CMD DELAY (TRTPMIN),3C\r
+MT16KTF1G64HZ-1G9E1,28,DDR3-MIN FOUR ACTIVE WINDOW DELAY (TFAWMIN) MSB,00\r
+MT16KTF1G64HZ-1G9E1,29,DDR3-MIN FOUR ACTIVE WINDOW DELAY (TFAWMIN) LSB,D8\r
+MT16KTF1G64HZ-1G9E1,30,DDR3-SDRAM DEVICE OUTPUT DRIVERS SUPPORTED,83\r
+MT16KTF1G64HZ-1G9E1,31,DDR3-SDRAM DEVICE THERMAL  REFRESH OPTIONS,05\r
+MT16KTF1G64HZ-1G9E1,32,DDR3-MODULE THERMAL SENSOR,00\r
+MT16KTF1G64HZ-1G9E1,33,DDR3-SDRAM DEVICE TYPE,00\r
+MT16KTF1G64HZ-1G9E1,34,DDR3-FINE OFFSET FOR TCKMIN,CA\r
+MT16KTF1G64HZ-1G9E1,35,DDR3-FINE OFFSET FOR TAAMIN,00\r
+MT16KTF1G64HZ-1G9E1,36,DDR3-FINE OFFSET FOR TRCDMIN,00\r
+MT16KTF1G64HZ-1G9E1,37,DDR3-FINE OFFSET FOR TRPMIN,00\r
+MT16KTF1G64HZ-1G9E1,38,DDR3-FINE OFFSET FOR TRCMIN,00\r
+MT16KTF1G64HZ-1G9E1,39,DDR3-BYTE 39 RESERVED,00\r
+MT16KTF1G64HZ-1G9E1,40,DDR3-BYTE 40 RESERVED,00\r
+MT16KTF1G64HZ-1G9E1,41,DDR3-PTRR TMAW  MAC,84\r
+MT16KTF1G64HZ-1G9E1,42-59,DDR3-RESERVED BYTES 42-59,000000000000000000000000000000000000\r
+MT16KTF1G64HZ-1G9E1,60,DDR3-RC REV  NOM MODULE HEIGHT,0F\r
+MT16KTF1G64HZ-1G9E1,61,DDR3-MODULE THICKNESS (MAX),11\r
+MT16KTF1G64HZ-1G9E1,62,DDR3-REFERENCE RAW CARD ID,05\r
+MT16KTF1G64HZ-1G9E1,63,DDR3 - ADDRESS MAPPING/MODULE ATTRIBUTES,00\r
+MT16KTF1G64HZ-1G9E1,64,DDR3-HEATSPREADER SOLUTION,00\r
+MT16KTF1G64HZ-1G9E1,65,DDR3-REGISTER VENDOR ID (LSB),00\r
+MT16KTF1G64HZ-1G9E1,66,DDR3-REGISTER VENDOR ID (MSB),00\r
+MT16KTF1G64HZ-1G9E1,67,DDR3-REGISTER REVISON NUMBER,00\r
+MT16KTF1G64HZ-1G9E1,68,DDR3-REGISTER TYPE,00\r
+MT16KTF1G64HZ-1G9E1,69,DDR3-REG CTRL WORDS 1 AND ZERO,00\r
+MT16KTF1G64HZ-1G9E1,70,DDR3-REG CTRL WORDS 3 AND 2,00\r
+MT16KTF1G64HZ-1G9E1,71,DDR3-REG CTRL WORDS 5 AND 4,00\r
+MT16KTF1G64HZ-1G9E1,72,DDR3-REG CTRL WORDS 7 AND 6,00\r
+MT16KTF1G64HZ-1G9E1,73,DDR3-REG CTRL WORDS 9 AND 8,00\r
+MT16KTF1G64HZ-1G9E1,74,DDR3-REG CTRL WORDS 11 AND 10,00\r
+MT16KTF1G64HZ-1G9E1,75,DDR3-REG CTRL WORDS 13 AND 12,00\r
+MT16KTF1G64HZ-1G9E1,76,DDR3-REG CTRL WORDS 15 AND 14,00\r
+MT16KTF1G64HZ-1G9E1,77-116,DDR3-RESERVED BYTES 77-116,00000000000000000000000000000000000000000000000000000000000000000000000000000000\r
+MT16KTF1G64HZ-1G9E1,117,DDR3-MODULE MFR ID (LSB),80\r
+MT16KTF1G64HZ-1G9E1,118,DDR3-MODULE MFR ID (MSB),2C\r
+MT16KTF1G64HZ-1G9E1,119,DDR3-MODULE MFR LOCATION ID,00\r
+MT16KTF1G64HZ-1G9E1,120,DDR3-MODULE MFR YEAR,00\r
+MT16KTF1G64HZ-1G9E1,121,DDR3-MODULE MFR WEEK,00\r
+MT16KTF1G64HZ-1G9E1,122-125,DDR3-MODULE SERIAL NUMBER,00000000\r
+MT16KTF1G64HZ-1G9E1,126-127,DDR3-CRC,DDA5\r
+MT16KTF1G64HZ-1G9E1,128-145,DDR3-MODULE PART NUMBER,16KTF1G64HZ-1G9E1\r
+MT16KTF1G64HZ-1G9E1,146,DDR3-MODULE DIE REV,45\r
+MT16KTF1G64HZ-1G9E1,147,DDR3-MODULE PCB REV,31\r
+MT16KTF1G64HZ-1G9E1,148,DDR3-DRAM DEVICE MFR ID (LSB),80\r
+MT16KTF1G64HZ-1G9E1,149,DDR3-DRAM DEVICE MFR (MSB),2C\r
+MT16KTF1G64HZ-1G9E1,150-175,DDR3-MFR RESERVED BYTES 150-175,0000000000000000000000000000000000000000000000000000\r
+MT16KTF1G64HZ-1G9E1,176-255,DDR3-CUSTOMER RESERVED BYTES 176-255,FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
\ No newline at end of file
diff --git a/test/spd_data/MT18KSF1G72HZ-1G4E2.csv b/test/spd_data/MT18KSF1G72HZ-1G4E2.csv
new file mode 100644 (file)
index 0000000..76c5e16
--- /dev/null
@@ -0,0 +1,76 @@
+Part Number,Byte Number,Byte Description,Byte Value\r
+MT18KSF1G72HZ-1G4E2,0,DDR3-CRC RANGE; EEPROM BYTES; BYTES USED,92\r
+MT18KSF1G72HZ-1G4E2,1,DDR3-SPD REVISON,13\r
+MT18KSF1G72HZ-1G4E2,2,DDR3-DRAM DEVICE TYPE,0B\r
+MT18KSF1G72HZ-1G4E2,3,DDR3-MODULE TYPE (FORM FACTOR),08\r
+MT18KSF1G72HZ-1G4E2,4,DDR3-SDRAM DEVICE DENSITY  BANKS,04\r
+MT18KSF1G72HZ-1G4E2,5,DDR3-SDRAM DEVICE ROW  COLUMN COUNT,21\r
+MT18KSF1G72HZ-1G4E2,6,DDR3-MODULE NOMINAL VDD,02\r
+MT18KSF1G72HZ-1G4E2,7,DDR3-MODULE RANKS   DEVICE DQ COUNT,09\r
+MT18KSF1G72HZ-1G4E2,8,DDR3-ECC TAG  MODULE MEMORY BUS WIDTH,0B\r
+MT18KSF1G72HZ-1G4E2,9,DDR3-FINE TIMEBASE DIVIDEND/DIVISOR,11\r
+MT18KSF1G72HZ-1G4E2,10,DDR3-MEDIUM TIMEBASE DIVIDEND,01\r
+MT18KSF1G72HZ-1G4E2,11,DDR3-MEDIUM TIMEBASE DIVISOR,08\r
+MT18KSF1G72HZ-1G4E2,12,DDR3-MIN SDRAM CYCLE TIME (TCKMIN),0C\r
+MT18KSF1G72HZ-1G4E2,13,DDR3-BYTE 13 RESERVED,00\r
+MT18KSF1G72HZ-1G4E2,14,DDR3-CAS LATENCIES SUPPORTED (CL4 => CL11),7E\r
+MT18KSF1G72HZ-1G4E2,15,DDR3-CAS LATENCIES SUPPORTED (CL12 => CL18),00\r
+MT18KSF1G72HZ-1G4E2,16,DDR3-MIN CAS LATENCY TIME (TAAMIN),69\r
+MT18KSF1G72HZ-1G4E2,17,DDR3-MIN WRITE RECOVERY TIME (TWRMIN),78\r
+MT18KSF1G72HZ-1G4E2,18,DDR3-MIN RAS# TO CAS# DELAY (TRCDMIN),69\r
+MT18KSF1G72HZ-1G4E2,19,DDR3-MIN ROW ACTIVE TO ROW ACTIVE DELAY (TRRDMIN),30\r
+MT18KSF1G72HZ-1G4E2,20,DDR3-MIN ROW PRECHARGE DELAY (TRPMIN),69\r
+MT18KSF1G72HZ-1G4E2,21,DDR3-UPPER NIBBLE FOR TRAS  TRC,11\r
+MT18KSF1G72HZ-1G4E2,22,DDR3-MIN ACTIVE TO PRECHARGE DELAY (TRASMIN),20\r
+MT18KSF1G72HZ-1G4E2,23,DDR3-MIN ACTIVE TO ACTIVE/REFRESH DELAY (TRCMIN),89\r
+MT18KSF1G72HZ-1G4E2,24,DDR3-MIN REFRESH RECOVERY DELAY (TRFCMIN) LSB,20\r
+MT18KSF1G72HZ-1G4E2,25,DDR3-MIN REFRESH RECOVERY DELAY (TRFCMIN) MSB,08\r
+MT18KSF1G72HZ-1G4E2,26,DDR3-MIN INTERNAL WRITE TO READ CMD DELAY (TWTRMIN),3C\r
+MT18KSF1G72HZ-1G4E2,27,DDR3-MIN INTERNAL READ TO PRECHARGE CMD DELAY (TRTPMIN),3C\r
+MT18KSF1G72HZ-1G4E2,28,DDR3-MIN FOUR ACTIVE WINDOW DELAY (TFAWMIN) MSB,00\r
+MT18KSF1G72HZ-1G4E2,29,DDR3-MIN FOUR ACTIVE WINDOW DELAY (TFAWMIN) LSB,F0\r
+MT18KSF1G72HZ-1G4E2,30,DDR3-SDRAM DEVICE OUTPUT DRIVERS SUPPORTED,83\r
+MT18KSF1G72HZ-1G4E2,31,DDR3-SDRAM DEVICE THERMAL  REFRESH OPTIONS,05\r
+MT18KSF1G72HZ-1G4E2,32,DDR3-MODULE THERMAL SENSOR,80\r
+MT18KSF1G72HZ-1G4E2,33,DDR3-SDRAM DEVICE TYPE,00\r
+MT18KSF1G72HZ-1G4E2,34,DDR3-FINE OFFSET FOR TCKMIN,00\r
+MT18KSF1G72HZ-1G4E2,35,DDR3-FINE OFFSET FOR TAAMIN,00\r
+MT18KSF1G72HZ-1G4E2,36,DDR3-FINE OFFSET FOR TRCDMIN,00\r
+MT18KSF1G72HZ-1G4E2,37,DDR3-FINE OFFSET FOR TRPMIN,00\r
+MT18KSF1G72HZ-1G4E2,38,DDR3-FINE OFFSET FOR TRCMIN,00\r
+MT18KSF1G72HZ-1G4E2,39,DDR3-BYTE 39 RESERVED,00\r
+MT18KSF1G72HZ-1G4E2,40,DDR3-BYTE 40 RESERVED,00\r
+MT18KSF1G72HZ-1G4E2,41,DDR3-PTRR TMAW  MAC,84\r
+MT18KSF1G72HZ-1G4E2,42-59,DDR3-RESERVED BYTES 42-59,000000000000000000000000000000000000\r
+MT18KSF1G72HZ-1G4E2,60,DDR3-RC REV  NOM MODULE HEIGHT,0F\r
+MT18KSF1G72HZ-1G4E2,61,DDR3-MODULE THICKNESS (MAX),11\r
+MT18KSF1G72HZ-1G4E2,62,DDR3-REFERENCE RAW CARD ID,23\r
+MT18KSF1G72HZ-1G4E2,63,DDR3 - ADDRESS MAPPING/MODULE ATTRIBUTES,00\r
+MT18KSF1G72HZ-1G4E2,64,DDR3-HEATSPREADER SOLUTION,00\r
+MT18KSF1G72HZ-1G4E2,65,DDR3-REGISTER VENDOR ID (LSB),00\r
+MT18KSF1G72HZ-1G4E2,66,DDR3-REGISTER VENDOR ID (MSB),00\r
+MT18KSF1G72HZ-1G4E2,67,DDR3-REGISTER REVISON NUMBER,00\r
+MT18KSF1G72HZ-1G4E2,68,DDR3-REGISTER TYPE,00\r
+MT18KSF1G72HZ-1G4E2,69,DDR3-REG CTRL WORDS 1 AND ZERO,00\r
+MT18KSF1G72HZ-1G4E2,70,DDR3-REG CTRL WORDS 3 AND 2,00\r
+MT18KSF1G72HZ-1G4E2,71,DDR3-REG CTRL WORDS 5 AND 4,00\r
+MT18KSF1G72HZ-1G4E2,72,DDR3-REG CTRL WORDS 7 AND 6,00\r
+MT18KSF1G72HZ-1G4E2,73,DDR3-REG CTRL WORDS 9 AND 8,00\r
+MT18KSF1G72HZ-1G4E2,74,DDR3-REG CTRL WORDS 11 AND 10,00\r
+MT18KSF1G72HZ-1G4E2,75,DDR3-REG CTRL WORDS 13 AND 12,00\r
+MT18KSF1G72HZ-1G4E2,76,DDR3-REG CTRL WORDS 15 AND 14,00\r
+MT18KSF1G72HZ-1G4E2,77-116,DDR3-RESERVED BYTES 77-116,00000000000000000000000000000000000000000000000000000000000000000000000000000000\r
+MT18KSF1G72HZ-1G4E2,117,DDR3-MODULE MFR ID (LSB),80\r
+MT18KSF1G72HZ-1G4E2,118,DDR3-MODULE MFR ID (MSB),2C\r
+MT18KSF1G72HZ-1G4E2,119,DDR3-MODULE MFR LOCATION ID,00\r
+MT18KSF1G72HZ-1G4E2,120,DDR3-MODULE MFR YEAR,00\r
+MT18KSF1G72HZ-1G4E2,121,DDR3-MODULE MFR WEEK,00\r
+MT18KSF1G72HZ-1G4E2,122-125,DDR3-MODULE SERIAL NUMBER,00000000\r
+MT18KSF1G72HZ-1G4E2,126-127,DDR3-CRC,FCB1\r
+MT18KSF1G72HZ-1G4E2,128-145,DDR3-MODULE PART NUMBER,18KSF1G72HZ-1G4E2\r
+MT18KSF1G72HZ-1G4E2,146,DDR3-MODULE DIE REV,45\r
+MT18KSF1G72HZ-1G4E2,147,DDR3-MODULE PCB REV,32\r
+MT18KSF1G72HZ-1G4E2,148,DDR3-DRAM DEVICE MFR ID (LSB),80\r
+MT18KSF1G72HZ-1G4E2,149,DDR3-DRAM DEVICE MFR (MSB),2C\r
+MT18KSF1G72HZ-1G4E2,150-175,DDR3-MFR RESERVED BYTES 150-175,0000000000000000000000000000000000000000000000000000\r
+MT18KSF1G72HZ-1G4E2,176-255,DDR3-CUSTOMER RESERVED BYTES 176-255,FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
\ No newline at end of file
diff --git a/test/spd_data/MT18KSF1G72HZ-1G6E2.csv b/test/spd_data/MT18KSF1G72HZ-1G6E2.csv
new file mode 100644 (file)
index 0000000..1cffcf3
--- /dev/null
@@ -0,0 +1,76 @@
+Part Number,Byte Number,Byte Description,Byte Value\r
+MT18KSF1G72HZ-1G6E2,0,DDR3-CRC RANGE; EEPROM BYTES; BYTES USED,92\r
+MT18KSF1G72HZ-1G6E2,1,DDR3-SPD REVISON,13\r
+MT18KSF1G72HZ-1G6E2,2,DDR3-DRAM DEVICE TYPE,0B\r
+MT18KSF1G72HZ-1G6E2,3,DDR3-MODULE TYPE (FORM FACTOR),08\r
+MT18KSF1G72HZ-1G6E2,4,DDR3-SDRAM DEVICE DENSITY  BANKS,04\r
+MT18KSF1G72HZ-1G6E2,5,DDR3-SDRAM DEVICE ROW  COLUMN COUNT,21\r
+MT18KSF1G72HZ-1G6E2,6,DDR3-MODULE NOMINAL VDD,02\r
+MT18KSF1G72HZ-1G6E2,7,DDR3-MODULE RANKS   DEVICE DQ COUNT,09\r
+MT18KSF1G72HZ-1G6E2,8,DDR3-ECC TAG  MODULE MEMORY BUS WIDTH,0B\r
+MT18KSF1G72HZ-1G6E2,9,DDR3-FINE TIMEBASE DIVIDEND/DIVISOR,11\r
+MT18KSF1G72HZ-1G6E2,10,DDR3-MEDIUM TIMEBASE DIVIDEND,01\r
+MT18KSF1G72HZ-1G6E2,11,DDR3-MEDIUM TIMEBASE DIVISOR,08\r
+MT18KSF1G72HZ-1G6E2,12,DDR3-MIN SDRAM CYCLE TIME (TCKMIN),0A\r
+MT18KSF1G72HZ-1G6E2,13,DDR3-BYTE 13 RESERVED,00\r
+MT18KSF1G72HZ-1G6E2,14,DDR3-CAS LATENCIES SUPPORTED (CL4 => CL11),FE\r
+MT18KSF1G72HZ-1G6E2,15,DDR3-CAS LATENCIES SUPPORTED (CL12 => CL18),00\r
+MT18KSF1G72HZ-1G6E2,16,DDR3-MIN CAS LATENCY TIME (TAAMIN),69\r
+MT18KSF1G72HZ-1G6E2,17,DDR3-MIN WRITE RECOVERY TIME (TWRMIN),78\r
+MT18KSF1G72HZ-1G6E2,18,DDR3-MIN RAS# TO CAS# DELAY (TRCDMIN),69\r
+MT18KSF1G72HZ-1G6E2,19,DDR3-MIN ROW ACTIVE TO ROW ACTIVE DELAY (TRRDMIN),30\r
+MT18KSF1G72HZ-1G6E2,20,DDR3-MIN ROW PRECHARGE DELAY (TRPMIN),69\r
+MT18KSF1G72HZ-1G6E2,21,DDR3-UPPER NIBBLE FOR TRAS  TRC,11\r
+MT18KSF1G72HZ-1G6E2,22,DDR3-MIN ACTIVE TO PRECHARGE DELAY (TRASMIN),18\r
+MT18KSF1G72HZ-1G6E2,23,DDR3-MIN ACTIVE TO ACTIVE/REFRESH DELAY (TRCMIN),81\r
+MT18KSF1G72HZ-1G6E2,24,DDR3-MIN REFRESH RECOVERY DELAY (TRFCMIN) LSB,20\r
+MT18KSF1G72HZ-1G6E2,25,DDR3-MIN REFRESH RECOVERY DELAY (TRFCMIN) MSB,08\r
+MT18KSF1G72HZ-1G6E2,26,DDR3-MIN INTERNAL WRITE TO READ CMD DELAY (TWTRMIN),3C\r
+MT18KSF1G72HZ-1G6E2,27,DDR3-MIN INTERNAL READ TO PRECHARGE CMD DELAY (TRTPMIN),3C\r
+MT18KSF1G72HZ-1G6E2,28,DDR3-MIN FOUR ACTIVE WINDOW DELAY (TFAWMIN) MSB,00\r
+MT18KSF1G72HZ-1G6E2,29,DDR3-MIN FOUR ACTIVE WINDOW DELAY (TFAWMIN) LSB,F0\r
+MT18KSF1G72HZ-1G6E2,30,DDR3-SDRAM DEVICE OUTPUT DRIVERS SUPPORTED,83\r
+MT18KSF1G72HZ-1G6E2,31,DDR3-SDRAM DEVICE THERMAL  REFRESH OPTIONS,05\r
+MT18KSF1G72HZ-1G6E2,32,DDR3-MODULE THERMAL SENSOR,80\r
+MT18KSF1G72HZ-1G6E2,33,DDR3-SDRAM DEVICE TYPE,00\r
+MT18KSF1G72HZ-1G6E2,34,DDR3-FINE OFFSET FOR TCKMIN,00\r
+MT18KSF1G72HZ-1G6E2,35,DDR3-FINE OFFSET FOR TAAMIN,00\r
+MT18KSF1G72HZ-1G6E2,36,DDR3-FINE OFFSET FOR TRCDMIN,00\r
+MT18KSF1G72HZ-1G6E2,37,DDR3-FINE OFFSET FOR TRPMIN,00\r
+MT18KSF1G72HZ-1G6E2,38,DDR3-FINE OFFSET FOR TRCMIN,00\r
+MT18KSF1G72HZ-1G6E2,39,DDR3-BYTE 39 RESERVED,00\r
+MT18KSF1G72HZ-1G6E2,40,DDR3-BYTE 40 RESERVED,00\r
+MT18KSF1G72HZ-1G6E2,41,DDR3-PTRR TMAW  MAC,84\r
+MT18KSF1G72HZ-1G6E2,42-59,DDR3-RESERVED BYTES 42-59,000000000000000000000000000000000000\r
+MT18KSF1G72HZ-1G6E2,60,DDR3-RC REV  NOM MODULE HEIGHT,0F\r
+MT18KSF1G72HZ-1G6E2,61,DDR3-MODULE THICKNESS (MAX),11\r
+MT18KSF1G72HZ-1G6E2,62,DDR3-REFERENCE RAW CARD ID,23\r
+MT18KSF1G72HZ-1G6E2,63,DDR3 - ADDRESS MAPPING/MODULE ATTRIBUTES,00\r
+MT18KSF1G72HZ-1G6E2,64,DDR3-HEATSPREADER SOLUTION,00\r
+MT18KSF1G72HZ-1G6E2,65,DDR3-REGISTER VENDOR ID (LSB),00\r
+MT18KSF1G72HZ-1G6E2,66,DDR3-REGISTER VENDOR ID (MSB),00\r
+MT18KSF1G72HZ-1G6E2,67,DDR3-REGISTER REVISON NUMBER,00\r
+MT18KSF1G72HZ-1G6E2,68,DDR3-REGISTER TYPE,00\r
+MT18KSF1G72HZ-1G6E2,69,DDR3-REG CTRL WORDS 1 AND ZERO,00\r
+MT18KSF1G72HZ-1G6E2,70,DDR3-REG CTRL WORDS 3 AND 2,00\r
+MT18KSF1G72HZ-1G6E2,71,DDR3-REG CTRL WORDS 5 AND 4,00\r
+MT18KSF1G72HZ-1G6E2,72,DDR3-REG CTRL WORDS 7 AND 6,00\r
+MT18KSF1G72HZ-1G6E2,73,DDR3-REG CTRL WORDS 9 AND 8,00\r
+MT18KSF1G72HZ-1G6E2,74,DDR3-REG CTRL WORDS 11 AND 10,00\r
+MT18KSF1G72HZ-1G6E2,75,DDR3-REG CTRL WORDS 13 AND 12,00\r
+MT18KSF1G72HZ-1G6E2,76,DDR3-REG CTRL WORDS 15 AND 14,00\r
+MT18KSF1G72HZ-1G6E2,77-116,DDR3-RESERVED BYTES 77-116,00000000000000000000000000000000000000000000000000000000000000000000000000000000\r
+MT18KSF1G72HZ-1G6E2,117,DDR3-MODULE MFR ID (LSB),80\r
+MT18KSF1G72HZ-1G6E2,118,DDR3-MODULE MFR ID (MSB),2C\r
+MT18KSF1G72HZ-1G6E2,119,DDR3-MODULE MFR LOCATION ID,00\r
+MT18KSF1G72HZ-1G6E2,120,DDR3-MODULE MFR YEAR,00\r
+MT18KSF1G72HZ-1G6E2,121,DDR3-MODULE MFR WEEK,00\r
+MT18KSF1G72HZ-1G6E2,122-125,DDR3-MODULE SERIAL NUMBER,00000000\r
+MT18KSF1G72HZ-1G6E2,126-127,DDR3-CRC,296F\r
+MT18KSF1G72HZ-1G6E2,128-145,DDR3-MODULE PART NUMBER,18KSF1G72HZ-1G6E2\r
+MT18KSF1G72HZ-1G6E2,146,DDR3-MODULE DIE REV,45\r
+MT18KSF1G72HZ-1G6E2,147,DDR3-MODULE PCB REV,32\r
+MT18KSF1G72HZ-1G6E2,148,DDR3-DRAM DEVICE MFR ID (LSB),80\r
+MT18KSF1G72HZ-1G6E2,149,DDR3-DRAM DEVICE MFR (MSB),2C\r
+MT18KSF1G72HZ-1G6E2,150-175,DDR3-MFR RESERVED BYTES 150-175,0000000000000000000000000000000000000000000000000000\r
+MT18KSF1G72HZ-1G6E2,176-255,DDR3-CUSTOMER RESERVED BYTES 176-255,FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
\ No newline at end of file
diff --git a/test/spd_data/MT8JTF12864AZ-1G4G1.csv b/test/spd_data/MT8JTF12864AZ-1G4G1.csv
new file mode 100644 (file)
index 0000000..4d975f5
--- /dev/null
@@ -0,0 +1,68 @@
+Part Number,Byte Number,Byte Description,Byte Value\r
+MT8JTF12864AZ-1G4G1,0,DDR3-CRC RANGE; EEPROM BYTES; BYTES USED,92\r
+MT8JTF12864AZ-1G4G1,1,DDR3-SPD REVISON,10\r
+MT8JTF12864AZ-1G4G1,2,DDR3-DRAM DEVICE TYPE,0B\r
+MT8JTF12864AZ-1G4G1,3,DDR3-MODULE TYPE (FORM FACTOR),02\r
+MT8JTF12864AZ-1G4G1,4,DDR3-SDRAM DEVICE DENSITY  BANKS,02\r
+MT8JTF12864AZ-1G4G1,5,DDR3-SDRAM DEVICE ROW  COLUMN COUNT,11\r
+MT8JTF12864AZ-1G4G1,6,DDR3-MODULE NOMINAL VDD,00\r
+MT8JTF12864AZ-1G4G1,7,DDR3-MODULE RANKS   DEVICE DQ COUNT,01\r
+MT8JTF12864AZ-1G4G1,8,DDR3-ECC TAG  MODULE MEMORY BUS WIDTH,03\r
+MT8JTF12864AZ-1G4G1,9,DDR3-FINE TIMEBASE DIVIDEND/DIVISOR,52\r
+MT8JTF12864AZ-1G4G1,10,DDR3-MEDIUM TIMEBASE DIVIDEND,01\r
+MT8JTF12864AZ-1G4G1,11,DDR3-MEDIUM TIMEBASE DIVISOR,08\r
+MT8JTF12864AZ-1G4G1,12,DDR3-MIN SDRAM CYCLE TIME (TCKMIN),0C\r
+MT8JTF12864AZ-1G4G1,13,DDR3-BYTE 13 RESERVED,00\r
+MT8JTF12864AZ-1G4G1,14,DDR3-CAS LATENCIES SUPPORTED (CL4 => CL11),7E\r
+MT8JTF12864AZ-1G4G1,15,DDR3-CAS LATENCIES SUPPORTED (CL12 => CL18),00\r
+MT8JTF12864AZ-1G4G1,16,DDR3-MIN CAS LATENCY TIME (TAAMIN),69\r
+MT8JTF12864AZ-1G4G1,17,DDR3-MIN WRITE RECOVERY TIME (TWRMIN),78\r
+MT8JTF12864AZ-1G4G1,18,DDR3-MIN RAS# TO CAS# DELAY (TRCDMIN),69\r
+MT8JTF12864AZ-1G4G1,19,DDR3-MIN ROW ACTIVE TO ROW ACTIVE DELAY (TRRDMIN),30\r
+MT8JTF12864AZ-1G4G1,20,DDR3-MIN ROW PRECHARGE DELAY (TRPMIN),69\r
+MT8JTF12864AZ-1G4G1,21,DDR3-UPPER NIBBLE FOR TRAS  TRC,11\r
+MT8JTF12864AZ-1G4G1,22,DDR3-MIN ACTIVE TO PRECHARGE DELAY (TRASMIN),20\r
+MT8JTF12864AZ-1G4G1,23,DDR3-MIN ACTIVE TO ACTIVE/REFRESH DELAY (TRCMIN),89\r
+MT8JTF12864AZ-1G4G1,24,DDR3-MIN REFRESH RECOVERY DELAY (TRFCMIN) LSB,70\r
+MT8JTF12864AZ-1G4G1,25,DDR3-MIN REFRESH RECOVERY DELAY (TRFCMIN) MSB,03\r
+MT8JTF12864AZ-1G4G1,26,DDR3-MIN INTERNAL WRITE TO READ CMD DELAY (TWTRMIN),3C\r
+MT8JTF12864AZ-1G4G1,27,DDR3-MIN INTERNAL READ TO PRECHARGE CMD DELAY (TRTPMIN),3C\r
+MT8JTF12864AZ-1G4G1,28,DDR3-MIN FOUR ACTIVE WINDOW DELAY (TFAWMIN) MSB,00\r
+MT8JTF12864AZ-1G4G1,29,DDR3-MIN FOUR ACTIVE WINDOW DELAY (TFAWMIN) LSB,F0\r
+MT8JTF12864AZ-1G4G1,30,DDR3-SDRAM DEVICE OUTPUT DRIVERS SUPPORTED,82\r
+MT8JTF12864AZ-1G4G1,31,DDR3-SDRAM DEVICE THERMAL  REFRESH OPTIONS,05\r
+MT8JTF12864AZ-1G4G1,32,DDR3-MODULE THERMAL SENSOR,00\r
+MT8JTF12864AZ-1G4G1,33,DDR3-SDRAM DEVICE TYPE,00\r
+MT8JTF12864AZ-1G4G1,34-59,DDR3-RESERVED BYTES 34-59,0000000000000000000000000000000000000000000000000000\r
+MT8JTF12864AZ-1G4G1,60,DDR3-MODULE HEIGHT (NOMINAL),0F\r
+MT8JTF12864AZ-1G4G1,61,DDR3-MODULE THICKNESS (MAX),01\r
+MT8JTF12864AZ-1G4G1,62,DDR3-REFERENCE RAW CARD ID,00\r
+MT8JTF12864AZ-1G4G1,63,DDR3 - ADDRESS MAPPING/MODULE ATTRIBUTES,00\r
+MT8JTF12864AZ-1G4G1,64,DDR3-HEATSPREADER SOLUTION,00\r
+MT8JTF12864AZ-1G4G1,65,DDR3-REGISTER VENDOR ID (LSB),00\r
+MT8JTF12864AZ-1G4G1,66,DDR3-REGISTER VENDOR ID (MSB),00\r
+MT8JTF12864AZ-1G4G1,67,DDR3-REGISTER REVISON NUMBER,00\r
+MT8JTF12864AZ-1G4G1,68,DDR3-REGISTER TYPE,00\r
+MT8JTF12864AZ-1G4G1,69,DDR3-REG CTRL WORDS 1 AND ZERO,00\r
+MT8JTF12864AZ-1G4G1,70,DDR3-REG CTRL WORDS 3 AND 2,00\r
+MT8JTF12864AZ-1G4G1,71,DDR3-REG CTRL WORDS 5 AND 4,00\r
+MT8JTF12864AZ-1G4G1,72,DDR3-REG CTRL WORDS 7 AND 6,00\r
+MT8JTF12864AZ-1G4G1,73,DDR3-REG CTRL WORDS 9 AND 8,00\r
+MT8JTF12864AZ-1G4G1,74,DDR3-REG CTRL WORDS 11 AND 10,00\r
+MT8JTF12864AZ-1G4G1,75,DDR3-REG CTRL WORDS 13 AND 12,00\r
+MT8JTF12864AZ-1G4G1,76,DDR3-REG CTRL WORDS 15 AND 14,00\r
+MT8JTF12864AZ-1G4G1,77-116,DDR3-RESERVED BYTES 77-116,00000000000000000000000000000000000000000000000000000000000000000000000000000000\r
+MT8JTF12864AZ-1G4G1,117,DDR3-MODULE MFR ID (LSB),80\r
+MT8JTF12864AZ-1G4G1,118,DDR3-MODULE MFR ID (MSB),2C\r
+MT8JTF12864AZ-1G4G1,119,DDR3-MODULE MFR LOCATION ID,00\r
+MT8JTF12864AZ-1G4G1,120,DDR3-MODULE MFR YEAR,00\r
+MT8JTF12864AZ-1G4G1,121,DDR3-MODULE MFR WEEK,00\r
+MT8JTF12864AZ-1G4G1,122-125,DDR3-MODULE SERIAL NUMBER,00000000\r
+MT8JTF12864AZ-1G4G1,126-127,DDR3-CRC,1461\r
+MT8JTF12864AZ-1G4G1,128-145,DDR3-MODULE PART NUMBER,8JTF12864AZ-1G4G1\r
+MT8JTF12864AZ-1G4G1,146,DDR3-MODULE DIE REV,47\r
+MT8JTF12864AZ-1G4G1,147,DDR3-MODULE PCB REV,31\r
+MT8JTF12864AZ-1G4G1,148,DDR3-DRAM DEVICE MFR ID (LSB),80\r
+MT8JTF12864AZ-1G4G1,149,DDR3-DRAM DEVICE MFR (MSB),2C\r
+MT8JTF12864AZ-1G4G1,150-175,DDR3-MFR RESERVED BYTES 150-175,0000000000000000000000000000000000000000000000000000\r
+MT8JTF12864AZ-1G4G1,176-255,DDR3-CUSTOMER RESERVED BYTES 176-255,FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
\ No newline at end of file
diff --git a/test/spd_data/MT8KTF51264HZ-1G4E1.csv b/test/spd_data/MT8KTF51264HZ-1G4E1.csv
new file mode 100644 (file)
index 0000000..a710ef0
--- /dev/null
@@ -0,0 +1,76 @@
+Part Number,Byte Number,Byte Description,Byte Value\r
+MT8KTF51264HZ-1G4E1,0,DDR3-CRC RANGE; EEPROM BYTES; BYTES USED,92\r
+MT8KTF51264HZ-1G4E1,1,DDR3-SPD REVISON,13\r
+MT8KTF51264HZ-1G4E1,2,DDR3-DRAM DEVICE TYPE,0B\r
+MT8KTF51264HZ-1G4E1,3,DDR3-MODULE TYPE (FORM FACTOR),03\r
+MT8KTF51264HZ-1G4E1,4,DDR3-SDRAM DEVICE DENSITY  BANKS,04\r
+MT8KTF51264HZ-1G4E1,5,DDR3-SDRAM DEVICE ROW  COLUMN COUNT,21\r
+MT8KTF51264HZ-1G4E1,6,DDR3-MODULE NOMINAL VDD,02\r
+MT8KTF51264HZ-1G4E1,7,DDR3-MODULE RANKS   DEVICE DQ COUNT,01\r
+MT8KTF51264HZ-1G4E1,8,DDR3-ECC TAG  MODULE MEMORY BUS WIDTH,03\r
+MT8KTF51264HZ-1G4E1,9,DDR3-FINE TIMEBASE DIVIDEND/DIVISOR,11\r
+MT8KTF51264HZ-1G4E1,10,DDR3-MEDIUM TIMEBASE DIVIDEND,01\r
+MT8KTF51264HZ-1G4E1,11,DDR3-MEDIUM TIMEBASE DIVISOR,08\r
+MT8KTF51264HZ-1G4E1,12,DDR3-MIN SDRAM CYCLE TIME (TCKMIN),0C\r
+MT8KTF51264HZ-1G4E1,13,DDR3-BYTE 13 RESERVED,00\r
+MT8KTF51264HZ-1G4E1,14,DDR3-CAS LATENCIES SUPPORTED (CL4 => CL11),7E\r
+MT8KTF51264HZ-1G4E1,15,DDR3-CAS LATENCIES SUPPORTED (CL12 => CL18),00\r
+MT8KTF51264HZ-1G4E1,16,DDR3-MIN CAS LATENCY TIME (TAAMIN),69\r
+MT8KTF51264HZ-1G4E1,17,DDR3-MIN WRITE RECOVERY TIME (TWRMIN),78\r
+MT8KTF51264HZ-1G4E1,18,DDR3-MIN RAS# TO CAS# DELAY (TRCDMIN),69\r
+MT8KTF51264HZ-1G4E1,19,DDR3-MIN ROW ACTIVE TO ROW ACTIVE DELAY (TRRDMIN),30\r
+MT8KTF51264HZ-1G4E1,20,DDR3-MIN ROW PRECHARGE DELAY (TRPMIN),69\r
+MT8KTF51264HZ-1G4E1,21,DDR3-UPPER NIBBLE FOR TRAS  TRC,11\r
+MT8KTF51264HZ-1G4E1,22,DDR3-MIN ACTIVE TO PRECHARGE DELAY (TRASMIN),20\r
+MT8KTF51264HZ-1G4E1,23,DDR3-MIN ACTIVE TO ACTIVE/REFRESH DELAY (TRCMIN),89\r
+MT8KTF51264HZ-1G4E1,24,DDR3-MIN REFRESH RECOVERY DELAY (TRFCMIN) LSB,20\r
+MT8KTF51264HZ-1G4E1,25,DDR3-MIN REFRESH RECOVERY DELAY (TRFCMIN) MSB,08\r
+MT8KTF51264HZ-1G4E1,26,DDR3-MIN INTERNAL WRITE TO READ CMD DELAY (TWTRMIN),3C\r
+MT8KTF51264HZ-1G4E1,27,DDR3-MIN INTERNAL READ TO PRECHARGE CMD DELAY (TRTPMIN),3C\r
+MT8KTF51264HZ-1G4E1,28,DDR3-MIN FOUR ACTIVE WINDOW DELAY (TFAWMIN) MSB,00\r
+MT8KTF51264HZ-1G4E1,29,DDR3-MIN FOUR ACTIVE WINDOW DELAY (TFAWMIN) LSB,F0\r
+MT8KTF51264HZ-1G4E1,30,DDR3-SDRAM DEVICE OUTPUT DRIVERS SUPPORTED,83\r
+MT8KTF51264HZ-1G4E1,31,DDR3-SDRAM DEVICE THERMAL  REFRESH OPTIONS,05\r
+MT8KTF51264HZ-1G4E1,32,DDR3-MODULE THERMAL SENSOR,00\r
+MT8KTF51264HZ-1G4E1,33,DDR3-SDRAM DEVICE TYPE,00\r
+MT8KTF51264HZ-1G4E1,34,DDR3-FINE OFFSET FOR TCKMIN,00\r
+MT8KTF51264HZ-1G4E1,35,DDR3-FINE OFFSET FOR TAAMIN,00\r
+MT8KTF51264HZ-1G4E1,36,DDR3-FINE OFFSET FOR TRCDMIN,00\r
+MT8KTF51264HZ-1G4E1,37,DDR3-FINE OFFSET FOR TRPMIN,00\r
+MT8KTF51264HZ-1G4E1,38,DDR3-FINE OFFSET FOR TRCMIN,00\r
+MT8KTF51264HZ-1G4E1,39,DDR3-BYTE 39 RESERVED,00\r
+MT8KTF51264HZ-1G4E1,40,DDR3-BYTE 40 RESERVED,00\r
+MT8KTF51264HZ-1G4E1,41,DDR3-PTRR TMAW  MAC,84\r
+MT8KTF51264HZ-1G4E1,42-59,DDR3-RESERVED BYTES 42-59,000000000000000000000000000000000000\r
+MT8KTF51264HZ-1G4E1,60,DDR3-RC REV  NOM MODULE HEIGHT,0F\r
+MT8KTF51264HZ-1G4E1,61,DDR3-MODULE THICKNESS (MAX),11\r
+MT8KTF51264HZ-1G4E1,62,DDR3-REFERENCE RAW CARD ID,41\r
+MT8KTF51264HZ-1G4E1,63,DDR3 - ADDRESS MAPPING/MODULE ATTRIBUTES,00\r
+MT8KTF51264HZ-1G4E1,64,DDR3-HEATSPREADER SOLUTION,00\r
+MT8KTF51264HZ-1G4E1,65,DDR3-REGISTER VENDOR ID (LSB),00\r
+MT8KTF51264HZ-1G4E1,66,DDR3-REGISTER VENDOR ID (MSB),00\r
+MT8KTF51264HZ-1G4E1,67,DDR3-REGISTER REVISON NUMBER,00\r
+MT8KTF51264HZ-1G4E1,68,DDR3-REGISTER TYPE,00\r
+MT8KTF51264HZ-1G4E1,69,DDR3-REG CTRL WORDS 1 AND ZERO,00\r
+MT8KTF51264HZ-1G4E1,70,DDR3-REG CTRL WORDS 3 AND 2,00\r
+MT8KTF51264HZ-1G4E1,71,DDR3-REG CTRL WORDS 5 AND 4,00\r
+MT8KTF51264HZ-1G4E1,72,DDR3-REG CTRL WORDS 7 AND 6,00\r
+MT8KTF51264HZ-1G4E1,73,DDR3-REG CTRL WORDS 9 AND 8,00\r
+MT8KTF51264HZ-1G4E1,74,DDR3-REG CTRL WORDS 11 AND 10,00\r
+MT8KTF51264HZ-1G4E1,75,DDR3-REG CTRL WORDS 13 AND 12,00\r
+MT8KTF51264HZ-1G4E1,76,DDR3-REG CTRL WORDS 15 AND 14,00\r
+MT8KTF51264HZ-1G4E1,77-116,DDR3-RESERVED BYTES 77-116,00000000000000000000000000000000000000000000000000000000000000000000000000000000\r
+MT8KTF51264HZ-1G4E1,117,DDR3-MODULE MFR ID (LSB),80\r
+MT8KTF51264HZ-1G4E1,118,DDR3-MODULE MFR ID (MSB),2C\r
+MT8KTF51264HZ-1G4E1,119,DDR3-MODULE MFR LOCATION ID,00\r
+MT8KTF51264HZ-1G4E1,120,DDR3-MODULE MFR YEAR,00\r
+MT8KTF51264HZ-1G4E1,121,DDR3-MODULE MFR WEEK,00\r
+MT8KTF51264HZ-1G4E1,122-125,DDR3-MODULE SERIAL NUMBER,00000000\r
+MT8KTF51264HZ-1G4E1,126-127,DDR3-CRC,3D17\r
+MT8KTF51264HZ-1G4E1,128-145,DDR3-MODULE PART NUMBER,8KTF51264HZ-1G4E1\r
+MT8KTF51264HZ-1G4E1,146,DDR3-MODULE DIE REV,45\r
+MT8KTF51264HZ-1G4E1,147,DDR3-MODULE PCB REV,31\r
+MT8KTF51264HZ-1G4E1,148,DDR3-DRAM DEVICE MFR ID (LSB),80\r
+MT8KTF51264HZ-1G4E1,149,DDR3-DRAM DEVICE MFR (MSB),2C\r
+MT8KTF51264HZ-1G4E1,150-175,DDR3-MFR RESERVED BYTES 150-175,0000000000000000000000000000000000000000000000000000\r
+MT8KTF51264HZ-1G4E1,176-255,DDR3-CUSTOMER RESERVED BYTES 176-255,FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
\ No newline at end of file
diff --git a/test/spd_data/MT8KTF51264HZ-1G6E1.csv b/test/spd_data/MT8KTF51264HZ-1G6E1.csv
new file mode 100644 (file)
index 0000000..0bb0cbd
--- /dev/null
@@ -0,0 +1,76 @@
+Part Number,Byte Number,Byte Description,Byte Value\r
+MT8KTF51264HZ-1G6E1,0,DDR3-CRC RANGE; EEPROM BYTES; BYTES USED,92\r
+MT8KTF51264HZ-1G6E1,1,DDR3-SPD REVISON,13\r
+MT8KTF51264HZ-1G6E1,2,DDR3-DRAM DEVICE TYPE,0B\r
+MT8KTF51264HZ-1G6E1,3,DDR3-MODULE TYPE (FORM FACTOR),03\r
+MT8KTF51264HZ-1G6E1,4,DDR3-SDRAM DEVICE DENSITY  BANKS,04\r
+MT8KTF51264HZ-1G6E1,5,DDR3-SDRAM DEVICE ROW  COLUMN COUNT,21\r
+MT8KTF51264HZ-1G6E1,6,DDR3-MODULE NOMINAL VDD,02\r
+MT8KTF51264HZ-1G6E1,7,DDR3-MODULE RANKS   DEVICE DQ COUNT,01\r
+MT8KTF51264HZ-1G6E1,8,DDR3-ECC TAG  MODULE MEMORY BUS WIDTH,03\r
+MT8KTF51264HZ-1G6E1,9,DDR3-FINE TIMEBASE DIVIDEND/DIVISOR,11\r
+MT8KTF51264HZ-1G6E1,10,DDR3-MEDIUM TIMEBASE DIVIDEND,01\r
+MT8KTF51264HZ-1G6E1,11,DDR3-MEDIUM TIMEBASE DIVISOR,08\r
+MT8KTF51264HZ-1G6E1,12,DDR3-MIN SDRAM CYCLE TIME (TCKMIN),0A\r
+MT8KTF51264HZ-1G6E1,13,DDR3-BYTE 13 RESERVED,00\r
+MT8KTF51264HZ-1G6E1,14,DDR3-CAS LATENCIES SUPPORTED (CL4 => CL11),FE\r
+MT8KTF51264HZ-1G6E1,15,DDR3-CAS LATENCIES SUPPORTED (CL12 => CL18),00\r
+MT8KTF51264HZ-1G6E1,16,DDR3-MIN CAS LATENCY TIME (TAAMIN),69\r
+MT8KTF51264HZ-1G6E1,17,DDR3-MIN WRITE RECOVERY TIME (TWRMIN),78\r
+MT8KTF51264HZ-1G6E1,18,DDR3-MIN RAS# TO CAS# DELAY (TRCDMIN),69\r
+MT8KTF51264HZ-1G6E1,19,DDR3-MIN ROW ACTIVE TO ROW ACTIVE DELAY (TRRDMIN),30\r
+MT8KTF51264HZ-1G6E1,20,DDR3-MIN ROW PRECHARGE DELAY (TRPMIN),69\r
+MT8KTF51264HZ-1G6E1,21,DDR3-UPPER NIBBLE FOR TRAS  TRC,11\r
+MT8KTF51264HZ-1G6E1,22,DDR3-MIN ACTIVE TO PRECHARGE DELAY (TRASMIN),18\r
+MT8KTF51264HZ-1G6E1,23,DDR3-MIN ACTIVE TO ACTIVE/REFRESH DELAY (TRCMIN),81\r
+MT8KTF51264HZ-1G6E1,24,DDR3-MIN REFRESH RECOVERY DELAY (TRFCMIN) LSB,20\r
+MT8KTF51264HZ-1G6E1,25,DDR3-MIN REFRESH RECOVERY DELAY (TRFCMIN) MSB,08\r
+MT8KTF51264HZ-1G6E1,26,DDR3-MIN INTERNAL WRITE TO READ CMD DELAY (TWTRMIN),3C\r
+MT8KTF51264HZ-1G6E1,27,DDR3-MIN INTERNAL READ TO PRECHARGE CMD DELAY (TRTPMIN),3C\r
+MT8KTF51264HZ-1G6E1,28,DDR3-MIN FOUR ACTIVE WINDOW DELAY (TFAWMIN) MSB,00\r
+MT8KTF51264HZ-1G6E1,29,DDR3-MIN FOUR ACTIVE WINDOW DELAY (TFAWMIN) LSB,F0\r
+MT8KTF51264HZ-1G6E1,30,DDR3-SDRAM DEVICE OUTPUT DRIVERS SUPPORTED,83\r
+MT8KTF51264HZ-1G6E1,31,DDR3-SDRAM DEVICE THERMAL  REFRESH OPTIONS,05\r
+MT8KTF51264HZ-1G6E1,32,DDR3-MODULE THERMAL SENSOR,00\r
+MT8KTF51264HZ-1G6E1,33,DDR3-SDRAM DEVICE TYPE,00\r
+MT8KTF51264HZ-1G6E1,34,DDR3-FINE OFFSET FOR TCKMIN,00\r
+MT8KTF51264HZ-1G6E1,35,DDR3-FINE OFFSET FOR TAAMIN,00\r
+MT8KTF51264HZ-1G6E1,36,DDR3-FINE OFFSET FOR TRCDMIN,00\r
+MT8KTF51264HZ-1G6E1,37,DDR3-FINE OFFSET FOR TRPMIN,00\r
+MT8KTF51264HZ-1G6E1,38,DDR3-FINE OFFSET FOR TRCMIN,00\r
+MT8KTF51264HZ-1G6E1,39,DDR3-BYTE 39 RESERVED,00\r
+MT8KTF51264HZ-1G6E1,40,DDR3-BYTE 40 RESERVED,00\r
+MT8KTF51264HZ-1G6E1,41,DDR3-PTRR TMAW  MAC,84\r
+MT8KTF51264HZ-1G6E1,42-59,DDR3-RESERVED BYTES 42-59,000000000000000000000000000000000000\r
+MT8KTF51264HZ-1G6E1,60,DDR3-RC REV  NOM MODULE HEIGHT,0F\r
+MT8KTF51264HZ-1G6E1,61,DDR3-MODULE THICKNESS (MAX),11\r
+MT8KTF51264HZ-1G6E1,62,DDR3-REFERENCE RAW CARD ID,41\r
+MT8KTF51264HZ-1G6E1,63,DDR3 - ADDRESS MAPPING/MODULE ATTRIBUTES,00\r
+MT8KTF51264HZ-1G6E1,64,DDR3-HEATSPREADER SOLUTION,00\r
+MT8KTF51264HZ-1G6E1,65,DDR3-REGISTER VENDOR ID (LSB),00\r
+MT8KTF51264HZ-1G6E1,66,DDR3-REGISTER VENDOR ID (MSB),00\r
+MT8KTF51264HZ-1G6E1,67,DDR3-REGISTER REVISON NUMBER,00\r
+MT8KTF51264HZ-1G6E1,68,DDR3-REGISTER TYPE,00\r
+MT8KTF51264HZ-1G6E1,69,DDR3-REG CTRL WORDS 1 AND ZERO,00\r
+MT8KTF51264HZ-1G6E1,70,DDR3-REG CTRL WORDS 3 AND 2,00\r
+MT8KTF51264HZ-1G6E1,71,DDR3-REG CTRL WORDS 5 AND 4,00\r
+MT8KTF51264HZ-1G6E1,72,DDR3-REG CTRL WORDS 7 AND 6,00\r
+MT8KTF51264HZ-1G6E1,73,DDR3-REG CTRL WORDS 9 AND 8,00\r
+MT8KTF51264HZ-1G6E1,74,DDR3-REG CTRL WORDS 11 AND 10,00\r
+MT8KTF51264HZ-1G6E1,75,DDR3-REG CTRL WORDS 13 AND 12,00\r
+MT8KTF51264HZ-1G6E1,76,DDR3-REG CTRL WORDS 15 AND 14,00\r
+MT8KTF51264HZ-1G6E1,77-116,DDR3-RESERVED BYTES 77-116,00000000000000000000000000000000000000000000000000000000000000000000000000000000\r
+MT8KTF51264HZ-1G6E1,117,DDR3-MODULE MFR ID (LSB),80\r
+MT8KTF51264HZ-1G6E1,118,DDR3-MODULE MFR ID (MSB),2C\r
+MT8KTF51264HZ-1G6E1,119,DDR3-MODULE MFR LOCATION ID,00\r
+MT8KTF51264HZ-1G6E1,120,DDR3-MODULE MFR YEAR,00\r
+MT8KTF51264HZ-1G6E1,121,DDR3-MODULE MFR WEEK,00\r
+MT8KTF51264HZ-1G6E1,122-125,DDR3-MODULE SERIAL NUMBER,00000000\r
+MT8KTF51264HZ-1G6E1,126-127,DDR3-CRC,E8C9\r
+MT8KTF51264HZ-1G6E1,128-145,DDR3-MODULE PART NUMBER,8KTF51264HZ-1G6E1\r
+MT8KTF51264HZ-1G6E1,146,DDR3-MODULE DIE REV,45\r
+MT8KTF51264HZ-1G6E1,147,DDR3-MODULE PCB REV,31\r
+MT8KTF51264HZ-1G6E1,148,DDR3-DRAM DEVICE MFR ID (LSB),80\r
+MT8KTF51264HZ-1G6E1,149,DDR3-DRAM DEVICE MFR (MSB),2C\r
+MT8KTF51264HZ-1G6E1,150-175,DDR3-MFR RESERVED BYTES 150-175,0000000000000000000000000000000000000000000000000000\r
+MT8KTF51264HZ-1G6E1,176-255,DDR3-CUSTOMER RESERVED BYTES 176-255,FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
\ No newline at end of file
diff --git a/test/spd_data/MT8KTF51264HZ-1G9P1.csv b/test/spd_data/MT8KTF51264HZ-1G9P1.csv
new file mode 100644 (file)
index 0000000..cd7a7ef
--- /dev/null
@@ -0,0 +1,76 @@
+Part Number,Byte Number,Byte Description,Byte Value\r
+MT8KTF51264HZ-1G9P1,0,DDR3-CRC RANGE; EEPROM BYTES; BYTES USED,92\r
+MT8KTF51264HZ-1G9P1,1,DDR3-SPD REVISON,13\r
+MT8KTF51264HZ-1G9P1,2,DDR3-DRAM DEVICE TYPE,0B\r
+MT8KTF51264HZ-1G9P1,3,DDR3-MODULE TYPE (FORM FACTOR),03\r
+MT8KTF51264HZ-1G9P1,4,DDR3-SDRAM DEVICE DENSITY  BANKS,04\r
+MT8KTF51264HZ-1G9P1,5,DDR3-SDRAM DEVICE ROW  COLUMN COUNT,21\r
+MT8KTF51264HZ-1G9P1,6,DDR3-MODULE NOMINAL VDD,02\r
+MT8KTF51264HZ-1G9P1,7,DDR3-MODULE RANKS   DEVICE DQ COUNT,01\r
+MT8KTF51264HZ-1G9P1,8,DDR3-ECC TAG  MODULE MEMORY BUS WIDTH,03\r
+MT8KTF51264HZ-1G9P1,9,DDR3-FINE TIMEBASE DIVIDEND/DIVISOR,11\r
+MT8KTF51264HZ-1G9P1,10,DDR3-MEDIUM TIMEBASE DIVIDEND,01\r
+MT8KTF51264HZ-1G9P1,11,DDR3-MEDIUM TIMEBASE DIVISOR,08\r
+MT8KTF51264HZ-1G9P1,12,DDR3-MIN SDRAM CYCLE TIME (TCKMIN),09\r
+MT8KTF51264HZ-1G9P1,13,DDR3-BYTE 13 RESERVED,00\r
+MT8KTF51264HZ-1G9P1,14,DDR3-CAS LATENCIES SUPPORTED (CL4 => CL11),FE\r
+MT8KTF51264HZ-1G9P1,15,DDR3-CAS LATENCIES SUPPORTED (CL12 => CL18),02\r
+MT8KTF51264HZ-1G9P1,16,DDR3-MIN CAS LATENCY TIME (TAAMIN),69\r
+MT8KTF51264HZ-1G9P1,17,DDR3-MIN WRITE RECOVERY TIME (TWRMIN),78\r
+MT8KTF51264HZ-1G9P1,18,DDR3-MIN RAS# TO CAS# DELAY (TRCDMIN),69\r
+MT8KTF51264HZ-1G9P1,19,DDR3-MIN ROW ACTIVE TO ROW ACTIVE DELAY (TRRDMIN),28\r
+MT8KTF51264HZ-1G9P1,20,DDR3-MIN ROW PRECHARGE DELAY (TRPMIN),69\r
+MT8KTF51264HZ-1G9P1,21,DDR3-UPPER NIBBLE FOR TRAS  TRC,11\r
+MT8KTF51264HZ-1G9P1,22,DDR3-MIN ACTIVE TO PRECHARGE DELAY (TRASMIN),10\r
+MT8KTF51264HZ-1G9P1,23,DDR3-MIN ACTIVE TO ACTIVE/REFRESH DELAY (TRCMIN),79\r
+MT8KTF51264HZ-1G9P1,24,DDR3-MIN REFRESH RECOVERY DELAY (TRFCMIN) LSB,20\r
+MT8KTF51264HZ-1G9P1,25,DDR3-MIN REFRESH RECOVERY DELAY (TRFCMIN) MSB,08\r
+MT8KTF51264HZ-1G9P1,26,DDR3-MIN INTERNAL WRITE TO READ CMD DELAY (TWTRMIN),3C\r
+MT8KTF51264HZ-1G9P1,27,DDR3-MIN INTERNAL READ TO PRECHARGE CMD DELAY (TRTPMIN),3C\r
+MT8KTF51264HZ-1G9P1,28,DDR3-MIN FOUR ACTIVE WINDOW DELAY (TFAWMIN) MSB,00\r
+MT8KTF51264HZ-1G9P1,29,DDR3-MIN FOUR ACTIVE WINDOW DELAY (TFAWMIN) LSB,D8\r
+MT8KTF51264HZ-1G9P1,30,DDR3-SDRAM DEVICE OUTPUT DRIVERS SUPPORTED,83\r
+MT8KTF51264HZ-1G9P1,31,DDR3-SDRAM DEVICE THERMAL  REFRESH OPTIONS,05\r
+MT8KTF51264HZ-1G9P1,32,DDR3-MODULE THERMAL SENSOR,00\r
+MT8KTF51264HZ-1G9P1,33,DDR3-SDRAM DEVICE TYPE,00\r
+MT8KTF51264HZ-1G9P1,34,DDR3-FINE OFFSET FOR TCKMIN,CA\r
+MT8KTF51264HZ-1G9P1,35,DDR3-FINE OFFSET FOR TAAMIN,00\r
+MT8KTF51264HZ-1G9P1,36,DDR3-FINE OFFSET FOR TRCDMIN,00\r
+MT8KTF51264HZ-1G9P1,37,DDR3-FINE OFFSET FOR TRPMIN,00\r
+MT8KTF51264HZ-1G9P1,38,DDR3-FINE OFFSET FOR TRCMIN,00\r
+MT8KTF51264HZ-1G9P1,39,DDR3-BYTE 39 RESERVED,00\r
+MT8KTF51264HZ-1G9P1,40,DDR3-BYTE 40 RESERVED,00\r
+MT8KTF51264HZ-1G9P1,41,DDR3-PTRR TMAW  MAC,88\r
+MT8KTF51264HZ-1G9P1,42-59,DDR3-RESERVED BYTES 42-59,000000000000000000000000000000000000\r
+MT8KTF51264HZ-1G9P1,60,DDR3-RC REV  NOM MODULE HEIGHT,0F\r
+MT8KTF51264HZ-1G9P1,61,DDR3-MODULE THICKNESS (MAX),11\r
+MT8KTF51264HZ-1G9P1,62,DDR3-REFERENCE RAW CARD ID,01\r
+MT8KTF51264HZ-1G9P1,63,DDR3 - ADDRESS MAPPING/MODULE ATTRIBUTES,00\r
+MT8KTF51264HZ-1G9P1,64,DDR3-HEATSPREADER SOLUTION,00\r
+MT8KTF51264HZ-1G9P1,65,DDR3-REGISTER VENDOR ID (LSB),00\r
+MT8KTF51264HZ-1G9P1,66,DDR3-REGISTER VENDOR ID (MSB),00\r
+MT8KTF51264HZ-1G9P1,67,DDR3-REGISTER REVISON NUMBER,00\r
+MT8KTF51264HZ-1G9P1,68,DDR3-REGISTER TYPE,00\r
+MT8KTF51264HZ-1G9P1,69,DDR3-REG CTRL WORDS 1 AND ZERO,00\r
+MT8KTF51264HZ-1G9P1,70,DDR3-REG CTRL WORDS 3 AND 2,00\r
+MT8KTF51264HZ-1G9P1,71,DDR3-REG CTRL WORDS 5 AND 4,00\r
+MT8KTF51264HZ-1G9P1,72,DDR3-REG CTRL WORDS 7 AND 6,00\r
+MT8KTF51264HZ-1G9P1,73,DDR3-REG CTRL WORDS 9 AND 8,00\r
+MT8KTF51264HZ-1G9P1,74,DDR3-REG CTRL WORDS 11 AND 10,00\r
+MT8KTF51264HZ-1G9P1,75,DDR3-REG CTRL WORDS 13 AND 12,00\r
+MT8KTF51264HZ-1G9P1,76,DDR3-REG CTRL WORDS 15 AND 14,00\r
+MT8KTF51264HZ-1G9P1,77-116,DDR3-RESERVED BYTES 77-116,00000000000000000000000000000000000000000000000000000000000000000000000000000000\r
+MT8KTF51264HZ-1G9P1,117,DDR3-MODULE MFR ID (LSB),80\r
+MT8KTF51264HZ-1G9P1,118,DDR3-MODULE MFR ID (MSB),2C\r
+MT8KTF51264HZ-1G9P1,119,DDR3-MODULE MFR LOCATION ID,00\r
+MT8KTF51264HZ-1G9P1,120,DDR3-MODULE MFR YEAR,00\r
+MT8KTF51264HZ-1G9P1,121,DDR3-MODULE MFR WEEK,00\r
+MT8KTF51264HZ-1G9P1,122-125,DDR3-MODULE SERIAL NUMBER,00000000\r
+MT8KTF51264HZ-1G9P1,126-127,DDR3-CRC,46D3\r
+MT8KTF51264HZ-1G9P1,128-145,DDR3-MODULE PART NUMBER,8KTF51264HZ-1G9P1\r
+MT8KTF51264HZ-1G9P1,146,DDR3-MODULE DIE REV,50\r
+MT8KTF51264HZ-1G9P1,147,DDR3-MODULE PCB REV,31\r
+MT8KTF51264HZ-1G9P1,148,DDR3-DRAM DEVICE MFR ID (LSB),80\r
+MT8KTF51264HZ-1G9P1,149,DDR3-DRAM DEVICE MFR (MSB),2C\r
+MT8KTF51264HZ-1G9P1,150-175,DDR3-MFR RESERVED BYTES 150-175,0000000000000000000000000000000000000000000000000000\r
+MT8KTF51264HZ-1G9P1,176-255,DDR3-CUSTOMER RESERVED BYTES 176-255,FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
\ No newline at end of file
diff --git a/test/summary/summary.css b/test/summary/summary.css
new file mode 100644 (file)
index 0000000..400225f
--- /dev/null
@@ -0,0 +1,100 @@
+body {
+  font-family: 'Roboto', sans-serif;
+}
+
+footer {
+  text-align: center;
+  font-size: 10px;
+  padding: 20px;
+}
+
+.dataTables_filter {
+  margin: 15px 50px 10px 50px;
+}
+
+.dataTables_filter input {
+  width: 400px;
+}
+
+.table-select {
+  width: 100%;
+  margin: 0 auto;
+}
+
+.table-select ul {
+  list-style-type: none;
+  margin: 0;
+  padding: 0;
+  overflow: hidden;
+}
+
+.table-select li {
+  float: left;
+}
+
+.table-select li a {
+  display: block;
+  padding: 10px 0px;
+  margin: 0px 20px;
+  text-align: center;
+  text-decoration: none;
+  font-size: 18px;
+  color:inherit;
+  border-bottom: 1px solid;
+  border-color: #ccc;
+  transition: 0.2s;
+}
+
+.table-select li a:hover {
+  border-color: #111;
+}
+
+/* :focus did not work here (calling .focus() could not activate it), so an explicit class is used instead */
+/* .table-select li a:focus { */
+/*   border-color: #222; */
+/* } */
+.table-select-active {
+  border-color: #111 !important;
+}
+
+.tables-wrapper {
+  width: 100%;
+  margin: auto;
+}
+
+.loading {
+  z-index: 999;
+  position: absolute;
+  top: 50%;
+  left: 50%;
+  margin-right: -50%;
+  transform: translate(-50%, -50%);
+}
+
+/* Loading animation */
+.lds-dual-ring {
+  display: inline-block;
+  width: 80px;
+  height: 80px;
+}
+.lds-dual-ring:after {
+  content: " ";
+  display: block;
+  width: 64px;
+  height: 64px;
+  margin: 8px;
+  border-radius: 50%;
+  border: 6px solid #fff;
+  border-color: #aaa transparent #aaa transparent;
+  animation: lds-dual-ring 1.2s linear infinite;
+}
+@keyframes lds-dual-ring {
+  0% {
+    transform: rotate(0deg);
+  }
+  100% {
+    transform: rotate(360deg);
+  }
+}
+
+/* vim: set ts=2 sw=2: */
diff --git a/test/summary/summary.html.jinja2 b/test/summary/summary.html.jinja2
new file mode 100644 (file)
index 0000000..4480e6c
--- /dev/null
@@ -0,0 +1,160 @@
+<!DOCTYPE html>
+<html>
+  <head>
+    <meta charset="UTF-8" />
+    <meta name="viewport" content="width=device-width, initial-scale=1">
+    <title>{{ title }}</title>
+
+    <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.4.1/jquery.min.js"></script>
+
+    <script type="text/javascript" charset="utf8" src="https://cdn.datatables.net/1.10.19/js/jquery.dataTables.min.js"></script>
+    <link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/1.10.19/css/jquery.dataTables.css">
+    <script type="text/javascript" charset="utf8" src="https://cdn.datatables.net/fixedheader/3.1.5/js/dataTables.fixedHeader.min.js"></script>
+    <link rel="stylesheet" type="text/css" href="https://cdn.datatables.net/fixedheader/3.1.5/css/fixedHeader.dataTables.min.css">
+
+    <style type="text/css" media="all">
+{% include 'summary.css' %}
{# Size the table selection elements so that they take up the whole width #}
+.table-select li {
+  width: calc(100% / {{ tables.keys() | length }});
+}
+    </style>
+  </head>
+  <body>
+    {# Loading symbol that gets hidden after initialisation of all tables #}
+    <div class="loading lds-dual-ring"></div>
+
+    {# Bar for selecting the current data table #}
+    <div class="table-select">
+      <ul>
+        {% for id, name in names.items() %}
+          <li id='{{ id }}-button'><a href="#">{{ name }}</a></li>
+        {% endfor %}
+      </ul>
+      {# <hr/> #}
+    </div>
+
+    {# Display of the current data table #}
+    <div class="tables-wrapper">
+      {% for id, table in tables.items() %}
+        {# Hide the tables so they are not shown before all DataTables are loaded #}
+        <div id="{{ id }}-div" style="display: none;">
+          {{ table }}
+        </div>
+      {% endfor %}
+    </div>
+  </body>
+
+  <footer id="footer">
+    <a href="https://github.com/enjoy-digital/litedram">LiteDRAM</a> is part of <a href="https://github.com/enjoy-digital/litex">LiteX</a>.
+    <br>
+    Generated using
+    <a href="https://github.com/enjoy-digital/litedram/blob/{{ revision }}/{{ script_path }}">{{ script_path }}</a>,
+    revision
+    <a href="https://github.com/enjoy-digital/litedram/commit/{{ revision }}">{{ revision_short }}</a>,
+    {{ generation_date }}.
+  </footer>
+
+  {# Script goes last so that, for large tables, some content appears on the page before the tables are initialised #}
+  <script>
+    {# Ids of the data tables #}
+    table_ids = [
+      {% for id in tables.keys() %}
+        '{{ id }}',
+    {% endfor %}
+    ];
+
+    {# Show table with given id and hide all the others #}
+    show_table = function(id) {
+      if (!table_ids.includes(id)) {
+        console.log('Error: show_table(' + id + ')');
+        return;
+      }
+      for (var table_div of $('.tables-wrapper').children()) {
+        if (table_div.id) {
+          var div = $('#' + table_div.id);
+          if (div.attr('id') == id + '-div') {
+            div.show();
+          } else {
+            div.hide();
+          }
+        }
+      }
+    }
+
+    // Sort human-readable values of the form "123 Kb"; only the first letter of the unit is used
+    jQuery.fn.dataTable.ext.type.order['file-size-pre'] = function(data) {
+      var matches = data.match(/^(\d+(?:\.\d+)?)\s*(\S+)/i);
+      var multipliers = {
+        k: Math.pow(2, 10),
+        m: Math.pow(2, 20),
+        g: Math.pow(2, 30),
+        t: Math.pow(2, 40),
+      };
+
+      if (matches) {
+        var float = parseFloat(matches[1]);
+        var prefix = matches[2].toLowerCase()[0];
+        var multiplier = multipliers[prefix];
+        if (multiplier) {
+          float = float * multiplier;
+        }
+        return float;
+      } else {
+        return -1;
+      };
+    };
+
+    {# Initialization after DOM has been loaded #}
+    $(document).ready(function() {
+      // generate data tables
+      for (var id of table_ids) {
+        // add the human-readable class to all bandwidth/latency/efficiency columns
+        var columns = $('#' + id + ' > thead > tr > th').filter(function(index) {
+          var name = $(this).text().toLowerCase();
+          return name.includes('bandwidth') || name.includes('latency') || name.includes('efficiency');
+        });
+        columns.addClass('data-with-unit-human-readable');
+
+        // construct data table
+        table = $('#' + id);
+        table.DataTable({
+          paging: false,
+          fixedHeader: true,
+          columnDefs: [
+            { type: 'file-size', targets: [ 'data-with-unit-human-readable' ] },
+            { className: 'dt-body-right', targets: [ '_all' ] },
+            { className: 'dt-head-center', targets: [ '_all' ] },
+          ]
+        });
+        table.addClass("stripe");
+        table.addClass("hover");
+        table.addClass("order-column");
+        table.addClass("cell-border");
+        table.addClass("row-border");
+      }
+
+      // add click handlers that change the table being shown
+      for (var id of table_ids) {
+        var ahref = $('#' + id + '-button a');
+        // use nested closure so that we avoid the situation
+        // where all click handlers end up with the last id
+        ahref.click(function(table_id) {
+          return function() {
+            // get rid of this class after first click
+            $('.table-select a').removeClass('table-select-active');
+            $(this).addClass('table-select-active');
+            show_table(table_id);
+          }
+        }(id))
+      }
+
+      // show the first one
+      $('#' + table_ids[0] + '-button a:first').click();
+
+      // hide all elements of class loading
+      $('.loading').hide();
+    });
+  </script>
+</html>
+{# vim: set ts=2 sts=2 sw=2 et: #}
diff --git a/test/test_adaptation.py b/test/test_adaptation.py
new file mode 100644 (file)
index 0000000..e407d4c
--- /dev/null
@@ -0,0 +1,283 @@
+# This file is Copyright (c) 2017-2019 Florent Kermarrec <florent@enjoy-digital.fr>
+# This file is Copyright (c) 2020 Antmicro <www.antmicro.com>
+# License: BSD
+
+import unittest
+
+from migen import *
+
+from litex.soc.interconnect.stream import *
+
+from litedram.common import LiteDRAMNativeWritePort, LiteDRAMNativeReadPort
+from litedram.frontend.adaptation import LiteDRAMNativePortConverter, LiteDRAMNativePortCDC
+
+from test.common import *
+
+from litex.gen.sim import *
+
+
+class ConverterDUT(Module):
+    def __init__(self, user_data_width, native_data_width, mem_depth):
+        self.write_user_port     = LiteDRAMNativeWritePort(address_width=32, data_width=user_data_width)
+        self.write_crossbar_port = LiteDRAMNativeWritePort(address_width=32, data_width=native_data_width)
+        self.read_user_port      = LiteDRAMNativeReadPort( address_width=32, data_width=user_data_width)
+        self.read_crossbar_port  = LiteDRAMNativeReadPort( address_width=32, data_width=native_data_width)
+
+        # Memory
+        self.memory = DRAMMemory(native_data_width, mem_depth)
+
+    def do_finalize(self):
+        self.submodules.write_converter = LiteDRAMNativePortConverter(
+            self.write_user_port, self.write_crossbar_port)
+        self.submodules.read_converter = LiteDRAMNativePortConverter(
+            self.read_user_port, self.read_crossbar_port)
+
+    def read(self, address, read_data=True):
+        port = self.read_user_port
+        yield port.cmd.valid.eq(1)
+        yield port.cmd.we.eq(0)
+        yield port.cmd.addr.eq(address)
+        yield
+        while (yield port.cmd.ready) == 0:
+            yield
+        yield port.cmd.valid.eq(0)
+        yield
+        if read_data:
+            while (yield port.rdata.valid) == 0:
+                yield
+            data = (yield port.rdata.data)
+            yield port.rdata.ready.eq(1)
+            yield
+            yield port.rdata.ready.eq(0)
+            yield
+            return data
+
+    def write(self, address, data, we=None):
+        if we is None:
+            we = 2**self.write_user_port.wdata.we.nbits - 1
+        if self.write_user_port.data_width > self.write_crossbar_port.data_width:
+            yield from self._write_down(address, data, we)
+        else:
+            yield from self._write_up(address, data, we)
+
+    def _write_up(self, address, data, we):
+        port = self.write_user_port
+        yield port.cmd.valid.eq(1)
+        yield port.cmd.we.eq(1)
+        yield port.cmd.addr.eq(address)
+        yield
+        while (yield port.cmd.ready) == 0:
+            yield
+        yield port.cmd.valid.eq(0)
+        yield
+        yield port.wdata.valid.eq(1)
+        yield port.wdata.data.eq(data)
+        yield port.wdata.we.eq(we)
+        yield
+        while (yield port.wdata.ready) == 0:
+            yield
+        yield port.wdata.valid.eq(0)
+        yield
+
+    def _write_down(self, address, data, we):
+        # The down converter must have all the data available along with the cmd; it
+        # asserts user_port.cmd.ready only after it has sent all input words.
+        port = self.write_user_port
+        yield port.cmd.valid.eq(1)
+        yield port.cmd.we.eq(1)
+        yield port.cmd.addr.eq(address)
+        yield port.wdata.valid.eq(1)
+        yield port.wdata.data.eq(data)
+        yield port.wdata.we.eq(we)
+        yield
+        # Ready goes high only after the StrideConverter has copied all words
+        while (yield port.cmd.ready) == 0:
+            yield
+        yield port.cmd.valid.eq(0)
+        yield
+        while (yield port.wdata.ready) == 0:
+            yield
+        yield port.wdata.valid.eq(0)
+        yield
+
+
+class CDCDUT(ConverterDUT):
+    def do_finalize(self):
+        # Change clock domains
+        self.write_user_port.clock_domain     = "user"
+        self.read_user_port.clock_domain      = "user"
+        self.write_crossbar_port.clock_domain = "native"
+        self.read_crossbar_port.clock_domain  = "native"
+
+        # Add CDC
+        self.submodules.write_converter = LiteDRAMNativePortCDC(
+            port_from = self.write_user_port,
+            port_to   = self.write_crossbar_port)
+        self.submodules.read_converter = LiteDRAMNativePortCDC(
+            port_from = self.read_user_port,
+            port_to   = self.read_crossbar_port)
+
+
+class TestAdaptation(MemoryTestDataMixin, unittest.TestCase):
+    def test_converter_down_ratio_must_be_integer(self):
+        with self.assertRaises(ValueError) as cm:
+            dut = ConverterDUT(user_data_width=64, native_data_width=24, mem_depth=128)
+            dut.finalize()
+        self.assertIn("ratio must be an int", str(cm.exception).lower())
+
+    def test_converter_up_ratio_must_be_integer(self):
+        with self.assertRaises(ValueError) as cm:
+            dut = ConverterDUT(user_data_width=32, native_data_width=48, mem_depth=128)
+            dut.finalize()
+        self.assertIn("ratio must be an int", str(cm.exception).lower())
+
+    def converter_readback_test(self, dut, pattern, mem_expected):
+        assert len(set(adr for adr, _ in pattern)) == len(pattern), "Pattern has duplicates!"
+        read_data = []
+
+        @passive
+        def read_handler(read_port):
+            yield read_port.rdata.ready.eq(1)
+            while True:
+                if (yield read_port.rdata.valid):
+                    read_data.append((yield read_port.rdata.data))
+                yield
+
+        def main_generator(dut, pattern):
+            for adr, data in pattern:
+                yield from dut.write(adr, data)
+
+            for adr, _ in pattern:
+                yield from dut.read(adr, read_data=False)
+
+            # Latency delay
+            for _ in range(32):
+                yield
+
+        generators = [
+            main_generator(dut, pattern),
+            read_handler(dut.read_user_port),
+            dut.memory.write_handler(dut.write_crossbar_port),
+            dut.memory.read_handler(dut.read_crossbar_port),
+            timeout_generator(5000),
+        ]
+        run_simulation(dut, generators)
+        self.assertEqual(dut.memory.mem, mem_expected)
+        self.assertEqual(read_data, [data for adr, data in pattern])
+
+    def test_converter_1to1(self):
+        # Verify 64-bit to 64-bit identity conversion.
+        data = self.pattern_test_data["64bit"]
+        dut  = ConverterDUT(user_data_width=64, native_data_width=64, mem_depth=len(data["expected"]))
+        self.converter_readback_test(dut, data["pattern"], data["expected"])
+
+    def test_converter_2to1(self):
+        # Verify 64-bit to 32-bit down-conversion.
+        data = self.pattern_test_data["64bit_to_32bit"]
+        dut  = ConverterDUT(user_data_width=64, native_data_width=32, mem_depth=len(data["expected"]))
+        self.converter_readback_test(dut, data["pattern"], data["expected"])
+
+    def test_converter_4to1(self):
+        # Verify 32-bit to 8-bit down-conversion.
+        data = self.pattern_test_data["32bit_to_8bit"]
+        dut  = ConverterDUT(user_data_width=32, native_data_width=8, mem_depth=len(data["expected"]))
+        self.converter_readback_test(dut, data["pattern"], data["expected"])
+
+    def test_converter_8to1(self):
+        # Verify 64-bit to 8-bit down-conversion.
+        data = self.pattern_test_data["64bit_to_8bit"]
+        dut  = ConverterDUT(user_data_width=64, native_data_width=8, mem_depth=len(data["expected"]))
+        self.converter_readback_test(dut, data["pattern"], data["expected"])
+
+    def test_converter_1to2(self):
+        # Verify 8-bit to 16-bit up-conversion.
+        data = self.pattern_test_data["8bit_to_16bit"]
+        dut  = ConverterDUT(user_data_width=8, native_data_width=16, mem_depth=len(data["expected"]))
+        self.converter_readback_test(dut, data["pattern"], data["expected"])
+
+    def test_converter_1to4(self):
+        # Verify 32-bit to 128-bit up-conversion.
+        data = self.pattern_test_data["32bit_to_128bit"]
+        dut  = ConverterDUT(user_data_width=32, native_data_width=128, mem_depth=len(data["expected"]))
+        self.converter_readback_test(dut, data["pattern"], data["expected"])
+
+    def test_converter_1to8(self):
+        # Verify 32-bit to 256-bit up-conversion.
+        data = self.pattern_test_data["32bit_to_256bit"]
+        dut  = ConverterDUT(user_data_width=32, native_data_width=256, mem_depth=len(data["expected"]))
+        self.converter_readback_test(dut, data["pattern"], data["expected"])
+
+    # TODO: implement the case where the user does not write all words (LiteDRAMNativeWritePortUpConverter)
+    @unittest.skip("Only full-burst writes currently supported")
+    def test_converter_up_not_aligned(self):
+        data = self.pattern_test_data["8bit_to_32bit_not_aligned"]
+        dut  = ConverterDUT(user_data_width=8, native_data_width=32, mem_depth=len(data["expected"]))
+        self.converter_readback_test(dut, data["pattern"], data["expected"])
+
+    def cdc_readback_test(self, dut, pattern, mem_expected, clocks):
+        assert len(set(adr for adr, _ in pattern)) == len(pattern), "Pattern has duplicates!"
+        read_data = []
+
+        @passive
+        def read_handler(read_port):
+            yield read_port.rdata.ready.eq(1)
+            while True:
+                if (yield read_port.rdata.valid):
+                    read_data.append((yield read_port.rdata.data))
+                yield
+
+        def main_generator(dut, pattern):
+            for adr, data in pattern:
+                yield from dut.write(adr, data)
+
+            for adr, _ in pattern:
+                yield from dut.read(adr, read_data=False)
+
+            # Latency delay
+            for _ in range(32):
+                yield
+
+        generators = {
+            "user": [
+                main_generator(dut, pattern),
+                read_handler(dut.read_user_port),
+                timeout_generator(5000),
+            ],
+            "native": [
+                dut.memory.write_handler(dut.write_crossbar_port),
+                dut.memory.read_handler(dut.read_crossbar_port),
+            ],
+        }
+        run_simulation(dut, generators, clocks)
+        self.assertEqual(dut.memory.mem, mem_expected)
+        self.assertEqual(read_data, [data for adr, data in pattern])
+
+    def test_port_cdc_same_clocks(self):
+        # Verify CDC with same clocks (frequency and phase).
+        data = self.pattern_test_data["32bit"]
+        dut  = CDCDUT(user_data_width=32, native_data_width=32, mem_depth=len(data["expected"]))
+        clocks = {
+            "user": 10,
+            "native": (7, 3),
+        }
+        self.cdc_readback_test(dut, data["pattern"], data["expected"], clocks=clocks)
+
+    def test_port_cdc_different_period(self):
+        # Verify CDC with different clock frequencies.
+        data = self.pattern_test_data["32bit"]
+        dut  = CDCDUT(user_data_width=32, native_data_width=32, mem_depth=len(data["expected"]))
+        clocks = {
+            "user": 10,
+            "native": 7,
+        }
+        self.cdc_readback_test(dut, data["pattern"], data["expected"], clocks=clocks)
+
+    def test_port_cdc_out_of_phase(self):
+        # Verify CDC with different clock phases.
+        data = self.pattern_test_data["32bit"]
+        dut  = CDCDUT(user_data_width=32, native_data_width=32, mem_depth=len(data["expected"]))
+        clocks = {
+            "user": 10,
+            "native": (7, 3),
+        }
+        self.cdc_readback_test(dut, data["pattern"], data["expected"], clocks=clocks)
diff --git a/test/test_axi.py b/test/test_axi.py
new file mode 100644 (file)
index 0000000..df9eb1d
--- /dev/null
@@ -0,0 +1,275 @@
+# This file is Copyright (c) 2018-2019 Florent Kermarrec <florent@enjoy-digital.fr>
+# License: BSD
+
+import unittest
+import random
+
+from migen import *
+
+from litedram.common import *
+from litedram.frontend.axi import *
+
+from test.common import *
+
+from litex.gen.sim import *
+
+
+class Burst:
+    def __init__(self, addr, type=BURST_FIXED, len=0, size=0):
+        self.addr = addr
+        self.type = type
+        self.len  = len
+        self.size = size
+
+    def to_beats(self):
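+        # Expand the burst into its len+1 individual beat addresses:
+        # INCR advances by 2**size each beat, WRAP wraps within the burst
+        # container, FIXED repeats the same address.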
+        r = []
+        for i in range(self.len + 1):
+            if self.type == BURST_INCR:
+                offset = i*2**(self.size)
+                r += [Beat(self.addr + offset)]
+            elif self.type == BURST_WRAP:
+                offset = (i*2**(self.size))%((2**self.size)*(self.len))
+                r += [Beat(self.addr + offset)]
+            else:
+                r += [Beat(self.addr)]
+        return r
+
+
+class Beat:
+    def __init__(self, addr):
+        self.addr = addr
+
+
+class Access(Burst):
+    def __init__(self, addr, data, id, **kwargs):
+        Burst.__init__(self, addr, **kwargs)
+        self.data = data
+        self.id   = id
+
+
+class Write(Access):
+    pass
+
+
+class Read(Access):
+    pass
+
+
+class TestAXI(unittest.TestCase):
+    def _test_axi2native(self,
+        naccesses=16, simultaneous_writes_reads=False,
+        # Random: 0: min (no random), 100: max.
+        # Burst randomness
+        id_rand_enable   = False,
+        len_rand_enable  = False,
+        data_rand_enable = False,
+        # Flow valid randomness
+        aw_valid_random = 0,
+        w_valid_random  = 0,
+        ar_valid_random = 0,
+        r_valid_random  = 0,
+        # Flow ready randomness
+        w_ready_random  = 0,
+        b_ready_random  = 0,
+        r_ready_random  = 0
+        ):
+
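+        # Each AXI channel (AW, W, B, AR, R) is exercised by its own generator below,
+        # all running concurrently against a DRAMMemory model behind the native port.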
+        def writes_cmd_generator(axi_port, writes):
+            prng = random.Random(42)
+            for write in writes:
+                while prng.randrange(100) < aw_valid_random:
+                    yield
+                # Send command
+                yield axi_port.aw.valid.eq(1)
+                yield axi_port.aw.addr.eq(write.addr<<2)
+                yield axi_port.aw.burst.eq(write.type)
+                yield axi_port.aw.len.eq(write.len)
+                yield axi_port.aw.size.eq(write.size)
+                yield axi_port.aw.id.eq(write.id)
+                yield
+                while (yield axi_port.aw.ready) == 0:
+                    yield
+                yield axi_port.aw.valid.eq(0)
+
+        def writes_data_generator(axi_port, writes):
+            prng = random.Random(42)
+            for write in writes:
+                for i, data in enumerate(write.data):
+                    while prng.randrange(100) < w_valid_random:
+                        yield
+                    # Send data
+                    yield axi_port.w.valid.eq(1)
+                    if (i == (len(write.data) - 1)):
+                        yield axi_port.w.last.eq(1)
+                    else:
+                        yield axi_port.w.last.eq(0)
+                    yield axi_port.w.data.eq(data)
+                    yield axi_port.w.strb.eq(2**axi_port.w.strb.nbits - 1)
+                    yield
+                    while (yield axi_port.w.ready) == 0:
+                        yield
+                    yield axi_port.w.valid.eq(0)
+            axi_port.reads_enable = True
+
+        def writes_response_generator(axi_port, writes):
+            prng = random.Random(42)
+            self.writes_id_errors = 0
+            for write in writes:
+                # Wait response
+                yield axi_port.b.ready.eq(0)
+                yield
+                while (yield axi_port.b.valid) == 0:
+                    yield
+                while prng.randrange(100) < b_ready_random:
+                    yield
+                yield axi_port.b.ready.eq(1)
+                yield
+                if (yield axi_port.b.id) != write.id:
+                    self.writes_id_errors += 1
+
+        def reads_cmd_generator(axi_port, reads):
+            prng = random.Random(42)
+            while not axi_port.reads_enable:
+                yield
+            for read in reads:
+                while prng.randrange(100) < ar_valid_random:
+                    yield
+                # Send command
+                yield axi_port.ar.valid.eq(1)
+                yield axi_port.ar.addr.eq(read.addr<<2)
+                yield axi_port.ar.burst.eq(read.type)
+                yield axi_port.ar.len.eq(read.len)
+                yield axi_port.ar.size.eq(read.size)
+                yield axi_port.ar.id.eq(read.id)
+                yield
+                while (yield axi_port.ar.ready) == 0:
+                    yield
+                yield axi_port.ar.valid.eq(0)
+
+        def reads_response_data_generator(axi_port, reads):
+            prng = random.Random(42)
+            self.reads_data_errors = 0
+            self.reads_id_errors   = 0
+            self.reads_last_errors = 0
+            while not axi_port.reads_enable:
+                yield
+            for read in reads:
+                for i, data in enumerate(read.data):
+                    # Wait data / response
+                    yield axi_port.r.ready.eq(0)
+                    yield
+                    while (yield axi_port.r.valid) == 0:
+                        yield
+                    while prng.randrange(100) < r_ready_random:
+                        yield
+                    yield axi_port.r.ready.eq(1)
+                    yield
+                    if (yield axi_port.r.data) != data:
+                        self.reads_data_errors += 1
+                    if (yield axi_port.r.id) != read.id:
+                        self.reads_id_errors += 1
+                    if i == (len(read.data) - 1):
+                        if (yield axi_port.r.last) != 1:
+                            self.reads_last_errors += 1
+                    else:
+                        if (yield axi_port.r.last) != 0:
+                            self.reads_last_errors += 1
+
+        # DUT
+        axi_port  = LiteDRAMAXIPort(32, 32, 8)
+        dram_port = LiteDRAMNativePort("both", 32, 32)
+        dut       = LiteDRAMAXI2Native(axi_port, dram_port)
+        mem       = DRAMMemory(32, 1024)
+
+        # Generate writes/reads
+        prng   = random.Random(42)
+        writes = []
+        offset = 1
+        for i in range(naccesses):
+            _id   = prng.randrange(2**8) if id_rand_enable else i
+            _len  = prng.randrange(32) if len_rand_enable else i
+            _data = [prng.randrange(2**32) if data_rand_enable else j for j in range(_len + 1)]
+            writes.append(Write(offset, _data, _id, type=BURST_INCR, len=_len, size=log2_int(32//8)))
+            offset += _len + 1
+        # Dummy reads to ensure the data has been written before the effective reads start.
+        dummy_reads = [Read(1023, [0], 0, type=BURST_FIXED, len=0, size=log2_int(32//8)) for _ in range(32)]
+        reads = dummy_reads + writes
+
+        # Simulation
+        if simultaneous_writes_reads:
+            axi_port.reads_enable = True
+        else:
+            axi_port.reads_enable = False # Will be set by writes_data_generator
+        generators = [
+            writes_cmd_generator(axi_port, writes),
+            writes_data_generator(axi_port, writes),
+            writes_response_generator(axi_port, writes),
+            reads_cmd_generator(axi_port, reads),
+            reads_response_data_generator(axi_port, reads),
+            mem.read_handler(dram_port, rdata_valid_random=r_valid_random),
+            mem.write_handler(dram_port, wdata_ready_random=w_ready_random)
+        ]
+        run_simulation(dut, generators)
+        #mem.show_content()
+        self.assertEqual(self.writes_id_errors, 0)
+        self.assertEqual(self.reads_data_errors, 0)
+        self.assertEqual(self.reads_id_errors, 0)
+        self.assertEqual(self.reads_last_errors, 0)
+
+    # Test with no randomness
+    def test_axi2native_writes_then_reads_no_random(self):
+        self._test_axi2native(simultaneous_writes_reads=False)
+
+    def test_axi2native_writes_and_reads_no_random(self):
+        self._test_axi2native(simultaneous_writes_reads=True)
+
+    # Test randomness one parameter at a time
+    def test_axi2native_writes_then_reads_random_bursts(self):
+        self._test_axi2native(
+            simultaneous_writes_reads = False,
+            id_rand_enable   = True,
+            len_rand_enable  = True,
+            data_rand_enable = True)
+
+    def test_axi2native_writes_and_reads_random_bursts(self):
+        self._test_axi2native(
+            simultaneous_writes_reads = True,
+            id_rand_enable   = True,
+            len_rand_enable  = True,
+            data_rand_enable = True)
+
+    def test_axi2native_random_w_ready(self):
+        self._test_axi2native(w_ready_random=90)
+
+    def test_axi2native_random_b_ready(self):
+        self._test_axi2native(b_ready_random=90)
+
+    def test_axi2native_random_r_ready(self):
+        self._test_axi2native(r_ready_random=90)
+
+    def test_axi2native_random_aw_valid(self):
+        self._test_axi2native(aw_valid_random=90)
+
+    def test_axi2native_random_w_valid(self):
+        self._test_axi2native(w_valid_random=90)
+
+    def test_axi2native_random_ar_valid(self):
+        self._test_axi2native(ar_valid_random=90)
+
+    def test_axi2native_random_r_valid(self):
+        self._test_axi2native(r_valid_random=90)
+
+    # Now let's stress things a bit... :)
+    def test_axi2native_random_all(self):
+        self._test_axi2native(
+            simultaneous_writes_reads=True,
+            id_rand_enable  = True,
+            len_rand_enable = True,
+            aw_valid_random = 50,
+            w_ready_random  = 50,
+            b_ready_random  = 50,
+            w_valid_random  = 50,
+            ar_valid_random = 90,
+            r_valid_random  = 90,
+            r_ready_random  = 90
+        )
diff --git a/test/test_bandwidth.py b/test/test_bandwidth.py
new file mode 100644 (file)
index 0000000..adca81f
--- /dev/null
@@ -0,0 +1,242 @@
+# This file is Copyright (c) 2020 Antmicro <www.antmicro.com>
+# License: BSD
+
+import random
+import unittest
+import itertools
+import collections
+
+from migen import *
+
+from litex.soc.interconnect import stream
+
+from litedram.common import *
+from litedram.core.bandwidth import Bandwidth
+
+from test.common import timeout_generator, CmdRequestRWDriver
+
+
+class BandwidthDUT(Module):
+    def __init__(self, data_width=8, **kwargs):
+        a, ba = 13, 3
+        self.cmd = stream.Endpoint(cmd_request_rw_layout(a, ba))
+        self.submodules.bandwidth = Bandwidth(self.cmd, data_width, **kwargs)
+
+
+class CommandDriver:
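+    # Drives the cmd endpoint with DRAM commands and counts how many were accepted.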
+    def __init__(self, cmd, cmd_options=None):
+        self.cmd = cmd
+        self.driver = CmdRequestRWDriver(cmd)
+        self.cmd_counts = collections.defaultdict(int)
+
+    @passive
+    def random_generator(self, random_ready_max=20, commands=None):
+        commands = commands or ["read", "write"]
+        prng = random.Random(42)
+
+        while True:
+            # Generate random command
+            command = prng.choice(commands)
+            yield from getattr(self.driver, command)()
+            yield
+            # Wait some time before it becomes ready
+            for _ in range(prng.randint(0, random_ready_max)):
+                yield
+            yield self.cmd.ready.eq(1)
+            yield
+            self.cmd_counts[command] += 1
+            yield self.cmd.ready.eq(0)
+            # Disable command
+            yield from self.driver.nop()
+            yield
+
+    @passive
+    def timeline_generator(self, timeline):
+        # Timeline: an iterator of tuples (cycle, command)
+        sim_cycle = 0
+        for cycle, command in timeline:
+            assert cycle >= sim_cycle
+            while sim_cycle != cycle:
+                sim_cycle += 1
+                yield
+            # Set the command
+            yield from getattr(self.driver, command)()
+            yield self.cmd.ready.eq(1)
+            self.cmd_counts[command] += 1
+            # Advance 1 cycle
+            yield
+            sim_cycle += 1
+            # Clear state
+            yield self.cmd.ready.eq(0)
+            yield from self.driver.nop()
+
+
+class TestBandwidth(unittest.TestCase):
+    def test_can_read_status_data_width(self):
+        # Verify that data width can be read from a CSR.
+        def test(data_width):
+            def main_generator(dut):
+                yield
+                self.assertEqual((yield dut.bandwidth.data_width.status), data_width)
+
+            dut = BandwidthDUT(data_width=data_width)
+            run_simulation(dut, main_generator(dut))
+
+        for data_width in [8, 16, 32, 64]:
+            with self.subTest(data_width=data_width):
+                test(data_width)
+
+    def test_requires_update_to_copy_the_data(self):
+        # Verify that command counts are copied to CSRs only after `update`.
+        def main_generator(dut):
+            nreads  = (yield from dut.bandwidth.nreads.read())
+            nwrites = (yield from dut.bandwidth.nwrites.read())
+            self.assertEqual(nreads, 0)
+            self.assertEqual(nwrites, 0)
+
+            # Wait enough for the period to end
+            for _ in range(2**6):
+                yield
+
+            nreads  = (yield from dut.bandwidth.nreads.read())
+            nwrites = (yield from dut.bandwidth.nwrites.read())
+            self.assertEqual(nreads, 0)
+            self.assertEqual(nwrites, 0)
+
+            # Update register values
+            yield from dut.bandwidth.update.write(1)
+
+            nreads  = (yield from dut.bandwidth.nreads.read())
+            nwrites = (yield from dut.bandwidth.nwrites.read())
+            self.assertNotEqual((nreads, nwrites), (0, 0))
+
+        dut = BandwidthDUT(period_bits=6)
+        cmd_driver = CommandDriver(dut.cmd)
+        generators = [
+            main_generator(dut),
+            cmd_driver.random_generator(),
+        ]
+        run_simulation(dut, generators)
+
+    def test_correct_read_write_counts(self):
+        # Verify that the number of registered READ/WRITE commands is correct.
+        results = {}
+
+        def main_generator(dut):
+            # Wait for the first period to end
+            for _ in range(2**8):
+                yield
+            yield from dut.bandwidth.update.write(1)
+            yield
+            results["nreads"]  = (yield from dut.bandwidth.nreads.read())
+            results["nwrites"] = (yield from dut.bandwidth.nwrites.read())
+
+        dut = BandwidthDUT(period_bits=8)
+        cmd_driver = CommandDriver(dut.cmd)
+        generators = [
+            main_generator(dut),
+            cmd_driver.random_generator(),
+        ]
+        run_simulation(dut, generators)
+
+        self.assertEqual(results["nreads"], cmd_driver.cmd_counts["read"])
+
+    def test_counts_read_write_only(self):
+        # Verify that only READ and WRITE commands are registered.
+        results = {}
+
+        def main_generator(dut):
+            # Wait for the first period to end
+            for _ in range(2**8):
+                yield
+            yield from dut.bandwidth.update.write(1)
+            yield
+            results["nreads"] = (yield from dut.bandwidth.nreads.read())
+            results["nwrites"] = (yield from dut.bandwidth.nwrites.read())
+
+        dut = BandwidthDUT(period_bits=8)
+        cmd_driver = CommandDriver(dut.cmd)
+        commands   = ["read", "write", "activate", "precharge", "refresh"]
+        generators = [
+            main_generator(dut),
+            cmd_driver.random_generator(commands=commands),
+        ]
+        run_simulation(dut, generators)
+
+        self.assertEqual(results["nreads"], cmd_driver.cmd_counts["read"])
+
+    def test_correct_period_length(self):
+        # Verify that period length is correct by measuring time between CSR changes.
+        period_bits = 5
+        period = 2**period_bits
+
+        n_per_period = {0: 3, 1: 6, 2: 9}
+        timeline = {}
+        for p, n in n_per_period.items():
+            for i in range(n):
+                margin = 10
+                timeline[period*p + margin + i] = "write"
+
+        def main_generator(dut):
+            # Keep the values always up to date
+            yield dut.bandwidth.update.re.eq(1)
+
+            # Wait until we have the data from 1st period
+            while (yield dut.bandwidth.nwrites.status) != 3:
+                yield
+
+            # Count time to next period
+            cycles = 0
+            while (yield dut.bandwidth.nwrites.status) != 6:
+                cycles += 1
+                yield
+
+            self.assertEqual(cycles, period)
+
+        dut = BandwidthDUT(period_bits=period_bits)
+        cmd_driver = CommandDriver(dut.cmd)
+        generators = [
+            main_generator(dut),
+            cmd_driver.timeline_generator(timeline.items()),
+            timeout_generator(period * 3),
+        ]
+        run_simulation(dut, generators)
+
+    def test_not_missing_commands_on_period_boundary(self):
+        # Verify that no data is lost in the cycle when new period starts.
+        period_bits = 5
+        period = 2**period_bits
+
+        # Start 10 cycles before period ends, end 10 cycles after it ends
+        base = period - 10
+        nwrites = 20
+        timeline = {base + i: "write" for i in range(nwrites)}
+
+        def main_generator(dut):
+            # Wait until 1st period ends (+ some margin)
+            for _ in range(period + 10):
+                yield
+
+            # Read the count from 1st period
+            yield from dut.bandwidth.update.write(1)
+            yield
+            nwrites_registered = (yield from dut.bandwidth.nwrites.read())
+
+            # Wait until 2nd period ends
+            for _ in range(period):
+                yield
+
+            # Read the count from 2nd period and accumulate it
+            yield from dut.bandwidth.update.write(1)
+            yield
+            nwrites_registered += (yield from dut.bandwidth.nwrites.read())
+
+            self.assertEqual(nwrites_registered, nwrites)
+
+        dut = BandwidthDUT(period_bits=period_bits)
+        cmd_driver = CommandDriver(dut.cmd)
+        generators = [
+            main_generator(dut),
+            cmd_driver.timeline_generator(timeline.items()),
+        ]
+        run_simulation(dut, generators)
diff --git a/test/test_bankmachine.py b/test/test_bankmachine.py
new file mode 100644 (file)
index 0000000..f676d0d
--- /dev/null
@@ -0,0 +1,427 @@
+# This file is Copyright (c) 2020 Antmicro <www.antmicro.com>
+# License: BSD
+
+import math
+import unittest
+
+from migen import *
+
+from litedram.common import *
+from litedram.core.bankmachine import BankMachine
+
+from test.common import timeout_generator
+
+
+class BankMachineDUT(Module):
+    # Fill only settings needed by BankMachine
+    default_controller_settings = dict(
+        cmd_buffer_depth    = 8,
+        cmd_buffer_buffered = False,
+        with_auto_precharge = True,
+    )
+    default_phy_settings = dict(
+        cwl          = 2,
+        nphases      = 2,
+        nranks       = 1,
+        # indirectly
+        memtype      = "DDR2",
+        dfi_databits = 2*16,
+    )
+    default_geom_settings = dict(
+        bankbits = 3,
+        rowbits  = 13,
+        colbits  = 10,
+    )
+    default_timing_settings = dict(
+        tRAS = None,
+        tRC  = None,
+        tCCD = 1,
+        tRCD = 2,
+        tRP  = 2,
+        tWR  = 2,
+    )
+
+    def __init__(self, n,
+        controller_settings = None,
+        phy_settings        = None,
+        geom_settings       = None,
+        timing_settings     = None):
+        # Update settings if provided
+        def updated(settings, update):
+            copy = settings.copy()
+            copy.update(update or {})
+            return copy
+
+        controller_settings = updated(self.default_controller_settings, controller_settings)
+        phy_settings        = updated(self.default_phy_settings, phy_settings)
+        geom_settings       = updated(self.default_geom_settings, geom_settings)
+        timing_settings     = updated(self.default_timing_settings, timing_settings)
+
+        class SimpleSettings(Settings):
+            def __init__(self, **kwargs):
+                self.set_attributes(kwargs)
+
+        settings        = SimpleSettings(**controller_settings)
+        settings.phy    = SimpleSettings(**phy_settings)
+        settings.geom   = SimpleSettings(**geom_settings)
+        settings.timing = SimpleSettings(**timing_settings)
+        settings.geom.addressbits = max(settings.geom.rowbits, settings.geom.colbits)
+        self.settings = settings
+
+        self.address_align = log2_int(burst_lengths[settings.phy.memtype])
+        self.address_width = LiteDRAMInterface(self.address_align, settings).address_width
+
+        bankmachine = BankMachine(n=n,
+            address_width = self.address_width,
+            address_align = self.address_align,
+            nranks        = settings.phy.nranks,
+            settings      = settings)
+        self.submodules.bankmachine = bankmachine
+
+    def get_cmd(self):
+        # cmd_request_rw_layout -> name
+        layout = [name for name, _ in cmd_request_rw_layout(
+            a  = self.settings.geom.addressbits,
+            ba = self.settings.geom.bankbits)]
+        request = {}
+        for name in layout + ["valid", "ready", "first", "last"]:
+            request[name] = (yield getattr(self.bankmachine.cmd, name))
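+        # Decode the DRAM command type from the (cas, ras, we) control lines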
+        request["type"] = {
+            (0, 0, 0): "nop",
+            (1, 0, 1): "write",
+            (1, 0, 0): "read",
+            (0, 1, 0): "activate",
+            (0, 1, 1): "precharge",
+            (1, 1, 0): "refresh",
+        }[(request["cas"], request["ras"], request["we"])]
+        return request
+
+    def req_address(self, row, col):
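+        # Build a user-port address from (row, col): the row is placed above the
+        # column field, which loses address_align bits to burst alignment.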
+        col = col & (2**self.settings.geom.colbits - 1)
+        row = row & (2**self.settings.geom.rowbits - 1)
+        split = self.settings.geom.colbits - self.address_align
+        return (row << split) | col
+
+
+class TestBankMachine(unittest.TestCase):
+    def test_init(self):
+        BankMachineDUT(1)
+
+    def bankmachine_commands_test(self, dut, requests, generators=None):
+        # Perform a test by simulating requests producer and return registered commands
+        commands = []
+
+        def producer(dut):
+            for req in requests:
+                yield dut.bankmachine.req.addr.eq(req["addr"])
+                yield dut.bankmachine.req.we.eq(req["we"])
+                yield dut.bankmachine.req.valid.eq(1)
+                yield
+                while not (yield dut.bankmachine.req.ready):
+                    yield
+                yield dut.bankmachine.req.valid.eq(0)
+                for _ in range(req.get("delay", 0)):
+                    yield
+
+        def req_consumer(dut):
+            for req in requests:
+                if req["we"]:
+                    signal = dut.bankmachine.req.wdata_ready
+                else:
+                    signal = dut.bankmachine.req.rdata_valid
+                while not (yield signal):
+                    yield
+                yield
+
+        @passive
+        def cmd_consumer(dut):
+            while True:
+                while not (yield dut.bankmachine.cmd.valid):
+                    yield
+                yield dut.bankmachine.cmd.ready.eq(1)
+                yield
+                commands.append((yield from dut.get_cmd()))
+                yield dut.bankmachine.cmd.ready.eq(0)
+                yield
+
+        all_generators = [
+            producer(dut),
+            req_consumer(dut),
+            cmd_consumer(dut),
+            timeout_generator(50 * len(requests)),
+        ]
+        if generators is not None:
+            all_generators += [g(dut) for g in generators]
+        run_simulation(dut, all_generators)
+        return commands
+
+    def test_opens_correct_row(self):
+        # Verify that the correct row is activated before read/write commands.
+        dut = BankMachineDUT(3)
+        requests = [
+            dict(addr=dut.req_address(row=0xf0, col=0x0d), we=0),
+            dict(addr=dut.req_address(row=0xd0, col=0x0d), we=1),
+        ]
+        commands = self.bankmachine_commands_test(dut=dut, requests=requests)
+        # Commands: activate, read (auto-precharge), activate, write
+        self.assertEqual(commands[0]["type"], "activate")
+        self.assertEqual(commands[0]["a"], 0xf0)
+        self.assertEqual(commands[2]["type"], "activate")
+        self.assertEqual(commands[2]["a"], 0xd0)
+
+    def test_correct_bank_address(self):
+        # Verify that `ba` always corresponds to the BankMachine number.
+        for bn in [0, 2, 7]:
+            with self.subTest(bn=bn):
+                dut = BankMachineDUT(bn, geom_settings=dict(bankbits=3))
+                requests = [dict(addr=0, we=0)]
+                commands = self.bankmachine_commands_test(dut=dut, requests=requests)
+                for cmd in commands:
+                    self.assertEqual(cmd["ba"], bn)
+
+    def test_read_write_same_row(self):
+        # Verify that there is only one activate when working on single row.
+        dut = BankMachineDUT(1)
+        requests = [
+            dict(addr=dut.req_address(row=0xba, col=0xad), we=0),
+            dict(addr=dut.req_address(row=0xba, col=0xad), we=1),
+            dict(addr=dut.req_address(row=0xba, col=0xbe), we=0),
+            dict(addr=dut.req_address(row=0xba, col=0xbe), we=1),
+        ]
+        commands = self.bankmachine_commands_test(dut=dut, requests=requests)
+        commands = [(cmd["type"], cmd["a"]) for cmd in commands]
+        expected = [
+            ("activate", 0xba),
+            ("read",     0xad << dut.address_align),
+            ("write",    0xad << dut.address_align),
+            ("read",     0xbe << dut.address_align),
+            ("write",    0xbe << dut.address_align),
+        ]
+        self.assertEqual(commands, expected)
+
+    def test_write_different_rows_with_delay(self):
+        # Verify that precharge is used when changing rows after a delay; this is independent of auto-precharge.
+        for auto_precharge in [False, True]:
+            with self.subTest(auto_precharge=auto_precharge):
+                settings = dict(with_auto_precharge=auto_precharge)
+                dut      = BankMachineDUT(1, controller_settings=settings)
+                requests = [
+                    dict(addr=dut.req_address(row=0xba, col=0xad), we=1, delay=8),
+                    dict(addr=dut.req_address(row=0xda, col=0xad), we=1),
+                ]
+                commands = self.bankmachine_commands_test(dut=dut, requests=requests)
+                commands = [(cmd["type"], cmd["a"]) for cmd in commands]
+                expected = [
+                    ("activate",  0xba),
+                    ("write",     0xad << dut.address_align),
+                    ("precharge", 0xad << dut.address_align),
+                    ("activate",  0xda),
+                    ("write",     0xad << dut.address_align),
+                ]
+                self.assertEqual(commands, expected)
+
+    def test_write_different_rows_with_auto_precharge(self):
+        # Verify that auto-precharge is used when changing row without delay.
+        settings = dict(with_auto_precharge=True)
+        dut      = BankMachineDUT(1, controller_settings=settings)
+        requests = [
+            dict(addr=dut.req_address(row=0xba, col=0xad), we=1),
+            dict(addr=dut.req_address(row=0xda, col=0xad), we=1),
+        ]
+        commands = self.bankmachine_commands_test(dut=dut, requests=requests)
+        commands = [(cmd["type"], cmd["a"]) for cmd in commands]
+        expected = [
+            ("activate",  0xba),
+            ("write",    (0xad << dut.address_align) | (1 << 10)),
+            ("activate",  0xda),
+            ("write",     0xad << dut.address_align),
+        ]
+        self.assertEqual(commands, expected)
+
+    def test_write_different_rows_without_auto_precharge(self):
+        # Verify that a regular precharge is used when changing rows with auto-precharge disabled.
+        settings = dict(with_auto_precharge=False)
+        dut = BankMachineDUT(1, controller_settings=settings)
+        requests = [
+            dict(addr=dut.req_address(row=0xba, col=0xad), we=1),
+            dict(addr=dut.req_address(row=0xda, col=0xad), we=1),
+        ]
+        commands = self.bankmachine_commands_test(dut=dut, requests=requests)
+        commands = [(cmd["type"], cmd["a"]) for cmd in commands]
+        expected = [
+            ("activate",  0xba),
+            ("write",     0xad << dut.address_align),
+            ("precharge", 0xad << dut.address_align),
+            ("activate",  0xda),
+            ("write",     0xad << dut.address_align),
+        ]
+        self.assertEqual(commands, expected)
+
+    def test_burst_no_request_lost(self):
+        # Verify that no request is lost in fast bursts of requests regardless of cmd_buffer_depth.
+        for cmd_buffer_depth in [8, 1, 0]:
+            settings = dict(cmd_buffer_depth=cmd_buffer_depth)
+            with self.subTest(**settings):
+                dut = BankMachineDUT(1, controller_settings=settings)
+                # Long sequence of writes to the same row
+                requests = [dict(addr=dut.req_address(row=0xba, col=i), we=1) for i in range(32)]
+                expected = ([("activate", 0xba)] +
+                            [("write", i << dut.address_align) for i in range(32)])
+                commands = self.bankmachine_commands_test(dut=dut, requests=requests)
+                commands = [(cmd["type"], cmd["a"]) for cmd in commands]
+                self.assertEqual(commands, expected)
+
+    def test_lock_until_requests_finished(self):
+        # Verify that lock is being held until all requests in FIFO are processed.
+        @passive
+        def lock_checker(dut):
+            req = dut.bankmachine.req
+            self.assertEqual((yield req.lock), 0)
+
+            # Wait until first request becomes locked
+            while not (yield req.valid):
+                yield
+
+            # Wait until the lock should be released (all requests in the queue get processed);
+            # here this happens when the final wdata_ready ends
+            for _ in range(3):
+                while not (yield req.wdata_ready):
+                    yield
+                    self.assertEqual((yield req.lock), 1)
+                yield
+
+            yield
+            self.assertEqual((yield req.lock), 0)
+
+        dut = BankMachineDUT(1)
+        # Simple sequence with row change
+        requests = [
+            dict(addr=dut.req_address(row=0x1a, col=0x01), we=1),
+            dict(addr=dut.req_address(row=0x1b, col=0x02), we=1),
+            dict(addr=dut.req_address(row=0x1c, col=0x04), we=1),
+        ]
+        self.bankmachine_commands_test(dut=dut, requests=requests, generators=[lock_checker])
+
+    def timing_test(self, from_cmd, to_cmd, time_expected, **dut_kwargs):
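+        # Generic timing check: count cycles between the completion of `from_cmd`
+        # and the first valid `to_cmd`, then compare against `time_expected`.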
+        @passive
+        def timing_checker(dut):
+            def is_cmd(cmd_type, test_ready):
+                cmd = (yield from dut.get_cmd())
+                ready = cmd["ready"] if test_ready else True
+                return cmd["valid"] and ready and cmd["type"] == cmd_type
+
+            # Time between `from_cmd` completing (valid and ready) and `to_cmd` becoming valid
+            while not (yield from is_cmd(from_cmd, test_ready=True)):
+                yield
+            yield  # Wait until the command deasserts in case `to_cmd` is the same as `from_cmd`
+            time = 1
+            while not (yield from is_cmd(to_cmd, test_ready=False)):
+                yield
+                time += 1
+
+            self.assertEqual(time, time_expected)
+
+        dut = BankMachineDUT(1, **dut_kwargs)
+        # Simple sequence with row change
+        requests = [
+            dict(addr=dut.req_address(row=0xba, col=0xad), we=1),
+            dict(addr=dut.req_address(row=0xda, col=0xad), we=1),
+        ]
+        self.bankmachine_commands_test(dut=dut, requests=requests, generators=[timing_checker])
+
+    def test_timing_write_to_precharge(self):
+        controller_settings = dict(with_auto_precharge=False)
+        timing_settings = dict(tWR=6, tCCD=4)
+        phy_settings = dict(cwl=2, nphases=2)
+        write_latency = math.ceil(phy_settings["cwl"] / phy_settings["nphases"])
+        precharge_time = write_latency + timing_settings["tWR"] + timing_settings["tCCD"]
+        self.timing_test("write", "precharge", precharge_time,
+            controller_settings = controller_settings,
+            phy_settings        = phy_settings,
+            timing_settings     = timing_settings)
+
+    def test_timing_activate_to_activate(self):
+        timing_settings = dict(tRC=16)
+        self.timing_test("activate", "activate",
+            time_expected   = 16,
+            timing_settings = timing_settings)
+
+    def test_timing_activate_to_precharge(self):
+        timing_settings = dict(tRAS=32)
+        self.timing_test("activate", "precharge",
+            time_expected   = 32,
+            timing_settings = timing_settings)
+
+    def test_refresh(self):
+        # Verify that no commands are issued during refresh and after it the row is re-activated.
+        @passive
+        def refresh_generator(dut):
+            # Wait some time for the bankmachine to start
+            for _ in range(16):
+                yield
+
+            # Request a refresh
+            yield dut.bankmachine.refresh_req.eq(1)
+            while not (yield dut.bankmachine.refresh_gnt):
+                yield
+
+            # Wait while the refresh is being performed
+            # Make sure no command is issued during refresh
+            for _ in range(32):
+                self.assertEqual((yield dut.bankmachine.cmd.valid), 0)
+                yield
+
+            # Signal that the refresh has completed
+            yield dut.bankmachine.refresh_req.eq(0)
+
+        dut = BankMachineDUT(1)
+        requests = [dict(addr=dut.req_address(row=0xba, col=i), we=1) for i in range(16)]
+        commands = self.bankmachine_commands_test(dut=dut, requests=requests,
+                                                  generators=[refresh_generator])
+        commands = [(cmd["type"], cmd["a"]) for cmd in commands]
+        # Refresh will close row, so bankmachine should re-activate it after refresh
+        self.assertEqual(commands.count(("activate", 0xba)), 2)
+        # Verify that the write commands are correct
+        write_commands = [cmd for cmd in commands if cmd[0] == "write"]
+        expected_writes = [("write", i << dut.address_align) for i in range(16)]
+        self.assertEqual(write_commands, expected_writes)
+
+    def test_output_annotations(self):
+        # Verify that all commands are annotated correctly using is_* signals.
+        checked = set()
+
+        @passive
+        def cmd_checker(dut):
+            while True:
+                cmd = (yield from dut.get_cmd())
+                if cmd["valid"]:
+                    if cmd["type"] in ["activate", "precharge"]:
+                        self.assertEqual(cmd["is_cmd"],   1)
+                        self.assertEqual(cmd["is_write"], 0)
+                        self.assertEqual(cmd["is_read"],  0)
+                    elif cmd["type"] in ["write"]:
+                        self.assertEqual(cmd["is_cmd"],   0)
+                        self.assertEqual(cmd["is_write"], 1)
+                        self.assertEqual(cmd["is_read"],  0)
+                    elif cmd["type"] in ["read"]:
+                        self.assertEqual(cmd["is_cmd"],   0)
+                        self.assertEqual(cmd["is_write"], 0)
+                        self.assertEqual(cmd["is_read"],  1)
+                    else:
+                        raise ValueError(cmd["type"])
+                    checked.add(cmd["type"])
+                yield
+
+        dut = BankMachineDUT(1)
+        requests = [
+            dict(addr=dut.req_address(row=0xba, col=0xad), we=0),
+            dict(addr=dut.req_address(row=0xba, col=0xad), we=1),
+            dict(addr=dut.req_address(row=0xda, col=0xad), we=0),
+            # Wait enough time for regular (not auto) precharge to be used
+            dict(addr=dut.req_address(row=0xda, col=0xad), we=1, delay=32),
+            dict(addr=dut.req_address(row=0xba, col=0xad), we=0),
+            dict(addr=dut.req_address(row=0xba, col=0xad), we=1),
+        ]
+        self.bankmachine_commands_test(dut=dut, requests=requests, generators=[cmd_checker])
+        # Bankmachine does not produce refresh commands
+        self.assertEqual(checked, {"activate", "precharge", "write", "read"})
diff --git a/test/test_bist.py b/test/test_bist.py
new file mode 100644 (file)
index 0000000..3be4cd7
--- /dev/null
@@ -0,0 +1,446 @@
+# This file is Copyright (c) 2016-2018 Florent Kermarrec <florent@enjoy-digital.fr>
+# This file is Copyright (c) 2016 Tim 'mithro' Ansell <mithro@mithis.com>
+# This file is Copyright (c) 2020 Antmicro <www.antmicro.com>
+# License: BSD
+
+import unittest
+
+from migen import *
+
+from litex.gen.sim import *
+
+from litedram.common import *
+from litedram.frontend.bist import *
+from litedram.frontend.bist import _LiteDRAMBISTGenerator, _LiteDRAMBISTChecker, \
+    _LiteDRAMPatternGenerator, _LiteDRAMPatternChecker
+
+from test.common import *
+
+
+class GenCheckDriver:
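+    # Drives a BIST generator/checker directly through its module signals.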
+    def __init__(self, module):
+        self.module = module
+
+    def reset(self):
+        yield self.module.reset.eq(1)
+        yield
+        yield self.module.reset.eq(0)
+        yield
+
+    def configure(self, base, length, end=None, random_addr=None, random_data=None):
+        # For non-pattern generators/checkers
+        if end is None:
+            end = base + 0x100000
+        yield self.module.base.eq(base)
+        yield self.module.end.eq(end)
+        yield self.module.length.eq(length)
+        if random_addr is not None:
+            yield self.module.random_addr.eq(random_addr)
+        if random_data is not None:
+            yield self.module.random_data.eq(random_data)
+
+    def run(self):
+        yield self.module.start.eq(1)
+        yield
+        yield self.module.start.eq(0)
+        yield
+        while((yield self.module.done) == 0):
+            yield
+        if hasattr(self.module, "errors"):
+            self.errors = (yield self.module.errors)
+
+
+class GenCheckCSRDriver:
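+    # Drives a BIST generator/checker through its CSR interface.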
+    def __init__(self, module):
+        self.module = module
+
+    def reset(self):
+        yield from self.module.reset.write(1)
+        yield from self.module.reset.write(0)
+
+    def configure(self, base, length, end=None, random_addr=None, random_data=None):
+        # For non-pattern generators/checkers
+        if end is None:
+            end = base + 0x100000
+        yield from self.module.base.write(base)
+        yield from self.module.end.write(end)
+        yield from self.module.length.write(length)
+        if random_addr is not None:
+            yield from self.module.random.addr.write(random_addr)
+        if random_data is not None:
+            yield from self.module.random.data.write(random_data)
+
+    def run(self):
+        yield from self.module.start.write(1)
+        yield
+        yield from self.module.start.write(0)
+        yield
+        while((yield from self.module.done.read()) == 0):
+            yield
+        if hasattr(self.module, "errors"):
+            self.errors = (yield from self.module.errors.read())
+
+
+class TestBIST(MemoryTestDataMixin, unittest.TestCase):
+
+    # Generator ------------------------------------------------------------------------------------
+
+    def test_generator(self):
+        # Verify Generator is behaving correctly in the incr/random modes.
+        def main_generator(dut):
+            self.errors = 0
+
+            # Test incr
+            yield dut.ce.eq(1)
+            yield dut.random_enable.eq(0)
+            yield
+            for i in range(1024):
+                data = (yield dut.o)
+                if data != i:
+                    self.errors += 1
+                yield
+
+            # Test random
+            datas = []
+            yield dut.ce.eq(1)
+            yield dut.random_enable.eq(1)
+            for i in range(1024):
+                data = (yield dut.o)
+                if data in datas:
+                    self.errors += 1
+                datas.append(data)
+                yield
+
+        # DUT
+        dut = Generator(23, n_state=23, taps=[17, 22])
+
+        # Simulation
+        generators = [main_generator(dut)]
+        run_simulation(dut, generators)
+        self.assertEqual(self.errors, 0)
+
+    def generator_test(self, mem_expected, data_width, pattern=None, config_args=None,
+                       check_mem=True):
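+        # Run a BIST or pattern generator against a DRAMMemory model, then
+        # optionally compare the memory contents with `mem_expected`.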
+        assert pattern is None or config_args is None, \
+            "_LiteDRAMBISTGenerator xor _LiteDRAMPatternGenerator"
+
+        class DUT(Module):
+            def __init__(self):
+                self.write_port = LiteDRAMNativeWritePort(address_width=32, data_width=data_width)
+                if pattern is not None:
+                    self.submodules.generator = _LiteDRAMPatternGenerator(self.write_port, pattern)
+                else:
+                    self.submodules.generator = _LiteDRAMBISTGenerator(self.write_port)
+                self.mem = DRAMMemory(data_width, len(mem_expected))
+
+        def main_generator(driver):
+            yield from driver.reset()
+            if pattern is None:
+                yield from driver.configure(**config_args)
+            yield from driver.run()
+            yield
+
+        dut = DUT()
+        generators = [
+            main_generator(GenCheckDriver(dut.generator)),
+            dut.mem.write_handler(dut.write_port),
+        ]
+        run_simulation(dut, generators)
+        if check_mem:
+            self.assertEqual(dut.mem.mem, mem_expected)
+        return dut
+
+    # _LiteDRAMBISTGenerator -----------------------------------------------------------------------
+
+    def test_bist_generator_8bit(self):
+        # Verify BISTGenerator with a 8-bit datapath.
+        data = self.bist_test_data["8bit"]
+        self.generator_test(data.pop("expected"), data_width=8, config_args=data)
+
+    def test_bist_generator_range_must_be_pow2(self):
+        # NOTE:
+        # In the current implementation (end - start) must be a power of 2,
+        # but it would be better if this restriction didn't hold. This test
+        # is here just to notice the change if it happens unintentionally,
+        # and may be removed if we start supporting arbitrary ranges.
+        data = self.bist_test_data["8bit"]
+        data["end"] += 1
+        reference = data.pop("expected")
+        dut = self.generator_test(reference, data_width=8, config_args=data, check_mem=False)
+        self.assertNotEqual(dut.mem.mem, reference)
+
+    def test_bist_generator_32bit(self):
+        # Verify BISTGenerator with a 32-bit datapath.
+        data = self.bist_test_data["32bit"]
+        self.generator_test(data.pop("expected"), data_width=32, config_args=data)
+
+    def test_bist_generator_64bit(self):
+        # Verify BISTGenerator with a 64-bit datapath.
+        data = self.bist_test_data["64bit"]
+        self.generator_test(data.pop("expected"), data_width=64, config_args=data)
+
+    def test_bist_generator_32bit_address_masked(self):
+        # Verify BISTGenerator with a 32-bit datapath and masked address.
+        data = self.bist_test_data["32bit_masked"]
+        self.generator_test(data.pop("expected"), data_width=32, config_args=data)
+
+    def test_bist_generator_32bit_long_sequential(self):
+        # Verify BISTGenerator with a 32-bit datapath and long sequential pattern.
+        data = self.bist_test_data["32bit_long_sequential"]
+        self.generator_test(data.pop("expected"), data_width=32, config_args=data)
+
+    def test_bist_generator_random_data(self):
+        # Verify BISTGenerator with a 32-bit datapath and random pattern.
+        data = self.bist_test_data["32bit"]
+        data["random_data"] = True
+        dut = self.generator_test(data.pop("expected"), data_width=32, config_args=data,
+                                  check_mem=False)
+        # Only check that there are no duplicates and that data is not a simple sequence
+        mem = [val for val in dut.mem.mem if val != 0]
+        self.assertEqual(len(set(mem)), len(mem), msg="Duplicate values in memory")
+        self.assertNotEqual(mem, list(range(len(mem))), msg="Values are a sequence")
+
+    def test_bist_generator_random_addr(self):
+        # Verify BISTGenerator with a 32-bit datapath and random address.
+        data = self.bist_test_data["32bit"]
+        data["random_addr"] = True
+        dut = self.generator_test(data.pop("expected"), data_width=32, config_args=data,
+                                  check_mem=False)
+        # With a random address and address wrapping (generator.end) we _can_ have duplicates,
+        # so we only check that the values written are not an ordered sequence
+        mem = [val for val in dut.mem.mem if val != 0]
+        self.assertNotEqual(mem, list(range(len(mem))), msg="Values are a sequence")
+        self.assertLess(max(mem), data["length"], msg="Too big value found")
+
+    # _LiteDRAMPatternGenerator --------------------------------------------------------------------
+
+    def test_pattern_generator_8bit(self):
+        # Verify PatternGenerator with a 8-bit datapath.
+        data = self.pattern_test_data["8bit"]
+        self.generator_test(data["expected"], data_width=8, pattern=data["pattern"])
+
+    def test_pattern_generator_32bit(self):
+        # Verify PatternGenerator with a 32-bit datapath.
+        data = self.pattern_test_data["32bit"]
+        self.generator_test(data["expected"], data_width=32, pattern=data["pattern"])
+
+    def test_pattern_generator_64bit(self):
+        # Verify PatternGenerator with a 64-bit datapath.
+        data = self.pattern_test_data["64bit"]
+        self.generator_test(data["expected"], data_width=64, pattern=data["pattern"])
+
+    def test_pattern_generator_32bit_not_aligned(self):
+        # Verify PatternGenerator with a 32-bit datapath and un-aligned addresses.
+        data = self.pattern_test_data["32bit_not_aligned"]
+        self.generator_test(data["expected"], data_width=32, pattern=data["pattern"])
+
+    def test_pattern_generator_32bit_duplicates(self):
+        # Verify PatternGenerator with a 32-bit datapath and duplicate addresses.
+        data = self.pattern_test_data["32bit_duplicates"]
+        self.generator_test(data["expected"], data_width=32, pattern=data["pattern"])
+
+    def test_pattern_generator_32bit_sequential(self):
+        # Verify PatternGenerator with a 32-bit datapath and sequential pattern.
+        data = self.pattern_test_data["32bit_sequential"]
+        self.generator_test(data["expected"], data_width=32, pattern=data["pattern"])
+
+    # _LiteDRAMBISTChecker -------------------------------------------------------------------------
+
+    def checker_test(self, memory, data_width, pattern=None, config_args=None, check_errors=False):
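+        # Run a BIST or pattern checker against a DRAMMemory preloaded with `memory`
+        # and return the DUT together with its driver (which holds the error count).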
+        assert pattern is None or config_args is None, \
+            "_LiteDRAMBISTChecker xor _LiteDRAMPatternChecker"
+
+        class DUT(Module):
+            def __init__(self):
+                self.read_port = LiteDRAMNativeReadPort(address_width=32, data_width=data_width)
+                if pattern is not None:
+                    self.submodules.checker = _LiteDRAMPatternChecker(self.read_port, init=pattern)
+                else:
+                    self.submodules.checker = _LiteDRAMBISTChecker(self.read_port)
+                self.mem = DRAMMemory(data_width, len(memory), init=memory)
+
+        def main_generator(driver):
+            yield from driver.reset()
+            if pattern is None:
+                yield from driver.configure(**config_args)
+            yield from driver.run()
+            yield
+
+        dut = DUT()
+        checker = GenCheckDriver(dut.checker)
+        generators = [
+            main_generator(checker),
+            dut.mem.read_handler(dut.read_port),
+        ]
+        run_simulation(dut, generators)
+        if check_errors:
+            self.assertEqual(checker.errors, 0)
+        return dut, checker
+
+    def test_bist_checker_8bit(self):
+        # Verify BISTChecker with a 8-bit datapath.
+        data = self.bist_test_data["8bit"]
+        memory = data.pop("expected")
+        self.checker_test(memory, data_width=8, config_args=data)
+
+    def test_bist_checker_32bit(self):
+        # Verify BISTChecker with a 32-bit datapath.
+        data = self.bist_test_data["32bit"]
+        memory = data.pop("expected")
+        self.checker_test(memory, data_width=32, config_args=data)
+
+    def test_bist_checker_64bit(self):
+        # Verify BISTChecker with a 64-bit datapath.
+        data = self.bist_test_data["64bit"]
+        memory = data.pop("expected")
+        self.checker_test(memory, data_width=64, config_args=data)
+
+    # _LiteDRAMPatternChecker ----------------------------------------------------------------------
+
+    def test_pattern_checker_8bit(self):
+        # Verify PatternChecker with a 8-bit datapath.
+        data = self.pattern_test_data["8bit"]
+        self.checker_test(memory=data["expected"], data_width=8, pattern=data["pattern"])
+
+    def test_pattern_checker_32bit(self):
+        # Verify PatternChecker with a 32-bit datapath.
+        data = self.pattern_test_data["32bit"]
+        self.checker_test(memory=data["expected"], data_width=32, pattern=data["pattern"])
+
+    def test_pattern_checker_64bit(self):
+        # Verify PatternChecker with a 64-bit datapath.
+        data = self.pattern_test_data["64bit"]
+        self.checker_test(memory=data["expected"], data_width=64, pattern=data["pattern"])
+
+    def test_pattern_checker_32bit_not_aligned(self):
+        # Verify PatternChecker with a 32-bit datapath and un-aligned addresses.
+        data = self.pattern_test_data["32bit_not_aligned"]
+        self.checker_test(memory=data["expected"], data_width=32, pattern=data["pattern"])
+
+    def test_pattern_checker_32bit_duplicates(self):
+        # Verify PatternChecker with a 32-bit datapath and duplicate addresses.
+        data = self.pattern_test_data["32bit_duplicates"]
+        num_duplicates = len(data["pattern"]) - len(set(adr for adr, _ in data["pattern"]))
+        dut, checker = self.checker_test(
+            memory=data["expected"], data_width=32, pattern=data["pattern"], check_errors=False)
+        self.assertEqual(checker.errors, num_duplicates)
+
+    # LiteDRAMBISTGenerator and LiteDRAMBISTChecker ------------------------------------------------
+
+    def bist_test(self, generator, checker, mem):
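+        # Shared BIST scenario: write a region, verify it, corrupt part of it by
+        # re-running the generator at an overlapping offset, then check that the
+        # overlap is reported as errors.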
+        # Write
+        yield from generator.reset()
+        yield from generator.configure(base=16, length=64)
+        yield from generator.run()
+
+        # Read (no errors)
+        yield from checker.reset()
+        yield from checker.configure(base=16, length=64)
+        yield from checker.run()
+        self.assertEqual(checker.errors, 0)
+
+        # Corrupt memory (using generator)
+        yield from generator.reset()
+        yield from generator.configure(base=16 + 48, length=64)
+        yield from generator.run()
+
+        # Read (errors)
+        yield from checker.reset()
+        yield from checker.configure(base=16, length=64)
+        yield from checker.run()
+        # Errors for words:
+        # from (16 + 48) / 4 = 16  (corrupting generator start)
+        # to   (16 + 64) / 4 = 20  (first generator end)
+        self.assertEqual(checker.errors, 4)
+
+        # Read (no errors)
+        yield from checker.reset()
+        yield from checker.configure(base=16 + 48, length=64)
+        yield from checker.run()
+        self.assertEqual(checker.errors, 0)
+
+    def test_bist_base(self):
+        # Verify BIST (Generator and Checker) with control from the logic.
+        class DUT(Module):
+            def __init__(self):
+                self.write_port = LiteDRAMNativeWritePort(address_width=32, data_width=32)
+                self.read_port  = LiteDRAMNativeReadPort(address_width=32, data_width=32)
+                self.submodules.generator = _LiteDRAMBISTGenerator(self.write_port)
+                self.submodules.checker   = _LiteDRAMBISTChecker(self.read_port)
+
+        def main_generator(dut, mem):
+            generator = GenCheckDriver(dut.generator)
+            checker   = GenCheckDriver(dut.checker)
+            yield from self.bist_test(generator, checker, mem)
+
+        # DUT
+        dut = DUT()
+        mem = DRAMMemory(32, 48)
+
+        # Simulation
+        generators = [
+            main_generator(dut, mem),
+            mem.write_handler(dut.write_port),
+            mem.read_handler(dut.read_port)
+        ]
+        run_simulation(dut, generators)
+
+    def test_bist_csr(self):
+        # Verify BIST (Generator and Checker) with control from CSRs.
+        class DUT(Module):
+            def __init__(self):
+                self.write_port = LiteDRAMNativeWritePort(address_width=32, data_width=32)
+                self.read_port  = LiteDRAMNativeReadPort(address_width=32, data_width=32)
+                self.submodules.generator = LiteDRAMBISTGenerator(self.write_port)
+                self.submodules.checker   = LiteDRAMBISTChecker(self.read_port)
+
+        def main_generator(dut, mem):
+            generator = GenCheckCSRDriver(dut.generator)
+            checker   = GenCheckCSRDriver(dut.checker)
+            yield from self.bist_test(generator, checker, mem)
+
+        # DUT
+        dut = DUT()
+        mem = DRAMMemory(32, 48)
+
+        # Simulation
+        generators = [
+            main_generator(dut, mem),
+            mem.write_handler(dut.write_port),
+            mem.read_handler(dut.read_port)
+        ]
+        run_simulation(dut, generators)
+
+    def test_bist_csr_cdc(self):
+        # Verify BIST (Generator and Checker) with control from CSRs in a different clock domain.
+        class DUT(Module):
+            def __init__(self):
+                port_kwargs     = dict(address_width=32, data_width=32, clock_domain="async")
+                self.write_port = LiteDRAMNativeWritePort(**port_kwargs)
+                self.read_port  = LiteDRAMNativeReadPort(**port_kwargs)
+                self.submodules.generator = LiteDRAMBISTGenerator(self.write_port)
+                self.submodules.checker   = LiteDRAMBISTChecker(self.read_port)
+
+        def main_generator(dut, mem):
+            generator = GenCheckCSRDriver(dut.generator)
+            checker   = GenCheckCSRDriver(dut.checker)
+            yield from self.bist_test(generator, checker, mem)
+
+        # DUT
+        dut = DUT()
+        mem = DRAMMemory(32, 48)
+
+        generators = {
+            "sys": [
+                main_generator(dut, mem),
+            ],
+            "async": [
+                mem.write_handler(dut.write_port),
+                mem.read_handler(dut.read_port)
+            ]
+        }
+        clocks = {
+            "sys": 10,
+            "async": (7, 3),
+        }
+        run_simulation(dut, generators, clocks)
diff --git a/test/test_command_chooser.py b/test/test_command_chooser.py
new file mode 100644 (file)
index 0000000..b34323d
--- /dev/null
@@ -0,0 +1,223 @@
+# This file is Copyright (c) 2020 Antmicro <www.antmicro.com>
+# License: BSD
+
+import unittest
+
+from migen import *
+from litex.soc.interconnect import stream
+
+from litedram.common import *
+from litedram.core.multiplexer import _CommandChooser
+
+from test.common import CmdRequestRWDriver
+
+
+class CommandChooserDUT(Module):
+    def __init__(self, n_requests, addressbits, bankbits):
+        self.requests = [stream.Endpoint(cmd_request_rw_layout(a=addressbits, ba=bankbits))
+                         for _ in range(n_requests)]
+        self.submodules.chooser = _CommandChooser(self.requests)
+
+        self.drivers = [CmdRequestRWDriver(req, i) for i, req in enumerate(self.requests)]
+
+    def set_requests(self, description):
+        assert len(description) == len(self.drivers)
+        for driver, char in zip(self.drivers, description):
+            yield from driver.request(char)
+
+
+class TestCommandChooser(unittest.TestCase):
+    def test_helper_methods_correct(self):
+        # Verify that helper methods return correct values.
+        def main_generator(dut):
+            possible_cmds     = "_rwap"
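+            # Each character describes one request type as driven by CmdRequestRWDriver
+            # (assumed mapping): "_" = no request, "r" = read, "w" = write,
+            # "a" = activate, "p" = precharge.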
+            expected_read     = "01000"
+            expected_write    = "00100"
+            expected_activate = "00010"
+            helper_methods = {
+                "write": expected_write,
+                "read": expected_read,
+                "activate": expected_activate,
+            }
+
+            # Create a subTest for each method
+            for method, expected_values in helper_methods.items():
+                with self.subTest(method=method):
+                    # Set each available command as the first request and verify
+                    # that the helper method returns the correct value. We can
+                    # safely use only the first request because no requests are
+                    # valid as all the want_* signals are 0.
+                    for cmd, expected in zip(possible_cmds, expected_values):
+                        yield from dut.set_requests(f"{cmd}___")
+                        yield
+                        method_value = (yield getattr(dut.chooser, method)())
+                        self.assertEqual(method_value, int(expected))
+
+            # Test accept helper
+            with self.subTest(method="accept"):
+                yield dut.chooser.want_writes.eq(1)
+                yield
+
+                yield from dut.set_requests("____")
+                yield
+                self.assertEqual((yield dut.chooser.accept()), 0)
+
+                # Set write request, this sets request.valid=1
+                yield from dut.set_requests("w___")
+                yield
+                self.assertEqual((yield dut.chooser.accept()), 0)
+                self.assertEqual((yield dut.chooser.cmd.valid), 1)
+
+                # accept() is asserted only after we set cmd.ready=1
+                yield dut.chooser.cmd.ready.eq(1)
+                yield
+                self.assertEqual((yield dut.chooser.accept()), 1)
+
+        dut = CommandChooserDUT(n_requests=4, bankbits=3, addressbits=13)
+        run_simulation(dut, main_generator(dut))
+
+    def test_selects_next_when_request_not_valid(self):
+        # Verify that arbiter moves to next request when valid goes inactive.
+        def main_generator(dut):
+            yield dut.chooser.want_cmds.eq(1)
+            yield from dut.set_requests("pppp")
+            yield
+
+            # Advance to next request
+            def invalidate(i):
+                yield dut.requests[i].valid.eq(0)
+                yield
+                yield dut.requests[i].valid.eq(1)
+                yield
+
+            # First request is selected as it is valid and ~ready
+            self.assertEqual((yield dut.chooser.cmd.ba), 0)
+            yield
+            self.assertEqual((yield dut.chooser.cmd.ba), 0)
+
+            # After deactivating `valid`, the arbiter should choose the next request
+            yield from invalidate(0)
+            self.assertEqual((yield dut.chooser.cmd.ba), 1)
+            yield from invalidate(1)
+            self.assertEqual((yield dut.chooser.cmd.ba), 2)
+            yield from invalidate(2)
+            self.assertEqual((yield dut.chooser.cmd.ba), 3)
+            yield from invalidate(3)
+            self.assertEqual((yield dut.chooser.cmd.ba), 0)
+
+        dut = CommandChooserDUT(n_requests=4, bankbits=3, addressbits=13)
+        run_simulation(dut, main_generator(dut))
+
+    def test_selects_next_when_cmd_ready(self):
+        # Verify that next request is chosen when the current one becomes ready.
+        def main_generator(dut):
+            yield dut.chooser.want_cmds.eq(1)
+            yield from dut.set_requests("pppp")
+            yield
+
+            # Advance to next request
+            def cmd_ready():
+                yield dut.chooser.cmd.ready.eq(1)
+                yield
+                yield dut.chooser.cmd.ready.eq(0)
+                yield
+
+            # First request is selected as it is valid and ~ready
+            self.assertEqual((yield dut.chooser.cmd.ba), 0)
+            yield
+            self.assertEqual((yield dut.chooser.cmd.ba), 0)
+
+            # After cmd.ready is pulsed, the arbiter should choose the next request
+            yield from cmd_ready()
+            self.assertEqual((yield dut.chooser.cmd.ba), 1)
+            yield from cmd_ready()
+            self.assertEqual((yield dut.chooser.cmd.ba), 2)
+            yield from cmd_ready()
+            self.assertEqual((yield dut.chooser.cmd.ba), 3)
+            yield from cmd_ready()
+            self.assertEqual((yield dut.chooser.cmd.ba), 0)
+
+        dut = CommandChooserDUT(n_requests=4, bankbits=3, addressbits=13)
+        run_simulation(dut, main_generator(dut))
+
+    def selection_test(self, requests, expected_order, wants):
+        # Sets requests to the given states and tests whether they are connected
+        # to chooser.cmd in the expected order. The `ba` value is used to distinguish
+        # requests (as initialised in CommandChooserDUT).
+        # "_" means no valid request.
+        def main_generator(dut):
+            for want in wants:
+                yield getattr(dut.chooser, want).eq(1)
+
+            yield from dut.set_requests(requests)
+            yield
+
+            for i, expected_index in enumerate(expected_order):
+                error_msg = f"requests={requests}, expected_order={expected_order}, i={i}"
+                if expected_index == "_":  # not valid - cas/ras/we should be 0
+                    cas = (yield dut.chooser.cmd.cas)
+                    ras = (yield dut.chooser.cmd.ras)
+                    we = (yield dut.chooser.cmd.we)
+                    self.assertEqual((cas, ras, we), (0, 0, 0), msg=error_msg)
+                else:
+                    # Check that ba is as expected
+                    selected_request_index = (yield dut.chooser.cmd.ba)
+                    self.assertEqual(selected_request_index, int(expected_index), msg=error_msg)
+
+                # Advance to next request
+                yield dut.chooser.cmd.ready.eq(1)
+                yield
+                yield dut.chooser.cmd.ready.eq(0)
+                yield
+
+        assert len(requests) == 8
+        dut = CommandChooserDUT(n_requests=8, bankbits=3, addressbits=13)
+        run_simulation(dut, main_generator(dut))
+
+    @unittest.skip("Issue #174")
+    def test_selects_nothing(self):
+        # When want_* = 0, the chooser should set cas/ras/we = 0, which means no valid request
+        requests = "w_rawpwr"
+        order    = "____"  # cas/ras/we are never set
+        self.selection_test(requests, order, wants=[])
+
+    def test_selects_writes(self):
+        requests = "w_rawpwr"
+        order    = "0460460"
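+        # "w_rawpwr" has write requests at indices 0, 4 and 6, so with want_writes
+        # the chooser is expected to cycle through them repeatedly, hence "0460460".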
+        self.selection_test(requests, order, wants=["want_writes"])
+
+    def test_selects_reads(self):
+        requests = "rp_awrrw"
+        order    = "0560560"
+        self.selection_test(requests, order, wants=["want_reads"])
+
+    @unittest.skip("Issue #174")
+    def test_selects_writes_and_reads(self):
+        requests = "rp_awrrw"
+        order    = "04567045670"
+        self.selection_test(requests, order, wants=["want_reads", "want_writes"])
+
+    @unittest.skip("Issue #174")
+    def test_selects_cmds_without_act(self):
+        # When want_cmds = 1, but want_activates = 0, activate commands should not be selected
+        requests = "pr_aa_pw"
+        order    = "06060"
+        self.selection_test(requests, order, wants=["want_cmds"])
+
+    def test_selects_cmds_with_act(self):
+        # When want_cmds/activates = 1, both activate and precharge should be selected
+        requests = "pr_aa_pw"
+        order    = "034603460"
+        self.selection_test(requests, order, wants=["want_cmds", "want_activates"])
+
+    @unittest.skip("Issue #174")
+    def test_selects_nothing_when_want_activates_only(self):
+        # When only want_activates = 1, nothing will be selected
+        requests = "pr_aa_pw"
+        order    = "____"
+        self.selection_test(requests, order, wants=["want_activates"])
+
+    def test_selects_cmds_and_writes(self):
+        requests = "pr_aa_pw"
+        order    = "0670670"
+        self.selection_test(requests, order, wants=["want_cmds", "want_writes"])
diff --git a/test/test_crossbar.py b/test/test_crossbar.py
new file mode 100644 (file)
index 0000000..53d8886
--- /dev/null
@@ -0,0 +1,511 @@
+# This file is Copyright (c) 2020 Antmicro <www.antmicro.com>
+# License: BSD
+
+import random
+import unittest
+import functools
+import itertools
+from collections import namedtuple, defaultdict
+
+from migen import *
+
+from litedram.common import *
+from litedram.core.crossbar import LiteDRAMCrossbar
+
+from test.common import timeout_generator, NativePortDriver
+
+
+class ControllerStub:
+    """Simplified simulation of LiteDRAMController as seen by LiteDRAMCrossbar
+
+    This is a simplified implementation of LiteDRAMController suitable for
+    testing the crossbar. It consists of bankmachine handlers that try to mimic the
+    behaviour of real BankMachines. They also simulate data transmission by
+    scheduling it to appear on data interface (data_handler sets it).
+    """
+    W = namedtuple("WriteData", ["bank", "addr", "data", "we"])
+    R = namedtuple("ReadData",  ["bank", "addr", "data"])
+    WaitingData = namedtuple("WaitingData", ["delay", "data"])
+
+    def __init__(self, controller_interface, write_latency, read_latency, cmd_delay=None):
+        self.interface = controller_interface
+        self.write_latency = write_latency
+        self.read_latency = read_latency
+        self.data = []  # data registered on datapath (W/R)
+        self._waiting = []  # data waiting to be set on datapath
+        # Incremental generator of artificial read data
+        self._read_data = itertools.count(0x10)
+        # Simulated delay of command processing, by default just a constant
+        self._cmd_delay = cmd_delay or (lambda: 6)
+        # Minimal logic required so that no two banks will become ready at the same moment
+        self._multiplexer_lock = None
+
+    def generators(self):
+        bank_handlers = [self.bankmachine_handler(bn) for bn in range(self.interface.nbanks)]
+        return [self.data_handler(), *bank_handlers]
+
+    @passive
+    def data_handler(self):
+        # Responsible for passing data over datapath with requested latency
+        while True:
+            # Examine requests to find if there is any for that cycle
+            available = [w for w in self._waiting if w.delay == 0]
+            # Make sure that we never have more than one operation of the
+            # same type in the same cycle
+            type_counts = defaultdict(int)
+            for a in available:
+                type_counts[type(a.data)] += 1
+            for t, count in type_counts.items():
+                assert count == 1, \
+                    "%d data operations of type %s at the same time!" % (count, t.__name__)
+            for a in available:
+                # Remove it from the list and get the data
+                current = self._waiting.pop(self._waiting.index(a)).data
+                # If this was a write, then fill it with data from this cycle
+                if isinstance(current, self.W):
+                    current = current._replace(
+                        data=(yield self.interface.wdata),
+                        we=(yield self.interface.wdata_we),
+                    )
+                # If this was a read, then assert the data now
+                elif isinstance(current, self.R):
+                    yield self.interface.rdata.eq(current.data)
+                else:
+                    raise TypeError(current)
+                # Add it to the data that appeared on the datapath
+                self.data.append(current)
+            # Advance simulation time by 1 cycle
+            for i, w in enumerate(self._waiting):
+                self._waiting[i] = w._replace(delay=w.delay - 1)
+            yield
+
+    @passive
+    def bankmachine_handler(self, n):
+        # Simplified simulation of a bank machine.
+        # Uses a single buffer (no input fifo). Generates random read data.
+        bank = getattr(self.interface, "bank%d" % n)
+        while True:
+            # Wait for a valid bank command
+            while not (yield bank.valid):
+                # The lock is being held as long as there is a valid command
+                # in the buffer or there is a valid command on the interface.
+                # As at this point we have nothing in the buffer, we unlock
+                # the lock only if the command on the interface is not valid.
+                yield bank.lock.eq(0)
+                yield
+            # Latch the command to the internal buffer
+            cmd_addr = (yield bank.addr)
+            cmd_we = (yield bank.we)
+            # Lock the buffer as soon as command is valid on the interface.
+            # We do this 1 cycle after we see the command, but BankMachine
+            # also has latency, because cmd_buffer_lookahead.source must
+            # become valid.
+            yield bank.lock.eq(1)
+            yield bank.ready.eq(1)
+            yield
+            yield bank.ready.eq(0)
+            # Simulate that we are processing the command
+            for _ in range(self._cmd_delay()):
+                yield
+            # Avoid situation that can happen due to the lack of multiplexer,
+            # where more than one bank would send data at the same moment
+            while self._multiplexer_lock is not None:
+                yield
+            self._multiplexer_lock = n
+            yield
+            # After a READ/WRITE has been issued, this is signalled using
+            # rdata_valid/wdata_ready. The actual data will appear with latency.
+            if cmd_we:  # WRITE
+                yield bank.wdata_ready.eq(1)
+                yield
+                yield bank.wdata_ready.eq(0)
+                # Send a request to the data_handler, it will check what
+                # has been sent from the crossbar port.
+                wdata = self.W(bank=n, addr=cmd_addr,
+                               data=None, we=None)  # to be filled in callback
+                self._waiting.append(self.WaitingData(data=wdata, delay=self.write_latency))
+            else:  # READ
+                yield bank.rdata_valid.eq(1)
+                yield
+                yield bank.rdata_valid.eq(0)
+                # Send a request with "data from memory" to the data_handler
+                rdata = self.R(bank=n, addr=cmd_addr, data=next(self._read_data))
+                # Decrease latency, as data_handler sets data with 1 cycle delay
+                self._waiting.append(self.WaitingData(data=rdata, delay=self.read_latency - 1))
+            # At this point cmd_buffer.source.ready has been activated and the
+            # command in internal buffer has been discarded. The lock will be
+            # removed in next loop if there is no other command pending.
+            self._multiplexer_lock = None
+            yield
+
+
+class CrossbarDUT(Module):
+    default_controller_settings = dict(
+        cmd_buffer_depth = 8,
+        address_mapping  = "ROW_BANK_COL",
+    )
+    default_phy_settings = dict(
+        cwl           = 2,
+        nphases       = 2,
+        nranks        = 1,
+        memtype       = "DDR2",
+        dfi_databits  = 2*16,
+        read_latency  = 5,
+        write_latency = 1,
+    )
+    default_geom_settings = dict(
+        bankbits = 3,
+        rowbits  = 13,
+        colbits  = 10,
+    )
+
+    def __init__(self, controller_settings=None, phy_settings=None, geom_settings=None):
+        # update settings if provided
+        def updated(settings, update):
+            copy = settings.copy()
+            copy.update(update or {})
+            return copy
+
+        controller_settings = updated(self.default_controller_settings, controller_settings)
+        phy_settings        = updated(self.default_phy_settings, phy_settings)
+        geom_settings       = updated(self.default_geom_settings, geom_settings)
+
+        class SimpleSettings(Settings):
+            def __init__(self, **kwargs):
+                self.set_attributes(kwargs)
+
+        settings        = SimpleSettings(**controller_settings)
+        settings.phy    = SimpleSettings(**phy_settings)
+        settings.geom   = SimpleSettings(**geom_settings)
+        self.settings = settings
+
+        self.address_align = log2_int(burst_lengths[settings.phy.memtype])
+        self.interface = LiteDRAMInterface(self.address_align, settings)
+        self.submodules.crossbar = LiteDRAMCrossbar(self.interface)
+
+    def addr_port(self, bank, row, col):
+        # construct an address the way port master would do it
+        assert self.settings.address_mapping == "ROW_BANK_COL"
+        aa = self.address_align
+        cb = self.settings.geom.colbits
+        rb = self.settings.geom.rowbits
+        bb = self.settings.geom.bankbits
+        col  = (col  & (2**cb - 1)) >> aa
+        bank = (bank & (2**bb - 1)) << (cb - aa)
+        row  = (row  & (2**rb - 1)) << (cb + bb - aa)
+        return row | bank | col
+
+    def addr_iface(self, row, col):
+        # Construct an address the way the bankmachine should receive it
+        aa = self.address_align
+        cb = self.settings.geom.colbits
+        rb = self.settings.geom.rowbits
+        col = (col & (2**cb - 1)) >> aa
+        row = (row & (2**rb - 1)) << (cb - aa)
+        return row | col
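+
+    # Worked example (a sketch, assuming the default settings above, i.e.
+    # colbits=10, rowbits=13, bankbits=3 and a DDR2 burst length of 4, so
+    # address_align = 2):
+    #   addr_port(bank=2, row=0x30, col=0x03):
+    #     col  = 0x03 >> 2            = 0x0
+    #     bank = 2    << (10 - 2)     = 0x200
+    #     row  = 0x30 << (10 + 3 - 2) = 0x18000
+    #     => 0x18200
+    #   addr_iface(row=0x30, col=0x03) = (0x30 << (10 - 2)) | (0x03 >> 2) = 0x3000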
+
+
+class TestCrossbar(unittest.TestCase):
+    W = ControllerStub.W
+    R = ControllerStub.R
+
+    def test_init(self):
+        dut = CrossbarDUT()
+        dut.crossbar.get_port()
+        dut.finalize()
+
+    def crossbar_test(self, dut, generators, timeout=100, **kwargs):
+        # Runs simulation with a controller stub (passive generators) and user generators
+        if not isinstance(generators, list):
+            generators = [generators]
+        controller = ControllerStub(dut.interface,
+                                    write_latency=dut.settings.phy.write_latency,
+                                    read_latency=dut.settings.phy.read_latency,
+                                    **kwargs)
+        generators += [*controller.generators(), timeout_generator(timeout)]
+        run_simulation(dut, generators)
+        return controller.data
+
+    def test_available_address_mappings(self):
+        # Check that the only supported address mapping is ROW_BANK_COL (if we start supporting new
+        # mappings, then update these tests to also test these other mappings).
+        def finalize_crossbar(mapping):
+            dut = CrossbarDUT(controller_settings=dict(address_mapping=mapping))
+            dut.crossbar.get_port()
+            dut.crossbar.finalize()
+
+        for mapping in ["ROW_BANK_COL", "BANK_ROW_COL"]:
+            if mapping in ["ROW_BANK_COL"]:
+                finalize_crossbar(mapping)
+            else:
+                with self.assertRaises(KeyError):
+                    finalize_crossbar(mapping)
+
+    def test_address_mappings(self):
+        # Verify that address is translated correctly.
+        reads = []
+
+        def producer(dut, port):
+            driver = NativePortDriver(port)
+            for t in transfers:
+                addr = dut.addr_port(bank=t["bank"], row=t["row"], col=t["col"])
+                if t["rw"] == self.W:
+                    yield from driver.write(addr, data=t["data"], we=t.get("we", None))
+                elif t["rw"] == self.R:
+                    data = (yield from driver.read(addr))
+                    reads.append(data)
+                else:
+                    raise TypeError(t["rw"])
+
+        geom_settings = dict(colbits=10, rowbits=13, bankbits=2)
+        dut  = CrossbarDUT(geom_settings=geom_settings)
+        port = dut.crossbar.get_port()
+        transfers = [
+            dict(rw=self.W, bank=2, row=0x30, col=0x03, data=0x20),
+            dict(rw=self.W, bank=3, row=0x30, col=0x03, data=0x21),
+            dict(rw=self.W, bank=2, row=0xab, col=0x03, data=0x22),
+            dict(rw=self.W, bank=2, row=0x30, col=0x13, data=0x23),
+            dict(rw=self.R, bank=1, row=0x10, col=0x99),
+            dict(rw=self.R, bank=0, row=0x10, col=0x99),
+            dict(rw=self.R, bank=1, row=0xcd, col=0x99),
+            dict(rw=self.R, bank=1, row=0x10, col=0x77),
+        ]
+        expected = []
+        for i, t in enumerate(transfers):
+            cls = t["rw"]
+            addr = dut.addr_iface(row=t["row"], col=t["col"])
+            if cls == self.W:
+                kwargs = dict(data=t["data"], we=0xff)
+            elif cls == self.R:
+                kwargs = dict(data=0x10 + i)
+            expected.append(cls(bank=t["bank"], addr=addr, **kwargs))
+
+        data = self.crossbar_test(dut, producer(port))
+        self.assertEqual(data, expected)
+
+    def test_arbitration(self):
+        # Create multiple masters that write to the same bank at the same time and verify that all
+        # the requests have been sent correctly.
+        def producer(dut, port, num):
+            driver = NativePortDriver(port)
+            addr = dut.addr_port(bank=3, row=0x10 + num, col=0x20 + num)
+            yield from driver.write(addr, data=0x30 + num)
+
+        dut      = CrossbarDUT()
+        ports    = [dut.crossbar.get_port() for _ in range(4)]
+        masters  = [producer(dut, port, i) for i, port in enumerate(ports)]
+        data     = self.crossbar_test(dut, masters)
+        expected = {
+            self.W(bank=3, addr=dut.addr_iface(row=0x10, col=0x20), data=0x30, we=0xff),
+            self.W(bank=3, addr=dut.addr_iface(row=0x11, col=0x21), data=0x31, we=0xff),
+            self.W(bank=3, addr=dut.addr_iface(row=0x12, col=0x22), data=0x32, we=0xff),
+            self.W(bank=3, addr=dut.addr_iface(row=0x13, col=0x23), data=0x33, we=0xff),
+        }
+        self.assertEqual(set(data), expected)
+
+    def test_lock_write(self):
+        # Verify that the locking mechanism works.
+        # Create a situation where master A wants to write to bank 0 and then bank 1, but master B
+        # is continuously writing to bank 1 (so that bank stays locked) and master A is blocked. We
+        # use wait_data=False because we are only concerned with sending commands fast enough for
+        # the lock to be held continuously.
+        def master_a(dut, port):
+            driver = NativePortDriver(port)
+            adr    = functools.partial(dut.addr_port, row=1, col=1)
+            write  = functools.partial(driver.write, wait_data=False)
+            yield from write(adr(bank=0), data=0x10)
+            yield from write(adr(bank=1), data=0x11)
+            yield from write(adr(bank=0), data=0x12, wait_data=True)
+
+        def master_b(dut, port):
+            driver = NativePortDriver(port)
+            adr    = functools.partial(dut.addr_port, row=2, col=2)
+            write  = functools.partial(driver.write, wait_data=False)
+            yield from write(adr(bank=1), data=0x20)
+            yield from write(adr(bank=1), data=0x21)
+            yield from write(adr(bank=1), data=0x22)
+            yield from write(adr(bank=1), data=0x23)
+            yield from write(adr(bank=1), data=0x24)
+
+        dut   = CrossbarDUT()
+        ports = [dut.crossbar.get_port() for _ in range(2)]
+        data  = self.crossbar_test(dut, [master_a(dut, ports[0]), master_b(dut, ports[1])])
+        expected = [
+            self.W(bank=0, addr=dut.addr_iface(row=1, col=1), data=0x10, we=0xff),  # A
+            self.W(bank=1, addr=dut.addr_iface(row=2, col=2), data=0x20, we=0xff),  #  B
+            self.W(bank=1, addr=dut.addr_iface(row=2, col=2), data=0x21, we=0xff),  #  B
+            self.W(bank=1, addr=dut.addr_iface(row=2, col=2), data=0x22, we=0xff),  #  B
+            self.W(bank=1, addr=dut.addr_iface(row=2, col=2), data=0x23, we=0xff),  #  B
+            self.W(bank=1, addr=dut.addr_iface(row=2, col=2), data=0x24, we=0xff),  #  B
+            self.W(bank=1, addr=dut.addr_iface(row=1, col=1), data=0x11, we=0xff),  # A
+            self.W(bank=0, addr=dut.addr_iface(row=1, col=1), data=0x12, we=0xff),  # A
+        ]
+        self.assertEqual(data, expected)
+
+    def test_lock_read(self):
+        # Verify that the locking mechanism works.
+        def master_a(dut, port):
+            driver = NativePortDriver(port)
+            adr    = functools.partial(dut.addr_port, row=1, col=1)
+            read   = functools.partial(driver.read, wait_data=False)
+            yield from read(adr(bank=0))
+            yield from read(adr(bank=1))
+            yield from read(adr(bank=0))
+            # Wait for read data to show up
+            for _ in range(16):
+                yield
+
+        def master_b(dut, port):
+            driver = NativePortDriver(port)
+            adr    = functools.partial(dut.addr_port, row=2, col=2)
+            read   = functools.partial(driver.read, wait_data=False)
+            yield from read(adr(bank=1))
+            yield from read(adr(bank=1))
+            yield from read(adr(bank=1))
+            yield from read(adr(bank=1))
+            yield from read(adr(bank=1))
+
+        dut   = CrossbarDUT()
+        ports = [dut.crossbar.get_port() for _ in range(2)]
+        data  = self.crossbar_test(dut, [master_a(dut, ports[0]), master_b(dut, ports[1])])
+        expected = [
+            self.R(bank=0, addr=dut.addr_iface(row=1, col=1), data=0x10),  # A
+            self.R(bank=1, addr=dut.addr_iface(row=2, col=2), data=0x11),  #  B
+            self.R(bank=1, addr=dut.addr_iface(row=2, col=2), data=0x12),  #  B
+            self.R(bank=1, addr=dut.addr_iface(row=2, col=2), data=0x13),  #  B
+            self.R(bank=1, addr=dut.addr_iface(row=2, col=2), data=0x14),  #  B
+            self.R(bank=1, addr=dut.addr_iface(row=2, col=2), data=0x15),  #  B
+            self.R(bank=1, addr=dut.addr_iface(row=1, col=1), data=0x16),  # A
+            self.R(bank=0, addr=dut.addr_iface(row=1, col=1), data=0x17),  # A
+        ]
+        self.assertEqual(data, expected)
+
+    def crossbar_stress_test(self, dut, ports, n_banks, n_ops, clocks=None):
+        # Runs a simulation with multiple masters writing to and reading from multiple banks
+        controller = ControllerStub(dut.interface,
+                                    write_latency=dut.settings.phy.write_latency,
+                                    read_latency=dut.settings.phy.read_latency)
+        # Store data produced per master
+        produced = defaultdict(list)
+        prng = random.Random(42)
+
+        def master(dut, port, num):
+            # Choose operation types based on port mode
+            ops_choice = {
+                "both":  ["w", "r"],
+                "write": ["w"],
+                "read":  ["r"],
+            }[port.mode]
+            driver = NativePortDriver(port)
+
+            for i in range(n_ops):
+                bank = prng.randrange(n_banks)
+                # We will later distinguish data by its row address
+                row = num
+                col = 0x20 * num + i
+                addr = dut.addr_port(bank=bank, row=row, col=col)
+                addr_iface = dut.addr_iface(row=row, col=col)
+                if prng.choice(ops_choice) == "w":
+                    yield from driver.write(addr, data=i)
+                    produced[num].append(self.W(bank, addr_iface, data=i, we=0xff))
+                else:
+                    yield from driver.read(addr)
+                    produced[num].append(self.R(bank, addr_iface, data=None))
+
+            for _ in range(8):
+                yield
+
+        generators = defaultdict(list)
+        for i, port in enumerate(ports):
+            generators[port.clock_domain].append(master(dut, port, i))
+        generators["sys"] += controller.generators()
+        generators["sys"].append(timeout_generator(80 * n_ops))
+
+        sim_kwargs = {}
+        if clocks is not None:
+            sim_kwargs["clocks"] = clocks
+        run_simulation(dut, generators, **sim_kwargs)
+
+        # Split controller data by master, as this is what we want to compare
+        consumed = defaultdict(list)
+        for data in controller.data:
+            master = data.addr >> (dut.settings.geom.colbits - dut.address_align)
+            if isinstance(data, self.R):
+                # Master couldn't know the data when it was sending
+                data = data._replace(data=None)
+            consumed[master].append(data)
+
+        return produced, consumed, controller.data
+
+    def test_stress(self):
+        # Test communication in complex scenarios.
+        dut = CrossbarDUT()
+        ports = [dut.crossbar.get_port() for _ in range(8)]
+        produced, consumed, consumed_all = self.crossbar_stress_test(dut, ports, n_banks=4, n_ops=8)
+        for master in produced.keys():
+            self.assertEqual(consumed[master], produced[master], msg="master = %d" % master)
+
+    def test_stress_single_bank(self):
+        # Test communication in complex scenarios
+        dut = CrossbarDUT()
+        ports = [dut.crossbar.get_port() for _ in range(4)]
+        produced, consumed, consumed_all = self.crossbar_stress_test(dut, ports, n_banks=1, n_ops=8)
+        for master in produced.keys():
+            self.assertEqual(consumed[master], produced[master], msg="master = %d" % master)
+
+    def test_stress_single_master(self):
+        # Test communication in complex scenarios.
+        dut = CrossbarDUT()
+        ports = [dut.crossbar.get_port() for _ in range(1)]
+        produced, consumed, consumed_all = self.crossbar_stress_test(dut, ports, n_banks=4, n_ops=8)
+        for master in produced.keys():
+            self.assertEqual(consumed[master], produced[master], msg="master = %d" % master)
+
+    def test_port_cdc(self):
+        # Verify that correct clock domain is being used.
+        dut = CrossbarDUT()
+        port = dut.crossbar.get_port(clock_domain="other")
+        self.assertEqual(port.clock_domain, "other")
+
+    def test_stress_cdc(self):
+        # Verify communication when ports are in different clock domains.
+        dut = CrossbarDUT()
+        clocks = {
+            "sys": 10,
+            "clk1": (7, 4),
+            "clk2": 12,
+        }
+        master_clocks = ["sys", "clk1", "clk2"]
+        ports = [dut.crossbar.get_port(clock_domain=clk) for clk in master_clocks]
+        produced, consumed, consumed_all = self.crossbar_stress_test(
+            dut, ports, n_banks=4, n_ops=6, clocks=clocks)
+        for master in produced.keys():
+            self.assertEqual(consumed[master], produced[master], msg="master = %d" % master)
+
+    def test_port_mode(self):
+        # Verify that ports in different modes can be requested.
+        dut = CrossbarDUT()
+        for mode in ["both", "write", "read"]:
+            port = dut.crossbar.get_port(mode=mode)
+            self.assertEqual(port.mode, mode)
+
+    # NOTE: Stress testing with different data widths would require complicating
+    # the logic a lot to support registering data coming in multiple words (in
+    # data_handler), address shifting and recreation of packets. Because of this,
+    # and because data width converters are tested separately in test_adaptation,
+    # here we only test if ports report correct data widths.
+    def test_port_data_width_conversion(self):
+        # Verify that correct port data widths are being used.
+        dut         = CrossbarDUT()
+        dw          = dut.interface.data_width
+        data_widths = [dw*2, dw, dw//2]
+        modes       = ["both", "write", "read"]
+        for mode, data_width in itertools.product(modes, data_widths):
+            with self.subTest(mode=mode, data_width=data_width):
+                # Up conversion is supported only for single direction ports
+                if mode == "both" and data_width < dut.interface.data_width:
+                    with self.assertRaises(NotImplementedError):
+                        dut.crossbar.get_port(mode=mode, data_width=data_width)
+                else:
+                    port = dut.crossbar.get_port(mode=mode, data_width=data_width)
+                    self.assertEqual(port.data_width, data_width)
diff --git a/test/test_dma.py b/test/test_dma.py
new file mode 100644 (file)
index 0000000..138a13a
--- /dev/null
@@ -0,0 +1,174 @@
+# This file is Copyright (c) 2020 Antmicro <www.antmicro.com>
+# License: BSD
+
+import unittest
+
+from migen import *
+
+from litex.gen.sim import *
+
+from litedram.common import *
+from litedram.frontend.dma import *
+
+from test.common import *
+
+
+class DMAWriterDriver:
+    def __init__(self, dma):
+        self.dma = dma
+
+    def write(self, pattern):
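+        # Drive the sink stream: hold valid high, present address/data for each
+        # beat, and wait for the DMA to assert sink.ready before the next beat.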
+        yield self.dma.sink.valid.eq(1)
+        for adr, data in pattern:
+            yield self.dma.sink.address.eq(adr)
+            yield self.dma.sink.data.eq(data)
+            while not (yield self.dma.sink.ready):
+                yield
+            yield
+        yield self.dma.sink.valid.eq(0)
+
+    @staticmethod
+    def wait_complete(port, n):
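+        # Wait until `n` write data words have been accepted by the port
+        # (each acceptance is signalled by a wdata.ready pulse).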
+        for _ in range(n):
+            while not (yield port.wdata.ready):
+                yield
+            yield
+
+
+class DMAReaderDriver:
+    def __init__(self, dma):
+        self.dma  = dma
+        self.data = []
+
+    def read(self, address_list):
+        n_last = len(self.data)
+        yield self.dma.sink.valid.eq(1)
+        for adr in address_list:
+            yield self.dma.sink.address.eq(adr)
+            while not (yield self.dma.sink.ready):
+                yield
+            while (yield self.dma.sink.ready):
+                yield
+        yield self.dma.sink.valid.eq(0)
+        while len(self.data) < n_last + len(address_list):
+            yield
+
+    @passive
+    def read_handler(self):
+        yield self.dma.source.ready.eq(1)
+        while True:
+            if (yield self.dma.source.valid):
+                self.data.append((yield self.dma.source.data))
+            yield
+
+
+class TestDMA(MemoryTestDataMixin, unittest.TestCase):
+
+    # LiteDRAMDMAWriter ----------------------------------------------------------------------------
+
+    def dma_writer_test(self, pattern, mem_expected, data_width, **kwargs):
+        class DUT(Module):
+            def __init__(self):
+                self.port = LiteDRAMNativeWritePort(address_width=32, data_width=data_width)
+                self.submodules.dma = LiteDRAMDMAWriter(self.port, **kwargs)
+
+        dut = DUT()
+        driver = DMAWriterDriver(dut.dma)
+        mem = DRAMMemory(data_width, len(mem_expected))
+
+        generators = [
+            driver.write(pattern),
+            driver.wait_complete(dut.port, len(pattern)),
+            mem.write_handler(dut.port),
+        ]
+        run_simulation(dut, generators)
+        self.assertEqual(mem.mem, mem_expected)
+
+    def test_dma_writer_single(self):
+        # Verify DMAWriter with a single 32-bit data word.
+        pattern            = [(0x04, 0xdeadc0de)]
+        mem_expected       = [0] * 32
+        mem_expected[0x04] = 0xdeadc0de
+        self.dma_writer_test(pattern, mem_expected, data_width=32)
+
+    def test_dma_writer_multiple(self):
+        # Verify DMAWriter with multiple 32-bit data words.
+        data = self.pattern_test_data["32bit"]
+        self.dma_writer_test(data["pattern"], data["expected"], data_width=32)
+
+    def test_dma_writer_sequential(self):
+        # Verify DMAWriter with sequential 32-bit data words.
+        data = self.pattern_test_data["32bit_sequential"]
+        self.dma_writer_test(data["pattern"], data["expected"], data_width=32)
+
+    def test_dma_writer_long_sequential(self):
+        # Verify DMAWriter with long sequential 32-bit data words.
+        data = self.pattern_test_data["32bit_long_sequential"]
+        self.dma_writer_test(data["pattern"], data["expected"], data_width=32)
+
+    def test_dma_writer_no_fifo(self):
+        # Verify DMAWriter without FIFO.
+        data = self.pattern_test_data["32bit_long_sequential"]
+        self.dma_writer_test(data["pattern"], data["expected"], data_width=32, fifo_depth=1)
+
+    def test_dma_writer_fifo_buffered(self):
+        # Verify DMAWriter with a buffered FIFO.
+        data = self.pattern_test_data["32bit_long_sequential"]
+        self.dma_writer_test(data["pattern"], data["expected"], data_width=32, fifo_buffered=True)
+
+    def test_dma_writer_duplicates(self):
+        # Verify DMAWriter with duplicate addresses.
+        data = self.pattern_test_data["32bit_duplicates"]
+        self.dma_writer_test(data["pattern"], data["expected"], data_width=32)
+
+    # LiteDRAMDMAReader ----------------------------------------------------------------------------
+
+    def dma_reader_test(self, pattern, mem_expected, data_width, **kwargs):
+        class DUT(Module):
+            def __init__(self):
+                self.port = LiteDRAMNativeReadPort(address_width=32, data_width=data_width)
+                self.submodules.dma = LiteDRAMDMAReader(self.port, **kwargs)
+
+        dut    = DUT()
+        driver = DMAReaderDriver(dut.dma)
+        mem    = DRAMMemory(data_width, len(mem_expected), init=mem_expected)
+
+        generators = [
+            driver.read([adr for adr, data in pattern]),
+            driver.read_handler(),
+            mem.read_handler(dut.port),
+        ]
+        run_simulation(dut, generators)
+        self.assertEqual(driver.data, [data for adr, data in pattern])
+
+    def test_dma_reader_single(self):
+        # Verify DMAReader with a single 32-bit data word.
+        pattern            = [(0x04, 0xdeadc0de)]
+        mem_expected       = [0] * 32
+        mem_expected[0x04] = 0xdeadc0de
+        self.dma_reader_test(pattern, mem_expected, data_width=32)
+
+    def test_dma_reader_multiple(self):
+        # Verify DMAReader with multiple 32-bit data words.
+        data = self.pattern_test_data["32bit"]
+        self.dma_reader_test(data["pattern"], data["expected"], data_width=32)
+
+    def test_dma_reader_sequential(self):
+        # Verify DMAReader with sequential 32-bit data words.
+        data = self.pattern_test_data["32bit_sequential"]
+        self.dma_reader_test(data["pattern"], data["expected"], data_width=32)
+
+    def test_dma_reader_long_sequential(self):
+        # Verify DMAReader with long sequential 32-bit data words.
+        data = self.pattern_test_data["32bit_long_sequential"]
+        self.dma_reader_test(data["pattern"], data["expected"], data_width=32)
+
+    def test_dma_reader_no_fifo(self):
+        # Verify DMAReader without FIFO.
+        data = self.pattern_test_data["32bit_long_sequential"]
+        self.dma_reader_test(data["pattern"], data["expected"], data_width=32, fifo_depth=1)
+
+    def test_dma_reader_fifo_buffered(self):
+        # Verify DMAReader with a buffered FIFO.
+        data = self.pattern_test_data["32bit_long_sequential"]
+        self.dma_reader_test(data["pattern"], data["expected"], data_width=32, fifo_buffered=True)
diff --git a/test/test_ecc.py b/test/test_ecc.py
new file mode 100644 (file)
index 0000000..2da84da
--- /dev/null
@@ -0,0 +1,321 @@
+# This file is Copyright (c) 2018-2019 Florent Kermarrec <florent@enjoy-digital.fr>
+# This file is Copyright (c) 2020 Antmicro <www.antmicro.com>
+# License: BSD
+
+import unittest
+import random
+
+from migen import *
+
+from litedram.common import *
+from litedram.frontend.ecc import *
+
+from litex.gen.sim import *
+from litex.soc.cores.ecc import *
+
+from test.common import *
+
+# Helpers ------------------------------------------------------------------------------------------
+
+def bits(value, width=32):
+    # Convert int to a string representing binary value and reverse it so that we can index bits
+    # easily with s[0] being LSB
+    return f"{value:0{width}b}"[::-1]
+
+def frombits(bits):
+    # Reverse of bits()
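+    # For example: bits(0b1011, 4) == "1101" (LSB first) and
+    # frombits("1101") == 0b1011, so frombits(bits(x, w)) == x.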
+    return int(bits[::-1], 2)
+
+def bits_pp(value, width=32):
+    # Pretty-print a binary value, grouped by bytes
+    if isinstance(value, str):
+        value = frombits(value)
+    s = f"{value:0{width}b}"
+    byte_chunks = [s[i:i+8] for i in range(0, len(s), 8)]
+    return "0b " + " ".join(byte_chunks)
+
+def extract_ecc_data(data_width, codeword_width, codeword_bits):
+    extracted = ""
+    for i in range(8):
+        word = codeword_bits[codeword_width*i:codeword_width*(i+1)]
+        # Remove parity bit
+        word = word[1:]
+        data_pos = compute_data_positions(codeword_width - 1)  # -1 for parity
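+        # compute_data_positions() (from litex.soc.cores.ecc) is expected to return
+        # the 1-indexed positions of the data bits in the Hamming codeword, i.e. the
+        # positions that are not powers of two (those hold the parity bits).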
+        # Extract data bits
+        word_ex = list(bits(0, 32))
+        for j, d in enumerate(data_pos):
+            word_ex[j] = word[d-1]
+        word_ex = "".join(word_ex)
+        extracted += word_ex
+    return extracted
+
+# TestECC ------------------------------------------------------------------------------------------
+
+class TestECC(unittest.TestCase):
+    def test_eccw_connected(self):
+        # Verify LiteDRAMNativePortECCW ECC encoding.
+        class DUT(Module):
+            def __init__(self):
+                eccw = LiteDRAMNativePortECCW(data_width_from=32*8, data_width_to=39*8)
+                self.submodules.eccw = eccw
+
+        def main_generator(dut):
+            sink_data = seed_to_data(0, nbits=32*8)
+            yield dut.eccw.sink.data.eq(sink_data)
+            yield
+            source_data = (yield dut.eccw.source.data)
+
+            sink_data_bits   = bits(sink_data,   32*8)
+            source_data_bits = bits(source_data, 39*8)
+            self.assertNotEqual(sink_data_bits, source_data_bits[:len(sink_data_bits)])
+
+            source_extracted = extract_ecc_data(32, 39, source_data_bits)
+            # Assert each word separately for more readable assert messages
+            for i in range(8):
+                word = slice(32*i, 32*(i+1))
+                self.assertEqual(bits_pp(source_extracted[word]), bits_pp(sink_data_bits[word]),
+                    msg=f"Mismatch at i = {i}")
+
+        dut = DUT()
+        run_simulation(dut, main_generator(dut))
+
+    def test_eccw_we_enabled(self):
+        # Verify that LiteDRAMNativePortECCW always sets byte enables.
+        class DUT(Module):
+            def __init__(self):
+                eccw = LiteDRAMNativePortECCW(data_width_from=32*8, data_width_to=39*8)
+                self.submodules.eccw = eccw
+
+        def main_generator(dut):
+            yield
+            source_we = (yield dut.eccw.source.we)
+
+            self.assertEqual(bits_pp(source_we, 39//8), bits_pp(2**len(dut.eccw.source.we) - 1))
+
+        dut = DUT()
+        run_simulation(dut, main_generator(dut))
+
+    def test_eccr_connected(self):
+        # Verify LiteDRAMNativePortECCR ECC decoding.
+        class DUT(Module):
+            def __init__(self):
+                eccr = LiteDRAMNativePortECCR(data_width_from=32*8, data_width_to=39*8)
+                self.submodules.eccr = eccr
+
+        def main_generator(dut):
+            sink_data = seed_to_data(0, nbits=(39*8 // 32 + 1) * 32)
+
+            yield dut.eccr.sink.data.eq(sink_data)
+            yield
+            source_data = (yield dut.eccr.source.data)
+
+            sink_data_bits   = bits(sink_data, 39*8)
+            source_data_bits = bits(source_data, 32*8)
+            self.assertNotEqual(sink_data_bits[:len(source_data_bits)], source_data_bits)
+
+            sink_extracted = extract_ecc_data(32, 39, sink_data_bits)
+            self.assertEqual(bits_pp(sink_extracted), bits_pp(source_data_bits))
+            # Assert each word separately for more readable assert messages
+            for i in range(8):
+                word = slice(32*i, 32*(i+1))
+                self.assertEqual(bits_pp(sink_extracted[word]), bits_pp(source_data_bits[word]),
+                                 msg=f"Mismatch at i = {i}")
+
+        dut = DUT()
+        run_simulation(dut, main_generator(dut))
+
+    def test_eccr_errors_connected_when_sink_valid(self):
+        # Verify LiteDRAMNativePortECCR Error detection.
+        class DUT(Module):
+            def __init__(self):
+                eccr = LiteDRAMNativePortECCR(data_width_from=32*8, data_width_to=39*8)
+                self.submodules.eccr = eccr
+
+        def main_generator(dut):
+            yield dut.eccr.enable.eq(1)
+            yield dut.eccr.sink.data.eq(0b10)  # Wrong parity bit
+            yield
+            # Verify no errors are detected
+            self.assertEqual((yield dut.eccr.sec), 0)
+            self.assertEqual((yield dut.eccr.ded), 0)
+            # Set sink.valid and verify that a parity error is detected
+            yield dut.eccr.sink.valid.eq(1)
+            yield
+            self.assertEqual((yield dut.eccr.sec), 1)
+            self.assertEqual((yield dut.eccr.ded), 0)
+
+        dut = DUT()
+        run_simulation(dut, main_generator(dut))
+
+    def ecc_encode_decode_test(self, from_width, to_width, n, pre=None, post=None, **kwargs):
+        """ECC encoding/decoding generic test."""
+        class DUT(Module):
+            def __init__(self):
+                self.port_from = LiteDRAMNativePort("both", 24, from_width)
+                self.port_to   = LiteDRAMNativePort("both", 24, to_width)
+                self.submodules.ecc = LiteDRAMNativePortECC(self.port_from, self.port_to, **kwargs)
+                self.mem = DRAMMemory(to_width, n)
+
+                self.wdata = [seed_to_data(i, nbits=from_width) for i in range(n)]
+                self.rdata = []
+
+        def main_generator(dut):
+            if pre is not None:
+                yield from pre(dut)
+
+            port = dut.port_from
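+            # Each access below follows the native port protocol: a command handshake
+            # on port.cmd (valid/ready, with cmd.we selecting write or read), followed
+            # by a data transfer on port.wdata or port.rdata.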
+
+            # Write
+            for i in range(n):
+                yield port.cmd.valid.eq(1)
+                yield port.cmd.we.eq(1)
+                yield port.cmd.addr.eq(i)
+                yield
+                while (yield port.cmd.ready) == 0:
+                    yield
+                yield port.cmd.valid.eq(0)
+                yield
+                yield port.wdata.valid.eq(1)
+                yield port.wdata.data.eq(dut.wdata[i])
+                yield
+                while (yield port.wdata.ready) == 0:
+                    yield
+                yield port.wdata.valid.eq(0)
+                yield
+
+            # Read
+            for i in range(n):
+                yield port.cmd.valid.eq(1)
+                yield port.cmd.we.eq(0)
+                yield port.cmd.addr.eq(i)
+                yield
+                while (yield port.cmd.ready) == 0:
+                    yield
+                yield port.cmd.valid.eq(0)
+                yield
+                while (yield port.rdata.valid) == 0:
+                    yield
+                dut.rdata.append((yield port.rdata.data))
+                yield port.rdata.ready.eq(1)
+                yield
+                yield port.rdata.ready.eq(0)
+                yield
+
+            if post is not None:
+                yield from post(dut)
+
+        dut = DUT()
+        generators = [
+            main_generator(dut),
+            dut.mem.write_handler(dut.port_to),
+            dut.mem.read_handler(dut.port_to),
+        ]
+        run_simulation(dut, generators)
+        return dut
+
+    def test_ecc_32_7(self):
+        # Verify encoding/decoding on 32 data bits + 6 code bits + parity bit.
+        dut = self.ecc_encode_decode_test(32*8, 39*8, 2)
+        self.assertEqual(dut.wdata, dut.rdata)
+
+    def test_ecc_64_8(self):
+        # Verify encoding/decoding on 64 data bits + 7 code bits + parity bit.
+        dut = self.ecc_encode_decode_test(64*8, 72*8, 2)
+        self.assertEqual(dut.wdata, dut.rdata)
+
+    def test_ecc_sec_errors(self):
+        # Verify SEC errors detection/correction with 1-bit flip.
+        def pre(dut):
+            yield from dut.ecc.flip.write(0b00000100)
+
+        def post(dut):
+            dut.sec_errors = (yield from dut.ecc.sec_errors.read())
+            dut.ded_errors = (yield from dut.ecc.ded_errors.read())
+
+        dut = self.ecc_encode_decode_test(8*8, 13*8, 4, pre, post, with_error_injection=True)
+        self.assertEqual(dut.wdata, dut.rdata)
+        self.assertEqual(dut.sec_errors, 4)
+        self.assertEqual(dut.ded_errors, 0)
+
+    def test_ecc_ded_errors(self):
+        # Verify DED errors detection with 2-bit flip.
+        def pre(dut):
+            yield from dut.ecc.flip.write(0b00001100)
+
+        def post(dut):
+            dut.sec_errors = (yield from dut.ecc.sec_errors.read())
+            dut.ded_errors = (yield from dut.ecc.ded_errors.read())
+
+        dut = self.ecc_encode_decode_test(8*8, 13*8, 4, pre, post, with_error_injection=True)
+        self.assertNotEqual(dut.wdata, dut.rdata)
+        self.assertEqual(dut.sec_errors, 0)
+        self.assertEqual(dut.ded_errors, 4)
+
+    def test_ecc_decoder_disable(self):
+        # Verify enable control.
+        def pre(dut):
+            yield from dut.ecc.flip.write(0b10101100)
+            yield from dut.ecc.enable.write(0)
+
+        def post(dut):
+            dut.sec_errors = (yield from dut.ecc.sec_errors.read())
+            dut.ded_errors = (yield from dut.ecc.ded_errors.read())
+
+        dut = self.ecc_encode_decode_test(8*8, 13*8, 4, pre, post, with_error_injection=True)
+        self.assertNotEqual(dut.wdata, dut.rdata)
+        self.assertEqual(dut.sec_errors, 0)
+        self.assertEqual(dut.ded_errors, 0)
+
+    def test_ecc_clear_sec_errors(self):
+        # Verify SEC errors clear.
+        def pre(dut):
+            yield from dut.ecc.flip.write(0b00000100)
+
+        def post(dut):
+            # Read errors after test (SEC errors expected)
+            dut.sec_errors = (yield from dut.ecc.sec_errors.read())
+            dut.ded_errors = (yield from dut.ecc.ded_errors.read())
+
+            # Clear errors counters
+            yield from dut.ecc.clear.write(1)
+            yield
+
+            # Re-Read errors to verify clear
+            dut.sec_errors_c = (yield from dut.ecc.sec_errors.read())
+            dut.ded_errors_c = (yield from dut.ecc.ded_errors.read())
+
+        dut = self.ecc_encode_decode_test(8*8, 13*8, 4, pre, post, with_error_injection=True)
+        self.assertEqual(dut.wdata, dut.rdata)
+        self.assertNotEqual(dut.sec_errors, 0)
+        self.assertEqual(dut.ded_errors, 0)
+        self.assertEqual(dut.sec_errors_c, 0)
+        self.assertEqual(dut.ded_errors_c, 0)
+
+    def test_ecc_clear_ded_errors(self):
+        # Verify DED errors clear.
+        def pre(dut):
+            yield from dut.ecc.flip.write(0b10101100)
+
+        def post(dut):
+            # Read errors after test (DED errors expected)
+            dut.sec_errors = (yield from dut.ecc.sec_errors.read())
+            dut.ded_errors = (yield from dut.ecc.ded_errors.read())
+
+            # Clear errors counters
+            yield from dut.ecc.clear.write(1)
+            yield
+
+            # Re-Read errors to verify clear
+            dut.sec_errors_c = (yield from dut.ecc.sec_errors.read())
+            dut.ded_errors_c = (yield from dut.ecc.ded_errors.read())
+
+        dut = self.ecc_encode_decode_test(8*8, 13*8, 4, pre, post, with_error_injection=True)
+        self.assertNotEqual(dut.wdata, dut.rdata)
+        self.assertEqual(dut.sec_errors, 0)
+        self.assertNotEqual(dut.ded_errors, 0)
+        self.assertEqual(dut.sec_errors_c, 0)
+        self.assertEqual(dut.ded_errors_c, 0)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/test/test_examples.py b/test/test_examples.py
new file mode 100644 (file)
index 0000000..54cba0f
--- /dev/null
@@ -0,0 +1,28 @@
+# This file is Copyright (c) 2018-2019 Florent Kermarrec <florent@enjoy-digital.fr>
+# License: BSD
+
+import unittest
+import os
+
+
+def build_config(name):
+    errors = 0
+    os.system("rm -rf examples/build")
+    os.system("cd examples && python3 ../litedram/gen.py {}.yml".format(name))
+    errors += not os.path.isfile("examples/build/gateware/litedram_core.v")
+    os.system("rm -rf examples/build")
+    return errors
+
+
+class TestExamples(unittest.TestCase):
+    def test_arty(self):
+        errors = build_config("arty")
+        self.assertEqual(errors, 0)
+
+    def test_nexys4ddr(self):
+        errors = build_config("nexys4ddr")
+        self.assertEqual(errors, 0)
+
+    def test_genesys2(self):
+        errors = build_config("genesys2")
+        self.assertEqual(errors, 0)
diff --git a/test/test_fifo.py b/test/test_fifo.py
new file mode 100644 (file)
index 0000000..94ef92b
--- /dev/null
@@ -0,0 +1,304 @@
+# This file is Copyright (c) 2019 Florent Kermarrec <florent@enjoy-digital.fr>
+# This file is Copyright (c) 2020 Antmicro <www.antmicro.com>
+# License: BSD
+
+import unittest
+import random
+
+from migen import *
+
+from litex.soc.interconnect.stream import *
+
+from litedram.common import LiteDRAMNativeWritePort
+from litedram.common import LiteDRAMNativeReadPort
+from litedram.frontend.fifo import LiteDRAMFIFO, _LiteDRAMFIFOCtrl
+from litedram.frontend.fifo import _LiteDRAMFIFOWriter, _LiteDRAMFIFOReader
+
+from test.common import *
+
+class TestFIFO(unittest.TestCase):
+    @passive
+    def fifo_ctrl_flag_checker(self, fifo_ctrl, write_threshold, read_threshold):
+        # Continuously checks the combinational writable/readable flags against the current level
+        while True:
+            level = (yield fifo_ctrl.level)
+            self.assertEqual((yield fifo_ctrl.writable), level < write_threshold)
+            self.assertEqual((yield fifo_ctrl.readable), level > read_threshold)
+            yield
+
+    # _LiteDRAMFIFOCtrl ----------------------------------------------------------------------------
+
+    def test_fifo_ctrl_address_changes(self):
+        # Verify FIFOCtrl address changes.
+        # We are ignoring thresholds (so readable/writable signals)
+        dut = _LiteDRAMFIFOCtrl(base=0, depth=16, read_threshold=0, write_threshold=16)
+
+        def main_generator():
+            self.assertEqual((yield dut.write_address), 0)
+            self.assertEqual((yield dut.read_address), 0)
+
+            # Write address
+            yield dut.write.eq(1)
+            yield
+            # Write_address gets updated 1 cycle later
+            for i in range(24 - 1):
+                self.assertEqual((yield dut.write_address), i % 16)
+                yield
+            yield dut.write.eq(0)
+            yield
+            self.assertEqual((yield dut.write_address), 24 % 16)
+
+            # Read address
+            yield dut.read.eq(1)
+            yield
+            for i in range(24 - 1):
+                self.assertEqual((yield dut.read_address), i % 16)
+                yield
+            yield dut.read.eq(0)
+            yield
+            self.assertEqual((yield dut.read_address), 24 % 16)
+
+        generators = [
+            main_generator(),
+            self.fifo_ctrl_flag_checker(dut, write_threshold=16, read_threshold=0),
+        ]
+        run_simulation(dut, generators)
+
+    def test_fifo_ctrl_level_changes(self):
+        # Verify FIFOCtrl level changes.
+        dut = _LiteDRAMFIFOCtrl(base=0, depth=16, read_threshold=0, write_threshold=16)
+
+        def main_generator():
+            self.assertEqual((yield dut.level), 0)
+
+            # Level
+            def check_level_diff(write, read, diff):
+                level = (yield dut.level)
+                yield dut.write.eq(write)
+                yield dut.read.eq(read)
+                yield
+                yield dut.write.eq(0)
+                yield dut.read.eq(0)
+                yield
+                self.assertEqual((yield dut.level), level + diff)
+
+            yield from check_level_diff(write=1, read=0, diff=+1)
+            yield from check_level_diff(write=1, read=0, diff=+1)
+            yield from check_level_diff(write=1, read=1, diff=+0)
+            yield from check_level_diff(write=1, read=1, diff=+0)
+            yield from check_level_diff(write=0, read=1, diff=-1)
+            yield from check_level_diff(write=0, read=1, diff=-1)
+
+        generators = [
+            main_generator(),
+            self.fifo_ctrl_flag_checker(dut, write_threshold=16, read_threshold=0),
+        ]
+        run_simulation(dut, generators)
+
+    # _LiteDRAMFIFOWriter --------------------------------------------------------------------------
+
+    def fifo_writer_test(self, depth, sequence_len, write_threshold):
+        class DUT(Module):
+            def __init__(self):
+                self.port = LiteDRAMNativeWritePort(address_width=32, data_width=32)
+                ctrl = _LiteDRAMFIFOCtrl(base=8, depth=depth,
+                    read_threshold  = 0,
+                    write_threshold = write_threshold)
+                self.submodules.ctrl = ctrl
+                writer = _LiteDRAMFIFOWriter(data_width=32, port=self.port, ctrl=ctrl)
+                self.submodules.writer = writer
+
+                self.memory = DRAMMemory(32, 128)
+                assert 8 + sequence_len <= len(self.memory.mem)
+
+        write_data = [seed_to_data(i) for i in range(sequence_len)]
+
+        def generator(dut):
+            for data in write_data:
+                yield dut.writer.sink.valid.eq(1)
+                yield dut.writer.sink.data.eq(data)
+                yield
+                while (yield dut.writer.sink.ready) == 0:
+                    yield
+                yield dut.writer.sink.valid.eq(0)
+
+            for _ in range(16):
+                yield
+
+        dut = DUT()
+        generators = [
+            generator(dut),
+            dut.memory.write_handler(dut.port),
+            self.fifo_ctrl_flag_checker(dut.ctrl,
+                write_threshold = write_threshold,
+                read_threshold  = 0),
+            timeout_generator(1500),
+        ]
+        run_simulation(dut, generators)
+
+        mem_expected = [0] * len(dut.memory.mem)
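+        # The FIFO ctrl uses base=8, so data lands at memory words 8..8+depth-1 (wrapping within depth)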
+        for i, data in enumerate(write_data):
+            mem_expected[8 + i%depth] = data
+        self.assertEqual(dut.memory.mem, mem_expected)
+
+    def test_fifo_writer_sequence(self):
+        # Verify simple FIFOWriter sequence.
+        self.fifo_writer_test(sequence_len=48, depth=64, write_threshold=64)
+
+    def test_fifo_writer_address_wraps(self):
+        # Verify FIFOWriter sequence with address wraps.
+        self.fifo_writer_test(sequence_len=48, depth=32, write_threshold=64)
+
+    def test_fifo_writer_stops_after_threshold(self):
+        # Verify FIFOWriter sequence with stop after threshold is reached.
+        with self.assertRaises(TimeoutError):
+            self.fifo_writer_test(sequence_len=48, depth=32, write_threshold=32)
+
+    # _LiteDRAMFIFOReader --------------------------------------------------------------------------
+
+    def fifo_reader_test(self, depth, sequence_len, read_threshold, initial_writes=0):
+        memory_data = [seed_to_data(i) for i in range(128)]
+        read_data   = []
+
+        class DUT(Module):
+            def __init__(self):
+                self.port = LiteDRAMNativeReadPort(address_width=32, data_width=32)
+                ctrl = _LiteDRAMFIFOCtrl(base=8, depth=depth,
+                    read_threshold  = read_threshold,
+                    write_threshold = depth)
+                reader = _LiteDRAMFIFOReader(data_width=32, port=self.port, ctrl=ctrl)
+                self.submodules.ctrl = ctrl
+                self.submodules.reader = reader
+
+                self.memory = DRAMMemory(32, len(memory_data), init=memory_data)
+                assert 8 + sequence_len <= len(self.memory.mem)
+
+        def reader(dut):
+            # Fake FIFO writes: pulse ctrl.write to raise the level (memory is preinitialized)
+            yield dut.ctrl.write.eq(1)
+            for _ in range(initial_writes):
+                yield
+            yield dut.ctrl.write.eq(0)
+            yield
+
+            for _ in range(sequence_len):
+                # Fake single write
+                yield dut.ctrl.write.eq(1)
+                yield
+                yield dut.ctrl.write.eq(0)
+
+                while (yield dut.reader.source.valid) == 0:
+                    yield
+                read_data.append((yield dut.reader.source.data))
+                yield dut.reader.source.ready.eq(1)
+                yield
+                yield dut.reader.source.ready.eq(0)
+                yield
+
+        dut = DUT()
+        generators = [
+            reader(dut),
+            dut.memory.read_handler(dut.port),
+            self.fifo_ctrl_flag_checker(dut.ctrl,
+                write_threshold = depth,
+                read_threshold  = read_threshold),
+            timeout_generator(1500),
+        ]
+        run_simulation(dut, generators)
+
+        read_data_expected = [memory_data[8 + i%depth] for i in range(sequence_len)]
+        self.assertEqual(read_data, read_data_expected)
+
+    def test_fifo_reader_sequence(self):
+        # Verify simple FIFOReader sequence.
+        self.fifo_reader_test(sequence_len=48, depth=64, read_threshold=0)
+
+    def test_fifo_reader_address_wraps(self):
+        # Verify FIFOReader sequence with address wraps.
+        self.fifo_reader_test(sequence_len=48, depth=32, read_threshold=0)
+
+    def test_fifo_reader_requires_threshold(self):
+        # Verify FIFOReader sequence with start after threshold is reached.
+        with self.assertRaises(TimeoutError):
+            self.fifo_reader_test(sequence_len=48, depth=32, read_threshold=8)
+        # Will work after we perform the initial writes
+        self.fifo_reader_test(sequence_len=48, depth=32, read_threshold=8, initial_writes=8)
+
+    # LiteDRAMFIFO ---------------------------------------------------------------------------------
+
+    def test_fifo_default_thresholds(self):
+        # Verify FIFO with default threshold.
+        # Defaults: read_threshold=0, write_threshold=depth
+        read_threshold, write_threshold = (0, 128)
+        write_port = LiteDRAMNativeWritePort(address_width=32, data_width=32)
+        read_port  = LiteDRAMNativeReadPort(address_width=32,  data_width=32)
+        fifo = LiteDRAMFIFO(data_width=32, base=0, depth=write_threshold,
+            write_port = write_port,
+            read_port  = read_port)
+
+        def generator():
+            yield write_port.cmd.ready.eq(1)
+            yield write_port.wdata.ready.eq(1)
+            for i in range(write_threshold):
+                yield fifo.sink.valid.eq(1)
+                yield fifo.sink.data.eq(0)
+                yield
+                while (yield fifo.sink.ready) == 0:
+                    yield
+            yield
+
+        checker = self.fifo_ctrl_flag_checker(fifo.ctrl, write_threshold, read_threshold)
+        run_simulation(fifo, [generator(), checker])
+
+    def test_fifo(self):
+        # Verify FIFO.
+        class DUT(Module):
+            def __init__(self):
+                self.write_port = LiteDRAMNativeWritePort(address_width=32, data_width=32)
+                self.read_port  = LiteDRAMNativeReadPort(address_width=32,  data_width=32)
+                self.submodules.fifo = LiteDRAMFIFO(
+                    data_width          = 32,
+                    depth               = 32,
+                    base                = 16,
+                    write_port          = self.write_port,
+                    read_port           = self.read_port,
+                    read_threshold      = 8,
+                    write_threshold     = 32 - 8
+                )
+
+                self.memory = DRAMMemory(32, 128)
+
+        def generator(dut, valid_random=90):
+            prng = random.Random(42)
+            # We need 8 more writes to account for read_threshold=8
+            for i in range(64 + 8):
+                while prng.randrange(100) < valid_random:
+                    yield
+                yield dut.fifo.sink.valid.eq(1)
+                yield dut.fifo.sink.data.eq(i)
+                yield
+                while (yield dut.fifo.sink.ready) != 1:
+                    yield
+                yield dut.fifo.sink.valid.eq(0)
+
+        def checker(dut, ready_random=90):
+            prng = random.Random(42)
+            for i in range(64):
+                yield dut.fifo.source.ready.eq(0)
+                yield
+                while (yield dut.fifo.source.valid) != 1:
+                    yield
+                while prng.randrange(100) < ready_random:
+                    yield
+                yield dut.fifo.source.ready.eq(1)
+                self.assertEqual((yield dut.fifo.source.data), i)
+                yield
+
+        dut = DUT()
+        generators = [
+            generator(dut),
+            checker(dut),
+            dut.memory.write_handler(dut.write_port),
+            dut.memory.read_handler(dut.read_port)
+        ]
+        run_simulation(dut, generators)
diff --git a/test/test_init.py b/test/test_init.py
new file mode 100644 (file)
index 0000000..c956871
--- /dev/null
@@ -0,0 +1,43 @@
+# This file is Copyright (c) 2019 Florent Kermarrec <florent@enjoy-digital.fr>
+# License: BSD
+
+import os
+import filecmp
+import unittest
+
+from litex.build.tools import write_to_file
+
+from litedram.init import get_sdram_phy_c_header, get_sdram_phy_py_header
+
+
+def compare_with_reference(content, filename):
+    write_to_file(filename, content)
+    r = filecmp.cmp(filename, os.path.join("test", "reference", filename))
+    os.remove(filename)
+    return r
+
+
+class TestInit(unittest.TestCase):
+    def test_sdr(self):
+        from litex.boards.targets.minispartan6 import BaseSoC
+        soc       = BaseSoC()
+        c_header  = get_sdram_phy_c_header(soc.sdram.controller.settings.phy, soc.sdram.controller.settings.timing)
+        py_header = get_sdram_phy_py_header(soc.sdram.controller.settings.phy, soc.sdram.controller.settings.timing)
+        self.assertEqual(compare_with_reference(c_header, "sdr_init.h"), True)
+        self.assertEqual(compare_with_reference(py_header, "sdr_init.py"), True)
+
+    def test_ddr3(self):
+        from litex.boards.targets.kc705 import BaseSoC
+        soc       = BaseSoC()
+        c_header  = get_sdram_phy_c_header(soc.sdram.controller.settings.phy, soc.sdram.controller.settings.timing)
+        py_header = get_sdram_phy_py_header(soc.sdram.controller.settings.phy, soc.sdram.controller.settings.timing)
+        self.assertEqual(compare_with_reference(c_header, "ddr3_init.h"), True)
+        self.assertEqual(compare_with_reference(py_header, "ddr3_init.py"), True)
+
+    def test_ddr4(self):
+        from litex.boards.targets.kcu105 import BaseSoC
+        soc       = BaseSoC(max_sdram_size=0x4000000)
+        c_header  = get_sdram_phy_c_header(soc.sdram.controller.settings.phy, soc.sdram.controller.settings.timing)
+        py_header = get_sdram_phy_py_header(soc.sdram.controller.settings.phy, soc.sdram.controller.settings.timing)
+        self.assertEqual(compare_with_reference(c_header, "ddr4_init.h"), True)
+        self.assertEqual(compare_with_reference(py_header, "ddr4_init.py"), True)
diff --git a/test/test_modules.py b/test/test_modules.py
new file mode 100644 (file)
index 0000000..df37734
--- /dev/null
@@ -0,0 +1,170 @@
+# This file is Copyright (c) 2020 Antmicro <www.antmicro.com>
+# License: BSD
+
+import os
+import csv
+import unittest
+
+import litedram.modules
+from litedram.modules import SDRAMModule, DDR3SPDData
+
+
+def load_spd_reference(filename):
+    """Load reference SPD data from a CSV file
+
+    Micron reference SPD data can be obtained from:
+    https://www.micron.com/support/tools-and-utilities/serial-presence-detect
+    """
+    script_dir = os.path.dirname(os.path.realpath(__file__))
+    path = os.path.join(script_dir, "spd_data", filename)
+    data = [0] * 256
+    with open(path) as f:
+        reader = csv.DictReader(f)
+        for row in reader:
+            address = row["Byte Number"]
+            value = row["Byte Value"]
+            # Ignore ranges (data we care about is specified per byte anyway)
+            if len(address.split("-")) == 1:
+                data[int(address)] = int(value, 16)
+    return data
+
+
+class TestSPD(unittest.TestCase):
+    def test_tck_to_speedgrade(self):
+        # Verify that speedgrade transfer rates are calculated correctly from tck
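+        # Keys are clock periods (tck) in ns; values are the corresponding DDR3 data rates in MT/s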
+        tck_to_speedgrade = {
+            2.5:    800,
+            1.875: 1066,
+            1.5:   1333,
+            1.25:  1600,
+            1.071: 1866,
+            0.938: 2133,
+        }
+        for tck, speedgrade in tck_to_speedgrade.items():
+            self.assertEqual(speedgrade, DDR3SPDData.speedgrade_freq(tck))
+
+    def compare_geometry(self, module, module_ref):
+        self.assertEqual(module.nbanks, module_ref.nbanks)
+        self.assertEqual(module.nrows, module_ref.nrows)
+        self.assertEqual(module.ncols, module_ref.ncols)
+
+    def compare_technology_timings(self, module, module_ref, omit=None):
+        timings = {"tREFI", "tWTR", "tCCD", "tRRD", "tZQCS"}
+        if omit is not None:
+            timings -= omit
+        for timing in timings:
+            txx = getattr(module.technology_timings, timing)
+            txx_ref = getattr(module_ref.technology_timings, timing)
+            with self.subTest(txx=timing):
+                self.assertEqual(txx, txx_ref)
+
+    def compare_speedgrade_timings(self, module, module_ref, omit=None):
+        timings = {"tRP", "tRCD", "tWR", "tRFC", "tFAW", "tRAS"}
+        if omit is not None:
+            timings -= omit
+        for freq, speedgrade_timings in module.speedgrade_timings.items():
+            if freq == "default":
+                continue
+            for timing in timings:
+                txx = getattr(speedgrade_timings, timing)
+                txx_ref = getattr(module_ref.speedgrade_timings[freq], timing)
+                with self.subTest(freq=freq, txx=timing):
+                    self.assertEqual(txx, txx_ref)
+
+    def compare_modules(self, module, module_ref, omit=None):
+        self.assertEqual(module.memtype, module_ref.memtype)
+        self.assertEqual(module.rate, module_ref.rate)
+        self.compare_geometry(module, module_ref)
+        self.compare_technology_timings(module, module_ref, omit=omit)
+        self.compare_speedgrade_timings(module, module_ref, omit=omit)
+
+    def test_MT16KTF1G64HZ(self):
+        kwargs = dict(clk_freq=125e6, rate="1:4")
+        module_ref = litedram.modules.MT16KTF1G64HZ(**kwargs)
+
+        with self.subTest(speedgrade="-1G6"):
+            data = load_spd_reference("MT16KTF1G64HZ-1G6P1.csv")
+            module = SDRAMModule.from_spd_data(data, kwargs["clk_freq"])
+            self.compare_modules(module, module_ref)
+            sgt = module.speedgrade_timings["1600"]
+            self.assertEqual(sgt.tRP,            13.125)
+            self.assertEqual(sgt.tRCD,           13.125)
+            self.assertEqual(sgt.tRP + sgt.tRAS, 48.125)
+
+        with self.subTest(speedgrade="-1G9"):
+            data = load_spd_reference("MT16KTF1G64HZ-1G9E1.csv")
+            module = SDRAMModule.from_spd_data(data, kwargs["clk_freq"])
+            # tRRD is different for this speedgrade
+            self.compare_modules(module, module_ref, omit={"tRRD"})
+            self.assertEqual(module.technology_timings.tRRD, (4, 5))
+            sgt = module.speedgrade_timings["1866"]
+            self.assertEqual(sgt.tRP,            13.125)
+            self.assertEqual(sgt.tRCD,           13.125)
+            self.assertEqual(sgt.tRP + sgt.tRAS, 47.125)
+
+    def test_MT18KSF1G72HZ(self):
+        kwargs = dict(clk_freq=125e6, rate="1:4")
+        module_ref = litedram.modules.MT18KSF1G72HZ(**kwargs)
+
+        with self.subTest(speedgrade="-1G6"):
+            data = load_spd_reference("MT18KSF1G72HZ-1G6E2.csv")
+            module = SDRAMModule.from_spd_data(data, kwargs["clk_freq"])
+            self.compare_modules(module, module_ref)
+            sgt = module.speedgrade_timings["1600"]
+            self.assertEqual(sgt.tRP,            13.125)
+            self.assertEqual(sgt.tRCD,           13.125)
+            self.assertEqual(sgt.tRP + sgt.tRAS, 48.125)
+
+        with self.subTest(speedgrade="-1G4"):
+            data = load_spd_reference("MT18KSF1G72HZ-1G4E2.csv")
+            module = SDRAMModule.from_spd_data(data, kwargs["clk_freq"])
+            self.compare_modules(module, module_ref)
+            sgt = module.speedgrade_timings["1333"]
+            self.assertEqual(sgt.tRP,            13.125)
+            self.assertEqual(sgt.tRCD,           13.125)
+            self.assertEqual(sgt.tRP + sgt.tRAS, 49.125)
+
+    def test_MT8JTF12864(self):
+        kwargs = dict(clk_freq=125e6, rate="1:4")
+        module_ref = litedram.modules.MT8JTF12864(**kwargs)
+
+        data = load_spd_reference("MT8JTF12864AZ-1G4G1.csv")
+        module = SDRAMModule.from_spd_data(data, kwargs["clk_freq"])
+        self.compare_modules(module, module_ref)
+        sgt = module.speedgrade_timings["1333"]
+        self.assertEqual(sgt.tRP,            13.125)
+        self.assertEqual(sgt.tRCD,           13.125)
+        self.assertEqual(sgt.tRP + sgt.tRAS, 49.125)
+
+    def test_MT8KTF51264(self):
+        kwargs = dict(clk_freq=100e6, rate="1:4")
+        module_ref = litedram.modules.MT8KTF51264(**kwargs)
+
+        with self.subTest(speedgrade="-1G4"):
+            data = load_spd_reference("MT8KTF51264HZ-1G4E1.csv")
+            module = SDRAMModule.from_spd_data(data, kwargs["clk_freq"])
+            self.compare_modules(module, module_ref)
+            sgt = module.speedgrade_timings["1333"]
+            self.assertEqual(sgt.tRP,            13.125)
+            self.assertEqual(sgt.tRCD,           13.125)
+            self.assertEqual(sgt.tRP + sgt.tRAS, 49.125)
+
+        with self.subTest(speedgrade="-1G6"):
+            data = load_spd_reference("MT8KTF51264HZ-1G6E1.csv")
+            module = SDRAMModule.from_spd_data(data, kwargs["clk_freq"])
+            self.compare_modules(module, module_ref)
+            sgt = module.speedgrade_timings["1600"]
+            self.assertEqual(sgt.tRP,            13.125)
+            self.assertEqual(sgt.tRCD,           13.125)
+            self.assertEqual(sgt.tRP + sgt.tRAS, 48.125)
+
+        with self.subTest(speedgrade="-1G9"):
+            data = load_spd_reference("MT8KTF51264HZ-1G9P1.csv")
+            module = SDRAMModule.from_spd_data(data, kwargs["clk_freq"])
+            # tRRD is different for this speedgrade
+            self.compare_modules(module, module_ref, omit={"tRRD"})
+            self.assertEqual(module.technology_timings.tRRD, (4, 5))
+            sgt = module.speedgrade_timings["1866"]
+            self.assertEqual(sgt.tRP,            13.125)
+            self.assertEqual(sgt.tRCD,           13.125)
+            self.assertEqual(sgt.tRP + sgt.tRAS, 47.125)
diff --git a/test/test_multiplexer.py b/test/test_multiplexer.py
new file mode 100644 (file)
index 0000000..41816af
--- /dev/null
@@ -0,0 +1,580 @@
+# This file is Copyright (c) 2020 Antmicro <www.antmicro.com>
+# License: BSD
+
+import copy
+import math
+import random
+import unittest
+from collections import namedtuple
+
+from migen import *
+
+from litex.soc.interconnect import stream
+
+from litedram.common import *
+from litedram.phy import dfi
+from litedram.core.multiplexer import Multiplexer
+
+# load after "* imports" to avoid using Migen version of vcd.py
+from litex.gen.sim import run_simulation
+
+from test.common import timeout_generator, CmdRequestRWDriver
+
+
+def dfi_cmd_to_char(cas_n, ras_n, we_n):
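+    # Map active-low DFI command signals (cas_n, ras_n, we_n) to one-character mnemonics:
+    # "_" nop, "w" write, "r" read, "a" activate, "p" precharge, "f" (auto) refresh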
+    return {
+        (1, 1, 1): "_",
+        (0, 1, 0): "w",
+        (0, 1, 1): "r",
+        (1, 0, 1): "a",
+        (1, 0, 0): "p",
+        (0, 0, 1): "f",
+    }[(cas_n, ras_n, we_n)]
+
+
+class BankMachineStub:
+    def __init__(self, babits, abits):
+        self.cmd = stream.Endpoint(cmd_request_rw_layout(a=abits, ba=babits))
+        self.refresh_req = Signal()
+        self.refresh_gnt = Signal()
+
+
+class RefresherStub:
+    def __init__(self, babits, abits):
+        self.cmd = stream.Endpoint(cmd_request_rw_layout(a=abits, ba=babits))
+
+
+class MultiplexerDUT(Module):
+    # Define default settings that can be overwritten in specific tests; include only the
+    # settings that Multiplexer actually needs.
+    default_controller_settings = dict(
+        read_time      = 32,
+        write_time     = 16,
+        with_bandwidth = False,
+    )
+    default_phy_settings = dict(
+        nphases      = 2,
+        rdphase      = 0,
+        wrphase      = 1,
+        rdcmdphase   = 1,
+        wrcmdphase   = 0,
+        read_latency = 5,
+        cwl          = 3,
+        # Only used indirectly (e.g. to build the DFI and native interfaces)
+        nranks       = 1,
+        databits     = 16,
+        dfi_databits = 2*16,
+        memtype      = "DDR2",
+    )
+    default_geom_settings = dict(
+        bankbits = 3,
+        rowbits  = 13,
+        colbits  = 10,
+    )
+    default_timing_settings = dict(
+        tWTR = 2,
+        tFAW = None,
+        tCCD = 1,
+        tRRD = None,
+    )
+
+    def __init__(self,
+        controller_settings = None,
+        phy_settings        = None,
+        geom_settings       = None,
+        timing_settings     = None):
+        # Update settings if provided
+        def updated(settings, update):
+            copy = settings.copy()
+            copy.update(update or {})
+            return copy
+
+        controller_settings = updated(self.default_controller_settings, controller_settings)
+        phy_settings        = updated(self.default_phy_settings, phy_settings)
+        geom_settings       = updated(self.default_geom_settings, geom_settings)
+        timing_settings     = updated(self.default_timing_settings, timing_settings)
+
+        # Use simpler settings to include only Multiplexer-specific members
+        class SimpleSettings(Settings):
+            def __init__(self, **kwargs):
+                self.set_attributes(kwargs)
+
+        settings        = SimpleSettings(**controller_settings)
+        settings.phy    = SimpleSettings(**phy_settings)
+        settings.geom   = SimpleSettings(**geom_settings)
+        settings.timing = SimpleSettings(**timing_settings)
+        settings.geom.addressbits = max(settings.geom.rowbits, settings.geom.colbits)
+        self.settings = settings
+
+        # Create interfaces and stubs required to instantiate Multiplexer
+        abits  = settings.geom.addressbits
+        babits = settings.geom.bankbits
+        nbanks = 2**babits
+        nranks = settings.phy.nranks
+        self.bank_machines = [BankMachineStub(abits=abits, babits=babits)
+                              for _ in range(nbanks*nranks)]
+        self.refresher = RefresherStub(abits=abits, babits=babits)
+        self.dfi = dfi.Interface(
+            addressbits = abits,
+            bankbits    = babits,
+            nranks      = settings.phy.nranks,
+            databits    = settings.phy.dfi_databits,
+            nphases     = settings.phy.nphases)
+        address_align = log2_int(burst_lengths[settings.phy.memtype])
+        self.interface = LiteDRAMInterface(address_align=address_align, settings=settings)
+
+        # Add Multiplexer
+        self.submodules.multiplexer = Multiplexer(settings, self.bank_machines, self.refresher,
+            self.dfi, self.interface)
+
+        # Add helpers for driving bank machines/refresher
+        self.bm_drivers = [CmdRequestRWDriver(bm.cmd, i) for i, bm in enumerate(self.bank_machines)]
+        self.refresh_driver = CmdRequestRWDriver(self.refresher.cmd, i=1)
+
+    def fsm_state(self):
+        # Return name of current state of Multiplexer's FSM
+        return self.multiplexer.fsm.decoding[(yield self.multiplexer.fsm.state)]
+
+
+class TestMultiplexer(unittest.TestCase):
+    def test_init(self):
+        # Verify that instantiation of Multiplexer in MultiplexerDUT is correct. This will fail if
+        # Multiplexer starts using any new setting from controller.settings.
+        MultiplexerDUT()
+
+    def test_fsm_start_at_read(self):
+        # FSM should start at READ state (assumed in some other tests).
+        def main_generator(dut):
+            self.assertEqual((yield from dut.fsm_state()), "READ")
+
+        dut = MultiplexerDUT()
+        run_simulation(dut, main_generator(dut))
+
+    def test_fsm_read_to_write_latency(self):
+        # Verify the timing of READ to WRITE transition.
+        def main_generator(dut):
+            rtw = dut.settings.phy.read_latency
+            expected = "r" + (rtw - 1) * ">" + "w"
+            states = ""
+
+            # Set write_available=1
+            yield from dut.bm_drivers[0].write()
+            yield
+
+            for _ in range(len(expected)):
+                state = (yield from dut.fsm_state())
+                # Use ">" for all other states, as FSM.delayed_enter uses anonymous states instead
+                # of staying in RTW
+                states += {
+                    "READ": "r",
+                    "WRITE": "w",
+                }.get(state, ">")
+                yield
+
+            self.assertEqual(states, expected)
+
+        dut = MultiplexerDUT()
+        run_simulation(dut, main_generator(dut))
+
+    def test_fsm_write_to_read_latency(self):
+        # Verify the timing of WRITE to READ transition.
+        def main_generator(dut):
+            write_latency = math.ceil(dut.settings.phy.cwl / dut.settings.phy.nphases)
+            wtr = dut.settings.timing.tWTR + write_latency + (dut.settings.timing.tCCD or 0)
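+            # With the default settings: write_latency = ceil(3/2) = 2, so wtr = 2 + 2 + 1 = 5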
+
+            expected = "w" + (wtr - 1) * ">" + "r"
+            states   = ""
+
+            # Simulate until we are in WRITE
+            yield from dut.bm_drivers[0].write()
+            while (yield from dut.fsm_state()) != "WRITE":
+                yield
+
+            # Set read_available=1
+            yield from dut.bm_drivers[0].read()
+            yield
+
+            for _ in range(len(expected)):
+                state = (yield from dut.fsm_state())
+                states += {
+                    "READ": "r",
+                    "WRITE": "w",
+                }.get(state, ">")
+                yield
+
+            self.assertEqual(states, expected)
+
+        dut = MultiplexerDUT()
+        generators = [
+            main_generator(dut),
+            timeout_generator(50),
+        ]
+        run_simulation(dut, generators)
+
+    def test_steer_read_correct_phases(self):
+        # Check that correct phases are being used during READ.
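+        # With the defaults (rdphase=0, rdcmdphase=1) the read from bank 2 is expected on phase 0
+        # and the activate for bank 3 on phase 1.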
+        def main_generator(dut):
+            yield from dut.bm_drivers[2].read()
+            yield from dut.bm_drivers[3].activate()
+
+            while not (yield dut.bank_machines[2].cmd.ready):
+                yield
+            yield
+
+            # fsm starts in READ
+            for phase in range(dut.settings.phy.nphases):
+                if phase == dut.settings.phy.rdphase:
+                    self.assertEqual((yield dut.dfi.phases[phase].bank), 2)
+                elif phase == dut.settings.phy.rdcmdphase:
+                    self.assertEqual((yield dut.dfi.phases[phase].bank), 3)
+                else:
+                    self.assertEqual((yield dut.dfi.phases[phase].bank), 0)
+
+        dut        = MultiplexerDUT()
+        generators = [
+            main_generator(dut),
+            timeout_generator(50),
+        ]
+        run_simulation(dut, generators)
+
+    def test_steer_write_correct_phases(self):
+        # Check that correct phases are being used during WRITE.
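+        # With the defaults (wrphase=1, wrcmdphase=0) the write to bank 2 is expected on phase 1
+        # and the activate for bank 3 on phase 0.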
+        def main_generator(dut):
+            yield from dut.bm_drivers[2].write()
+            yield from dut.bm_drivers[3].activate()
+
+            while not (yield dut.bank_machines[2].cmd.ready):
+                yield
+            yield
+
+            # fsm starts in READ
+            for phase in range(dut.settings.phy.nphases):
+                if phase == dut.settings.phy.wrphase:
+                    self.assertEqual((yield dut.dfi.phases[phase].bank), 2)
+                elif phase == dut.settings.phy.wrcmdphase:
+                    self.assertEqual((yield dut.dfi.phases[phase].bank), 3)
+                else:
+                    self.assertEqual((yield dut.dfi.phases[phase].bank), 0)
+
+        dut = MultiplexerDUT()
+        generators = [
+            main_generator(dut),
+            timeout_generator(50),
+        ]
+        run_simulation(dut, generators)
+
+    def test_single_phase_cmd_req(self):
+        # Verify that, for a single phase, commands are sent sequentially.
+        def main_generator(dut):
+            yield from dut.bm_drivers[2].write()
+            yield from dut.bm_drivers[3].activate()
+            ready = {2: dut.bank_machines[2].cmd.ready, 3: dut.bank_machines[3].cmd.ready}
+
+            # Activate should appear first
+            while not ((yield ready[2]) or (yield ready[3])):
+                yield
+            yield from dut.bm_drivers[3].nop()
+            yield
+            self.assertEqual((yield dut.dfi.phases[0].bank), 3)
+
+            # Then write
+            while not (yield ready[2]):
+                yield
+            yield from dut.bm_drivers[2].nop()
+            yield
+            self.assertEqual((yield dut.dfi.phases[0].bank), 2)
+
+        dut = MultiplexerDUT(phy_settings=dict(nphases=1))
+        generators = [
+            main_generator(dut),
+            timeout_generator(50),
+        ]
+        run_simulation(dut, generators)
+
+    def test_ras_trrd(self):
+        # Verify tRRD.
+        def main_generator(dut):
+            yield from dut.bm_drivers[2].activate()
+            yield from dut.bm_drivers[3].activate()
+            ready = {2: dut.bank_machines[2].cmd.ready, 3: dut.bank_machines[3].cmd.ready}
+
+            # Wait for activate
+            while not ((yield ready[2]) or (yield ready[3])):
+                yield
+            # Invalidate command that was ready
+            if (yield ready[2]):
+                yield from dut.bm_drivers[2].nop()
+            else:
+                yield from dut.bm_drivers[3].nop()
+            yield
+
+            # Wait for the second activate; start from 1 for the previous cycle
+            ras_time = 1
+            while not ((yield ready[2]) or (yield ready[3])):
+                ras_time += 1
+                yield
+
+            self.assertEqual(ras_time, 6)
+
+        dut = MultiplexerDUT(timing_settings=dict(tRRD=6))
+        generators = [
+            main_generator(dut),
+            timeout_generator(50),
+        ]
+        run_simulation(dut, generators)
+
+    def test_cas_tccd(self):
+        # Verify tCCD.
+        def main_generator(dut):
+            yield from dut.bm_drivers[2].read()
+            yield from dut.bm_drivers[3].read()
+            ready = {2: dut.bank_machines[2].cmd.ready, 3: dut.bank_machines[3].cmd.ready}
+
+            # Wait for the first read
+            while not ((yield ready[2]) or (yield ready[3])):
+                yield
+            # Invalidate command that was ready
+            if (yield ready[2]):
+                yield from dut.bm_drivers[2].nop()
+            else:
+                yield from dut.bm_drivers[3].nop()
+            yield
+
+            # Wait for the second read; start from 1 for the previous cycle
+            cas_time = 1
+            while not ((yield ready[2]) or (yield ready[3])):
+                cas_time += 1
+                yield
+
+            self.assertEqual(cas_time, 3)
+
+        dut = MultiplexerDUT(timing_settings=dict(tCCD=3))
+        generators = [
+            main_generator(dut),
+            timeout_generator(50),
+        ]
+        run_simulation(dut, generators)
+
+    def test_fsm_anti_starvation(self):
+        # Check that anti-starvation works according to controller settings.
+        def main_generator(dut):
+            yield from dut.bm_drivers[2].read()
+            yield from dut.bm_drivers[3].write()
+
+            # Go to WRITE
+            # Anti-starvation does not limit the 1st read, as read_time_en already starts at 1
+            # READ -> RTW -> WRITE
+            while (yield from dut.fsm_state()) != "WRITE":
+                yield
+
+            # Wait for write anti-starvation
+            for _ in range(dut.settings.write_time):
+                self.assertEqual((yield from dut.fsm_state()), "WRITE")
+                yield
+            self.assertEqual((yield from dut.fsm_state()), "WTR")
+
+            # WRITE -> WTR -> READ
+            while (yield from dut.fsm_state()) != "READ":
+                yield
+
+            # Wait for read anti-starvation
+            for _ in range(dut.settings.read_time):
+                self.assertEqual((yield from dut.fsm_state()), "READ")
+                yield
+            self.assertEqual((yield from dut.fsm_state()), "RTW")
+
+        dut = MultiplexerDUT()
+        generators = [
+            main_generator(dut),
+            timeout_generator(100),
+        ]
+        run_simulation(dut, generators)
+
+    def test_write_datapath(self):
+        # Verify that data is transmitted from native interface to DFI.
+        def main_generator(dut):
+            yield from dut.bm_drivers[2].write()
+            # 16bits * 2 (DDR) * 1 (phases)
+            yield dut.interface.wdata.eq(0xbaadf00d)
+            yield dut.interface.wdata_we.eq(0xf)
+
+            while not (yield dut.bank_machines[2].cmd.ready):
+                yield
+            yield
+
+            self.assertEqual((yield dut.dfi.phases[0].wrdata), 0xbaadf00d)
+            self.assertEqual((yield dut.dfi.phases[0].wrdata_en), 1)
+            self.assertEqual((yield dut.dfi.phases[0].address), 2)
+            self.assertEqual((yield dut.dfi.phases[0].bank), 2)
+
+        dut = MultiplexerDUT(phy_settings=dict(nphases=1))
+        generators = [
+            main_generator(dut),
+            timeout_generator(50),
+        ]
+        run_simulation(dut, generators)
+
+    def test_read_datapath(self):
+        # Verify that data is transmitted from DFI to native interface.
+        def main_generator(dut):
+            yield from dut.bm_drivers[2].write()
+            # 16bits * 2 (DDR) * 1 (phases)
+            yield dut.dfi.phases[0].rddata.eq(0xbaadf00d)
+            yield dut.dfi.phases[0].rddata_en.eq(1)
+            yield
+
+            while not (yield dut.bank_machines[2].cmd.ready):
+                yield
+            yield
+
+            self.assertEqual((yield dut.interface.rdata), 0xbaadf00d)
+            self.assertEqual((yield dut.interface.wdata_we), 0)
+            self.assertEqual((yield dut.dfi.phases[0].address), 2)
+            self.assertEqual((yield dut.dfi.phases[0].bank), 2)
+
+        dut = MultiplexerDUT(phy_settings=dict(nphases=1))
+        generators = [
+            main_generator(dut),
+            timeout_generator(50),
+        ]
+        run_simulation(dut, generators)
+
+    def test_refresh_requires_gnt(self):
+        # After refresher command request, multiplexer waits for permission from all bank machines.
+        def main_generator(dut):
+            def assert_dfi_cmd(cas, ras, we):
+                p = dut.dfi.phases[0]
+                cas_n, ras_n, we_n = (yield p.cas_n), (yield p.ras_n), (yield p.we_n)
+                self.assertEqual((cas_n, ras_n, we_n), (1 - cas, 1 - ras, 1 - we))
+
+            for bm in dut.bank_machines:
+                self.assertEqual((yield bm.refresh_req), 0)
+
+            yield from dut.refresh_driver.refresh()
+            yield
+
+            # Bank machines get the request
+            for bm in dut.bank_machines:
+                self.assertEqual((yield bm.refresh_req), 1)
+            # No command yet
+            yield from assert_dfi_cmd(cas=0, ras=0, we=0)
+
+            # Grant permission for refresh
+            prng = random.Random(42)
+            delays = [prng.randrange(100) for _ in dut.bank_machines]
+            for t in range(max(delays) + 1):
+                # Grant permission
+                for delay, bm in zip(delays, dut.bank_machines):
+                    if delay == t:
+                        yield bm.refresh_gnt.eq(1)
+                yield
+
+                # Make sure there is no command yet
+                yield from assert_dfi_cmd(cas=0, ras=0, we=0)
+            yield
+            yield
+
+            # Refresh command
+            yield from assert_dfi_cmd(cas=1, ras=1, we=0)
+
+        dut = MultiplexerDUT()
+        run_simulation(dut, main_generator(dut))
+
+    def test_requests_from_multiple_bankmachines(self):
+        # Check a complex communication scenario with requests from multiple bank machines.
+        # The communication is greatly simplified: the data path is completely ignored and no
+        # responses from the PHY are simulated. Each bank machine performs a sequence of requests,
+        # bank machines are ordered randomly, and the DFI command data is checked to verify that
+        # all the commands have been sent in the correct per-bank order.
+
+        # Request sequences for the given bank machines
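+        # Command legend (as in dfi_cmd_to_char): "a" activate, "w" write, "r" read, "p" precharge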
+        bm_sequences = {
+            0: "awwwwwwp",
+            1: "arrrrrrp",
+            2: "arwrwrwp",
+            3: "arrrwwwp",
+            4: "awparpawp",
+            5: "awwparrrrp",
+        }
+        # convert to lists to use .pop()
+        bm_sequences = {bm_num: list(seq) for bm_num, seq in bm_sequences.items()}
+
+        def main_generator(bank_machines, drivers):
+            # work on a copy
+            bm_seq = copy.deepcopy(bm_sequences)
+
+            def non_empty():
+                return list(filter(lambda n: len(bm_seq[n]) > 0, bm_seq.keys()))
+
+            # Artificially perform the work of LiteDRAMCrossbar by always picking only one request
+            prng = random.Random(42)
+            while len(non_empty()) > 0:
+                # Pick random bank machine
+                bm_num = prng.choice(non_empty())
+
+                # Set given request
+                request_char = bm_seq[bm_num].pop(0)
+                yield from drivers[bm_num].request(request_char)
+                yield
+
+                # Wait for ready
+                while not (yield bank_machines[bm_num].cmd.ready):
+                    yield
+
+                # Disable it
+                yield from drivers[bm_num].nop()
+
+            for _ in range(16):
+                yield
+
+        # Gather data on DFI
+        DFISnapshot = namedtuple("DFISnapshot",
+                                 ["cmd", "bank", "address", "wrdata_en", "rddata_en"])
+        dfi_snapshots = []
+
+        @passive
+        def dfi_monitor(dfi):
+            while True:
+                # Capture current state of DFI lines
+                phases = []
+                for i, p in enumerate(dfi.phases):
+                    # Transform cas/ras/we to command name
+                    cas_n, ras_n, we_n = (yield p.cas_n), (yield p.ras_n), (yield p.we_n)
+                    captured = {"cmd": dfi_cmd_to_char(cas_n, ras_n, we_n)}
+
+                    # Capture rest of fields
+                    for field in DFISnapshot._fields:
+                        if field != "cmd":
+                            captured[field] = (yield getattr(p, field))
+
+                    phases.append(DFISnapshot(**captured))
+                dfi_snapshots.append(phases)
+                yield
+
+        dut = MultiplexerDUT()
+        generators = [
+            main_generator(dut.bank_machines, dut.bm_drivers),
+            dfi_monitor(dut.dfi),
+            timeout_generator(200),
+        ]
+        run_simulation(dut, generators)
+
+        # Check captured DFI data with the description
+        for snap in dfi_snapshots:
+            for i, phase_snap in enumerate(snap):
+                if phase_snap.cmd == "_":
+                    continue
+
+                # Distinguish bank machines by the bank number
+                bank = phase_snap.bank
+                # Find next command for the given bank
+                cmd = bm_sequences[bank].pop(0)
+
+                # Check if the captured data is correct
+                self.assertEqual(phase_snap.cmd, cmd)
+                if cmd in ["w", "r"]:
+                    # Addresses are artificially forced to bank numbers in drivers
+                    self.assertEqual(phase_snap.address, bank)
+                    if cmd == "w":
+                        self.assertEqual(phase_snap.wrdata_en, 1)
+                    if cmd == "r":
+                        self.assertEqual(phase_snap.rddata_en, 1)
diff --git a/test/test_refresh.py b/test/test_refresh.py
new file mode 100644 (file)
index 0000000..0046ae8
--- /dev/null
@@ -0,0 +1,110 @@
+# This file is Copyright (c) 2019 Florent Kermarrec <florent@enjoy-digital.fr>
+# License: BSD
+
+import unittest
+
+from migen import *
+
+from litedram.core.multiplexer import cmd_request_rw_layout
+from litedram.core.refresher import RefreshSequencer, RefreshTimer, Refresher
+
+
+def c2bool(c):
+    return {"-": 1, "_": 0}[c]
+
+class TestRefresh(unittest.TestCase):
+    def refresh_sequencer_test(self, trp, trfc, starts, dones, cmds):
+        cmd = Record(cmd_request_rw_layout(a=16, ba=3))
+        def generator(dut):
+            dut.errors = 0
+            for start, done, cas, ras in zip(starts, dones, cmds.cas, cmds.ras):
+                yield dut.start.eq(c2bool(start))
+                yield
+                if (yield dut.done) != c2bool(done):
+                    dut.errors += 1
+                if (yield cmd.cas) != c2bool(cas):
+                    dut.errors += 1
+                if (yield cmd.ras) != c2bool(ras):
+                    dut.errors += 1
+        dut = RefreshSequencer(cmd, trp, trfc)
+        run_simulation(dut, [generator(dut)])
+        self.assertEqual(dut.errors, 0)
+
+    def test_refresh_sequencer(self):
+        trp  = 1
+        trfc = 2
+        class Obj: pass
+        cmds = Obj()
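+        # One character per clock cycle: "-" = 1, "_" = 0 (decoded by c2bool above)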
+        starts   = "_-______________"
+        cmds.cas = "___-____________"
+        cmds.ras = "__--____________"
+        dones    = "_____-__________"
+        self.refresh_sequencer_test(trp, trfc, starts, dones, cmds)
+
+    def refresh_timer_test(self, trefi):
+        def generator(dut):
+            dut.errors = 0
+            for i in range(16*trefi):
+                if i%trefi == (trefi - 1):
+                    if (yield dut.refresh.done) != 1:
+                        dut.errors += 1
+                else:
+                    if (yield dut.refresh.done) != 0:
+                        dut.errors += 1
+                yield
+
+        class DUT(Module):
+            def __init__(self, trefi):
+                self.submodules.refresh = RefreshTimer(trefi)
+                self.comb += self.refresh.wait.eq(~self.refresh.done)
+
+        dut = DUT(trefi)
+        run_simulation(dut, [generator(dut)])
+        self.assertEqual(dut.errors, 0)
+
+    def test_refresh_timer(self):
+        for trefi in range(1, 32):
+            with self.subTest(trefi=trefi):
+                self.refresh_timer_test(trefi)
+
+    def refresher_test(self, postponing):
+        class Obj: pass
+        settings = Obj()
+        settings.with_refresh = True
+        settings.refresh_zqcs_freq = 1e0
+        settings.timing = Obj()
+        settings.timing.tREFI = 64
+        settings.timing.tRP   = 1
+        settings.timing.tRFC  = 2
+        settings.timing.tZQCS = 64
+        settings.geom = Obj()
+        settings.geom.addressbits = 16
+        settings.geom.bankbits    = 3
+        settings.phy = Obj()
+        settings.phy.nranks = 1
+
+        def generator(dut):
+            dut.errors = 0
+            yield dut.cmd.ready.eq(1)
+            for i in range(16):
+                while (yield dut.cmd.valid) == 0:
+                    yield
+                cmd_valid_gap = 0
+                while (yield dut.cmd.valid) == 1:
+                    cmd_valid_gap += 1
+                    yield
+                while (yield dut.cmd.valid) == 0:
+                    cmd_valid_gap += 1
+                    yield
+                if cmd_valid_gap != postponing*settings.timing.tREFI:
+                    print(cmd_valid_gap)
+                    dut.errors += 1
+
+        dut = Refresher(settings, clk_freq=100e6, postponing=postponing)
+        run_simulation(dut, [generator(dut)])
+        self.assertEqual(dut.errors, 0)
+
+    def test_refresher(self):
+        for postponing in [1, 2, 4, 8]:
+            with self.subTest(postponing=postponing):
+                self.refresher_test(postponing)
diff --git a/test/test_steerer.py b/test/test_steerer.py
new file mode 100644 (file)
index 0000000..a37fdf5
--- /dev/null
@@ -0,0 +1,238 @@
+# This file is Copyright (c) 2020 Antmicro <www.antmicro.com>
+# License: BSD
+
+import unittest
+
+from migen import *
+from litex.soc.interconnect import stream
+
+from litedram.common import *
+from litedram.phy import dfi
+from litedram.core.multiplexer import _Steerer
+from litedram.core.multiplexer import STEER_NOP, STEER_CMD, STEER_REQ, STEER_REFRESH
+
+from test.common import CmdRequestRWDriver
+
+
+class SteererDUT(Module):
+    def __init__(self, nranks, dfi_databits, nphases):
+        a, ba         = 13, 3
+        nop           = Record(cmd_request_layout(a=a, ba=ba))
+        choose_cmd    = stream.Endpoint(cmd_request_rw_layout(a=a, ba=ba))
+        choose_req    = stream.Endpoint(cmd_request_rw_layout(a=a, ba=ba))
+        refresher_cmd = stream.Endpoint(cmd_request_rw_layout(a=a, ba=ba))
+
+        self.commands = [nop, choose_cmd, choose_req, refresher_cmd]
+        self.dfi = dfi.Interface(addressbits=a, bankbits=ba, nranks=nranks, databits=dfi_databits,
+                                 nphases=nphases)
+        self.submodules.steerer = _Steerer(self.commands, self.dfi)
+
+        # NOP is not an endpoint and does not have is_* signals
+        self.drivers = [CmdRequestRWDriver(req, i, ep_layout=i != 0, rw_layout=i != 0)
+                        for i, req in enumerate(self.commands)]
+
+
+class TestSteerer(unittest.TestCase):
+    def test_nop_not_valid(self):
+        # If NOP is selected then there should be no command selected on cas/ras/we.
+        def main_generator(dut):
+            # NOP on both phases
+            yield dut.steerer.sel[0].eq(STEER_NOP)
+            yield dut.steerer.sel[1].eq(STEER_NOP)
+            yield from dut.drivers[0].nop()
+            yield
+
+            for i in range(2):
+                cas_n = (yield dut.dfi.phases[i].cas_n)
+                ras_n = (yield dut.dfi.phases[i].ras_n)
+                we_n  = (yield dut.dfi.phases[i].we_n)
+                self.assertEqual((cas_n, ras_n, we_n), (1, 1, 1))
+
+        dut = SteererDUT(nranks=2, dfi_databits=16, nphases=2)
+        run_simulation(dut, main_generator(dut))
+
+    def test_connect_only_if_valid_and_ready(self):
+        # Commands should be connected to phases only if they are valid & ready.
+        def main_generator(dut):
+            # Set possible requests
+            yield from dut.drivers[STEER_NOP].nop()
+            yield from dut.drivers[STEER_CMD].activate()
+            yield from dut.drivers[STEER_REQ].write()
+            yield from dut.drivers[STEER_REFRESH].refresh()
+            # Set how phases are steered
+            yield dut.steerer.sel[0].eq(STEER_CMD)
+            yield dut.steerer.sel[1].eq(STEER_NOP)
+            yield
+            yield
+
+            def check(is_ready):
+                # CMD on phase 0 should be STEER_CMD=activate
+                p = dut.dfi.phases[0]
+                self.assertEqual((yield p.bank),    STEER_CMD)
+                self.assertEqual((yield p.address), STEER_CMD)
+                if is_ready:
+                    self.assertEqual((yield p.cas_n), 1)
+                    self.assertEqual((yield p.ras_n), 0)
+                    self.assertEqual((yield p.we_n),  1)
+                else:  # Not steered
+                    self.assertEqual((yield p.cas_n), 1)
+                    self.assertEqual((yield p.ras_n), 1)
+                    self.assertEqual((yield p.we_n),  1)
+
+                # Nop on phase 1 should be STEER_NOP
+                p = dut.dfi.phases[1]
+                self.assertEqual((yield p.cas_n), 1)
+                self.assertEqual((yield p.ras_n), 1)
+                self.assertEqual((yield p.we_n),  1)
+
+            yield from check(is_ready=False)
+            yield dut.commands[STEER_CMD].ready.eq(1)
+            yield
+            yield
+            yield from check(is_ready=True)
+
+        dut = SteererDUT(nranks=2, dfi_databits=16, nphases=2)
+        run_simulation(dut, main_generator(dut))
+
+    def test_no_decode_ba_single_rank(self):
+        # With a single rank the whole `ba` signal is bank address.
+        def main_generator(dut):
+            yield from dut.drivers[STEER_NOP].nop()
+            yield from dut.drivers[STEER_REQ].write()
+            yield from dut.drivers[STEER_REFRESH].refresh()
+            # All the bits are for bank
+            dut.drivers[STEER_CMD].bank = 0b110
+            yield from dut.drivers[STEER_CMD].activate()
+            yield dut.commands[STEER_CMD].ready.eq(1)
+            # Set how phases are steered
+            yield dut.steerer.sel[0].eq(STEER_NOP)
+            yield dut.steerer.sel[1].eq(STEER_CMD)
+            yield
+            yield
+
+            p = dut.dfi.phases[1]
+            self.assertEqual((yield p.cas_n),   1)
+            self.assertEqual((yield p.ras_n),   0)
+            self.assertEqual((yield p.we_n),    1)
+            self.assertEqual((yield p.address), STEER_CMD)
+            self.assertEqual((yield p.bank),    0b110)
+            self.assertEqual((yield p.cs_n),    0)
+
+        dut = SteererDUT(nranks=1, dfi_databits=16, nphases=2)
+        run_simulation(dut, main_generator(dut))
+
+    def test_decode_ba_multiple_ranks(self):
+        # With multiple ranks `ba` signal should be split into bank and chip select.
+        def main_generator(dut):
+            yield from dut.drivers[STEER_NOP].nop()
+            yield from dut.drivers[STEER_REQ].write()
+            yield from dut.drivers[STEER_REFRESH].refresh()
+            # Set how phases are steered
+            yield dut.steerer.sel[0].eq(STEER_NOP)
+            yield dut.steerer.sel[1].eq(STEER_CMD)
+
+            variants = [
+                # ba, phase.bank, phase.cs_n
+                (0b110, 0b10, 0b01),  # rank=1 -> cs=0b10 -> cs_n=0b01
+                (0b101, 0b01, 0b01),  # rank=1 -> cs=0b10 -> cs_n=0b01
+                (0b001, 0b01, 0b10),  # rank=0 -> cs=0b01 -> cs_n=0b10
+            ]
+            for ba, phase_bank, phase_cs_n in variants:
+                with self.subTest(ba=ba):
+                    # 1 bit for rank, 2 bits for bank
+                    dut.drivers[STEER_CMD].bank = ba
+                    yield from dut.drivers[STEER_CMD].activate()
+                    yield dut.commands[STEER_CMD].ready.eq(1)
+                    yield
+                    yield
+
+                    p = dut.dfi.phases[1]
+                    self.assertEqual((yield p.cas_n), 1)
+                    self.assertEqual((yield p.ras_n), 0)
+                    self.assertEqual((yield p.we_n),  1)
+                    self.assertEqual((yield p.bank),  phase_bank)
+                    self.assertEqual((yield p.cs_n),  phase_cs_n)
+
+        dut = SteererDUT(nranks=2, dfi_databits=16, nphases=2)
+        run_simulation(dut, main_generator(dut))
+
+    def test_select_all_ranks_on_refresh(self):
+        # When refresh command is on first phase, all ranks should be selected.
+        def main_generator(dut):
+            yield from dut.drivers[STEER_NOP].nop()
+            yield from dut.drivers[STEER_REQ].write()
+            yield from dut.drivers[STEER_CMD].activate()
+            # Set how phases are steered
+            yield dut.steerer.sel[0].eq(STEER_REFRESH)
+            yield dut.steerer.sel[1].eq(STEER_NOP)
+
+            variants = [
+                # ba, phase.bank, phase.cs_n (always all enabled)
+                (0b110, 0b10, 0b00),
+                (0b101, 0b01, 0b00),
+                (0b001, 0b01, 0b00),
+            ]
+            for ba, phase_bank, phase_cs_n in variants:
+                with self.subTest(ba=ba):
+                    # 1 bit for rank, 2 bits for bank
+                    dut.drivers[STEER_REFRESH].bank = ba
+                    yield from dut.drivers[STEER_REFRESH].refresh()
+                    yield dut.commands[STEER_REFRESH].ready.eq(1)
+                    yield
+                    yield
+
+                    p = dut.dfi.phases[0]
+                    self.assertEqual((yield p.cas_n), 0)
+                    self.assertEqual((yield p.ras_n), 0)
+                    self.assertEqual((yield p.we_n),  1)
+                    self.assertEqual((yield p.bank),  phase_bank)
+                    self.assertEqual((yield p.cs_n),  phase_cs_n)
+
+        dut = SteererDUT(nranks=2, dfi_databits=16, nphases=2)
+        run_simulation(dut, main_generator(dut))
+
+    def test_reset_n_high(self):
+        # Reset_n should be 1 for all phases at all times.
+        def main_generator(dut):
+            yield dut.steerer.sel[0].eq(STEER_CMD)
+            yield dut.steerer.sel[1].eq(STEER_NOP)
+            yield
+
+            self.assertEqual((yield dut.dfi.phases[0].reset_n), 1)
+            self.assertEqual((yield dut.dfi.phases[1].reset_n), 1)
+            self.assertEqual((yield dut.dfi.phases[2].reset_n), 1)
+            self.assertEqual((yield dut.dfi.phases[3].reset_n), 1)
+
+        dut = SteererDUT(nranks=2, dfi_databits=16, nphases=4)
+        run_simulation(dut, main_generator(dut))
+
+    def test_cke_high_all_ranks(self):
+        # CKE should be 1 for all phases and ranks at all times.
+        def main_generator(dut):
+            yield dut.steerer.sel[0].eq(STEER_CMD)
+            yield dut.steerer.sel[1].eq(STEER_NOP)
+            yield
+
+            self.assertEqual((yield dut.dfi.phases[0].cke), 0b11)
+            self.assertEqual((yield dut.dfi.phases[1].cke), 0b11)
+            self.assertEqual((yield dut.dfi.phases[2].cke), 0b11)
+            self.assertEqual((yield dut.dfi.phases[3].cke), 0b11)
+
+        dut = SteererDUT(nranks=2, dfi_databits=16, nphases=4)
+        run_simulation(dut, main_generator(dut))
+
+    def test_odt_high_all_ranks(self):
+        # ODT should be 1 for all phases and ranks at all times.
+        #  NOTE: only until dynamic ODT is implemented.
+        def main_generator(dut):
+            yield dut.steerer.sel[0].eq(STEER_CMD)
+            yield dut.steerer.sel[1].eq(STEER_NOP)
+            yield
+
+            self.assertEqual((yield dut.dfi.phases[0].odt), 0b11)
+            self.assertEqual((yield dut.dfi.phases[1].odt), 0b11)
+            self.assertEqual((yield dut.dfi.phases[2].odt), 0b11)
+            self.assertEqual((yield dut.dfi.phases[3].odt), 0b11)
+
+        dut = SteererDUT(nranks=2, dfi_databits=16, nphases=4)
+        run_simulation(dut, main_generator(dut))
diff --git a/test/test_timing.py b/test/test_timing.py
new file mode 100644 (file)
index 0000000..1fbc212
--- /dev/null
@@ -0,0 +1,119 @@
+# This file is Copyright (c) 2019 Florent Kermarrec <florent@enjoy-digital.fr>
+# License: BSD
+
+import unittest
+import random
+
+from migen import *
+
+from litedram.common import tXXDController, tFAWController
+
+
+def c2bool(c):
+    return {"-": 1, "_": 0}[c]
+
+
+class TestTiming(unittest.TestCase):
+    def txxd_controller_test(self, txxd, valids, readys):
+        def generator(dut):
+            dut.errors = 0
+            for valid, ready in zip(valids, readys):
+                yield dut.valid.eq(c2bool(valid))
+                yield
+                if (yield dut.ready) != c2bool(ready):
+                    dut.errors += 1
+
+        dut = tXXDController(txxd)
+        run_simulation(dut, [generator(dut)])
+        self.assertEqual(dut.errors, 0)
+
+    def test_txxd_controller(self):
+        txxd = 1
+        valids = "__-______"
+        readys = "_--------"
+        self.txxd_controller_test(txxd, valids, readys)
+
+        txxd = 2
+        valids = "__-______"
+        readys = "_--_-----"
+        self.txxd_controller_test(txxd, valids, readys)
+
+        txxd = 3
+        valids = "____-______"
+        readys = "___--__----"
+        self.txxd_controller_test(txxd, valids, readys)
+
+        txxd = 4
+        valids = "____-______"
+        readys = "___--___---"
+        self.txxd_controller_test(txxd, valids, readys)
+
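+    # Pulse valid at random intervals; a passive checker records the gaps between
+    # ready periods, and the smallest recorded gap must equal txxd.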
+    def txxd_controller_random_test(self, txxd, loops):
+        def generator(dut, valid_rand):
+            prng = random.Random(42)
+            for _ in range(loops):
+                while prng.randrange(100) < valid_rand:
+                    yield
+                yield dut.valid.eq(1)
+                yield
+                yield dut.valid.eq(0)
+
+        @passive
+        def checker(dut):
+            dut.ready_gaps = []
+            while True:
+                while (yield dut.ready) != 0:
+                    yield
+                ready_gap = 1
+                while (yield dut.ready) != 1:
+                    ready_gap += 1
+                    yield
+                dut.ready_gaps.append(ready_gap)
+
+        dut = tXXDController(txxd)
+        run_simulation(dut, [generator(dut, valid_rand=90), checker(dut)])
+        self.assertEqual(min(dut.ready_gaps), txxd)
+
+    def test_txxd_controller_random(self):
+        for txxd in range(2, 32):
+            with self.subTest(txxd=txxd):
+                self.txxd_controller_random_test(txxd, 512)
+
+
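+    # Same waveform-style check as for tXXD, applied to the tFAW controller, which
+    # allows at most four activates within a rolling tFAW window.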
+    def tfaw_controller_test(self, tfaw, valids, readys):
+        def generator(dut):
+            dut.errors = 0
+            for valid, ready in zip(valids, readys):
+                yield dut.valid.eq(c2bool(valid))
+                yield
+                if (yield dut.ready) != c2bool(ready):
+                    dut.errors += 1
+
+        dut = tFAWController(tfaw)
+        run_simulation(dut, [generator(dut)])
+        self.assertEqual(dut.errors, 0)
+
+    def test_tfaw_controller(self):
+        tfaw = 8
+        valids = "_----___________"
+        readys = "-----______-----"
+        with self.subTest(tfaw=tfaw, valids=valids, readys=readys):
+            self.tfaw_controller_test(tfaw, valids, readys)
+
+        tfaw = 8
+        valids = "_-_-_-_-________"
+        readys = "--------___-----"
+        with self.subTest(tfaw=tfaw, valids=valids, readys=readys):
+            self.tfaw_controller_test(tfaw, valids, readys)
+
+        tfaw = 8
+        valids = "_-_-___-_-______"
+        readys = "----------_-----"
+        with self.subTest(tfaw=tfaw, valids=valids, readys=readys):
+            self.tfaw_controller_test(tfaw, valids, readys)
+
+        tfaw = 8
+        valids = "_-_-____-_-______"
+        readys = "-----------------"
+        with self.subTest(tfaw=tfaw, valids=valids, readys=readys):
+            self.tfaw_controller_test(tfaw, valids, readys)
diff --git a/test/test_wishbone.py b/test/test_wishbone.py
new file mode 100644 (file)
index 0000000..9b2da86
--- /dev/null
@@ -0,0 +1,111 @@
+# This file is Copyright (c) 2018-2019 Florent Kermarrec <florent@enjoy-digital.fr>
+# This file is Copyright (c) 2020 Antmicro <www.antmicro.com>
+# License: BSD
+
+import unittest
+
+from migen import *
+from litex.gen.sim import run_simulation
+from litex.soc.interconnect import wishbone
+
+from litedram.frontend.wishbone import LiteDRAMWishbone2Native
+from litedram.common import LiteDRAMNativePort
+
+from test.common import DRAMMemory, MemoryTestDataMixin
+
+
+class TestWishbone(MemoryTestDataMixin, unittest.TestCase):
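+    # A native port wider than the Wishbone data width is not supported and must
+    # raise an AssertionError.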
+    def test_wishbone_data_width_not_smaller(self):
+        with self.assertRaises(AssertionError):
+            wb   = wishbone.Interface(data_width=32)
+            port = LiteDRAMNativePort("both", address_width=32, data_width=wb.data_width * 2)
+            LiteDRAMWishbone2Native(wb, port)
+
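+    # Write the pattern through the Wishbone interface, read every word back and
+    # compare it, then check the DRAM memory model against mem_expected.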
+    def wishbone_readback_test(self, pattern, mem_expected, wb, port, base_address=0):
+        class DUT(Module):
+            def __init__(self):
+                self.port = port
+                self.wb   = wb
+                self.submodules += LiteDRAMWishbone2Native(
+                    wishbone     = self.wb,
+                    port         = self.port,
+                    base_address = base_address)
+                self.mem = DRAMMemory(port.data_width, len(mem_expected))
+
+        def main_generator(dut):
+            for adr, data in pattern:
+                yield from dut.wb.write(adr, data)
+                data_r = (yield from dut.wb.read(adr))
+                self.assertEqual(data_r, data)
+
+        dut = DUT()
+        generators = [
+            main_generator(dut),
+            dut.mem.write_handler(dut.port),
+            dut.mem.read_handler(dut.port),
+        ]
+        run_simulation(dut, generators)
+        self.assertEqual(dut.mem.mem, mem_expected)
+
+    def test_wishbone_8bit(self):
+        # Verify Wishbone with 8-bit data width.
+        data = self.pattern_test_data["8bit"]
+        wb   = wishbone.Interface(adr_width=30, data_width=8)
+        port = LiteDRAMNativePort("both", address_width=30, data_width=8)
+        self.wishbone_readback_test(data["pattern"], data["expected"], wb, port)
+
+    def test_wishbone_32bit(self):
+        # Verify Wishbone with 32-bit data width.
+        data = self.pattern_test_data["32bit"]
+        wb   = wishbone.Interface(adr_width=30, data_width=32)
+        port = LiteDRAMNativePort("both", address_width=30, data_width=32)
+        self.wishbone_readback_test(data["pattern"], data["expected"], wb, port)
+
+    def test_wishbone_64bit(self):
+        # Verify Wishbone with 64-bit data width.
+        data = self.pattern_test_data["64bit"]
+        wb   = wishbone.Interface(adr_width=30, data_width=64)
+        port = LiteDRAMNativePort("both", address_width=30, data_width=64)
+        self.wishbone_readback_test(data["pattern"], data["expected"], wb, port)
+
+    def test_wishbone_64bit_to_32bit(self):
+        # Verify Wishbone with 64-bit data width down-converted to 32-bit data width.
+        data = self.pattern_test_data["64bit_to_32bit"]
+        wb   = wishbone.Interface(adr_width=30, data_width=64)
+        port = LiteDRAMNativePort("both", address_width=30, data_width=32)
+        self.wishbone_readback_test(data["pattern"], data["expected"], wb, port)
+
+    def test_wishbone_32bit_to_8bit(self):
+        # Verify Wishbone with 32-bit data width down-converted to 8-bit data width.
+        data = self.pattern_test_data["32bit_to_8bit"]
+        wb   = wishbone.Interface(adr_width=30, data_width=32)
+        port = LiteDRAMNativePort("both", address_width=30, data_width=8)
+        self.wishbone_readback_test(data["pattern"], data["expected"], wb, port)
+
+    def test_wishbone_32bit_base_address(self):
+        # Verify Wishbone with 32-bit data width and non-zero base address.
+        data   = self.pattern_test_data["32bit"]
+        wb     = wishbone.Interface(adr_width=30, data_width=32)
+        port   = LiteDRAMNativePort("both", address_width=30, data_width=32)
+        origin = 0x10000000
+        # offset the pattern addresses by the base address expressed in data words
+        pattern = [(adr + origin//(32//8), data) for adr, data in data["pattern"]]
+        self.wishbone_readback_test(pattern, data["expected"], wb, port, base_address=origin)
+
+    def test_wishbone_64bit_to_32bit_base_address(self):
+        # Verify Wishbone with 64-bit data width down-converted to 32-bit data width and non-zero base address.
+        data    = self.pattern_test_data["64bit_to_32bit"]
+        wb      = wishbone.Interface(adr_width=30, data_width=64)
+        port    = LiteDRAMNativePort("both", address_width=30, data_width=32)
+        origin  = 0x10000000
+        pattern = [(adr + origin//(64//8), data) for adr, data in data["pattern"]]
+        self.wishbone_readback_test(pattern, data["expected"], wb, port, base_address=origin)
+
+    def test_wishbone_32bit_to_8bit_base_address(self):
+        # Verify Wishbone with 32-bit data width down-converted to 8-bit data width and non-zero base address.
+        data    = self.pattern_test_data["32bit_to_8bit"]
+        wb      = wishbone.Interface(adr_width=30, data_width=32)
+        port    = LiteDRAMNativePort("both", address_width=30, data_width=8)
+        origin  = 0x10000000
+        pattern = [(adr + origin//(32//8), data) for adr, data in data["pattern"]]
+        self.wishbone_readback_test(pattern, data["expected"], wb, port, base_address=origin)