--- /dev/null
+Unless otherwise noted, LitePCIe is copyright (C) 2015 Florent Kermarrec.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Other authors retain ownership of their contributions. If a submission can
+reasonably be considered independently copyrightable, it's yours and we
+encourage you to claim it with appropriate copyright notices. This submission
+then falls under the "otherwise noted" category. All submissions are strongly
+encouraged to use the two-clause BSD license reproduced above.
--- /dev/null
+ __ _ __ ___ _________
+ / / (_) /____ / _ \/ ___/ _/__
+ / /__/ / __/ -_) ___/ /___/ // -_)
+ /____/_/\__/\__/_/ \___/___/\__/
+
+ Copyright 2015 / EnjoyDigital / M-Labs Ltd
+
+ A small footprint and configurable PCIe core
+ with MMAP interface and scatter-gather DMA
+ developed by EnjoyDigital
+
+[> Doc
+---------
+XXX
+
+[> Intro
+---------
+LitePCIe provides a small footprint and configurable PCIe gen1/2 core.
+
+LitePCIe is part of MiSoC libraries whose aims are to lower entry level of
+complex FPGA cores by providing simple, elegant and efficient implementations
+of components used in today's SoC such as Ethernet, SATA, PCIe, SDRAM Controller...
+
+The core uses simple and specific streaming buses and will provide in the future
+adapters to use standardized AXI or Avalon-ST streaming buses.
+
+Since Python is used to describe the HDL, the core is highly and easily
+configurable.
+
+LitePCIe uses technologies developed in partnership with M-Labs Ltd:
+ - Migen enables generating HDL with Python in an efficient way.
+ - MiSoC provides the basic blocks to build a powerful and small footprint SoC.
+
+LitePCIe can be used as MiSoC library or can be integrated with your standard
+design flow by generating the verilog rtl that you will use as a standard core.
+
+[> Features
+-----------
+- 7-Series Artix7/Kintex7 PHY (up to PCIe Gen2 X2)
+- Scatter-gather DMA
+- Wishbone bridge
+- Linux driver with DMA loopback demo and Sysfs
+
+
+[> Possible improvements
+-------------------------
+- add standardized interfaces (AXI, Avalon-ST)
+- add support for PCIe Gen2 X4 and X8 on 7-Series
+- clean up 7-Series wrappers
+- add Altera/Lattice support
+- ... See below Support and consulting :)
+
+If you want to support these features, please contact us at florent [AT]
+enjoy-digital.fr. You can also contact our partner on the public mailing list
+devel [AT] lists.m-labs.hk.
+
+
+[> Getting started
+------------------
+1. Install Python3 and your vendor's software
+
+2. Obtain Migen and install it:
+ git clone https://github.com/m-labs/migen
+ cd migen
+ python3 setup.py install
+ cd ..
+
+3. Obtain MiSoC:
+ git clone https://github.com/m-labs/misoc --recursive
+
+4. Build and load PCIe DMA loopback design (only for KC705 for now):
+ go to misoclib/com/litepcie/example_designs/
+ run ./make.py all load-bitstream
+
+5. Build and load Linux Driver:
+ go to misoclib/com/litepcie/software/linux/kernel
+ make all
+ ./init.sh
+
+6. Build and load Linux utilities:
+ go to misoclib/com/litepcie/software/linux/user
+ make all
+ ./litepcie_util dma_loopback_test
+
+[> Simulations:
+ Simulations are available in misoclib/com/litepcie/test:
+ - wishbone_tb
+ - dma_tb
+ To run a simulation, move to misoclib/com/litepcie/test/ and run:
+ make simulation_name
+
+[> Tests :
+ A DMA loopback example with Wishbone over Sysfs is provided.
+ Please go to Getting Started section to see how to run the tests.
+
+[> License
+-----------
+LitePCIe is released under the very permissive two-clause BSD license. Under
+the terms of this license, you are authorized to use LitePCIe for closed-source
+proprietary designs.
+Even though we do not require you to do so, those things are awesome, so please
+do them if possible:
+ - tell us that you are using LitePCIe
+ - cite LitePCIe in publications related to research it has helped
+ - send us feedback and suggestions for improvements
+ - send us bug reports when something goes wrong
+ - send us the modifications and improvements you have done to LitePCIe.
+
+[> Support and consulting
+--------------------------
+We love open-source hardware and like sharing our designs with others.
+
+LitePCIe is mainly developed and maintained by EnjoyDigital.
+
+If you would like to know more about LitePCIe or if you are already a happy
+user and would like to extend it for your needs, EnjoyDigital can provide standard
+commercial support as well as consulting services.
+
+So feel free to contact us, we'd love to work with you! (and eventually shorten
+the list of the possible improvements :)
+
+[> Contact
+E-mail: florent [AT] enjoy-digital.fr
--- /dev/null
+from migen.fhdl.std import *
+from migen.genlib.record import *
+from migen.flow.actor import *
+
+KB = 1024
+MB = 1024*KB
+GB = 1024*MB
+
+
+def get_bar_mask(size):
+    """Return the 32-bit PCIe BAR address mask for a BAR of ``size`` bytes.
+
+    Every bit position at or above the lowest set bit of ``size`` is set
+    in the mask; for a power-of-two size this is equivalent to
+    ``~(size - 1)`` truncated to 32 bits.
+    """
+    mask = 0
+    found = 0
+    for i in range(32):
+        if size%2:
+            found = 1
+        if found:
+            mask |= (1 << i)
+        size = size >> 1
+    return mask
+
+
+def reverse_bytes(v):
+    # Swap the byte order of a 32-bit signal (endianness conversion).
+    return Cat(v[24:32], v[16:24], v[8:16], v[0:8])
+
+
+def reverse_bits(v):
+    # Reverse the bit order of a 4-bit signal (used on byte-enable
+    # nibbles by the packetizer/depacketizer).
+    # NOTE(review): width is hard-coded to 4 bits; wider inputs are
+    # silently truncated.
+    return Cat(v[3], v[2], v[1], v[0])
+
+
+def phy_layout(dw):
+    """Streaming layout of the raw PHY interface: ``dw``-bit data plus
+    one byte-enable bit per byte."""
+    layout = [
+        ("dat", dw),
+        ("be", dw//8)
+    ]
+    return EndpointDescription(layout, packetized=True)
+
+
+def request_layout(dw):
+    """Streaming layout of a local (decoded) read/write request."""
+    layout = [
+        ("we", 1),
+        ("adr", 32),
+        ("len", 10),
+        ("req_id", 16),
+        ("tag", 8),
+        ("dat", dw),
+        ("channel", 8),  # for routing
+        ("user_id", 8)   # for packet identification
+    ]
+    return EndpointDescription(layout, packetized=True)
+
+
+def completion_layout(dw):
+    """Streaming layout of a local (decoded) completion."""
+    layout = [
+        ("adr", 32),
+        ("len", 10),
+        ("last", 1),
+        ("req_id", 16),
+        ("cmp_id", 16),
+        ("err", 1),
+        ("tag", 8),
+        ("dat", dw),
+        ("channel", 8),  # for routing
+        ("user_id", 8)   # for packet identification
+    ]
+    return EndpointDescription(layout, packetized=True)
+
+
+def interrupt_layout():
+    """Streaming layout carrying an 8-bit interrupt payload."""
+    return [("dat", 8)]
+
+
+def dma_layout(dw):
+    """Streaming layout of a plain ``dw``-bit DMA data stream."""
+    layout = [("dat", dw)]
+    return EndpointDescription(layout, packetized=True)
--- /dev/null
+from migen.fhdl.std import *
+from migen.bank.description import *
+from migen.genlib.record import *
+
+from misoclib.com.litepcie.core.packet.depacketizer import Depacketizer
+from misoclib.com.litepcie.core.packet.packetizer import Packetizer
+from misoclib.com.litepcie.core.switch.crossbar import Crossbar
+
+
+class Endpoint(Module):
+    """PCIe endpoint core.
+
+    Wires the PHY streaming interface to a TLP Depacketizer/Packetizer
+    pair and exposes a Crossbar (``self.crossbar``) from which user
+    master/slave ports can be requested.
+    """
+    def __init__(self, phy, max_pending_requests=4, with_reordering=False):
+        self.phy = phy
+        self.max_pending_requests = max_pending_requests
+
+        # # #
+
+        # TLP Packetizer / Depacketizer
+        depacketizer = Depacketizer(phy.dw, phy.bar0_mask)
+        packetizer = Packetizer(phy.dw)
+        self.submodules += depacketizer, packetizer
+        self.comb += [
+            phy.source.connect(depacketizer.sink),
+            packetizer.source.connect(phy.sink)
+        ]
+
+        # Crossbar
+        self.crossbar = crossbar = Crossbar(phy.dw, max_pending_requests, with_reordering)
+        self.submodules += crossbar
+
+        # (Slave) HOST initiates the transactions:
+        # decoded host requests go to the crossbar, completions come back
+        # to the packetizer.
+        self.comb += [
+            Record.connect(depacketizer.req_source, crossbar.phy_slave.sink),
+            Record.connect(crossbar.phy_slave.source, packetizer.cmp_sink)
+        ]
+
+        # (Master) FPGA initiates the transactions:
+        # crossbar requests go to the packetizer, decoded completions
+        # come back from the depacketizer.
+        self.comb += [
+            Record.connect(crossbar.phy_master.source, packetizer.req_sink),
+            Record.connect(depacketizer.cmp_source, crossbar.phy_master.sink)
+        ]
--- /dev/null
+from migen.fhdl.std import *
+from migen.bank.description import *
+
+from misoclib.com.litepcie.common import *
+
+
+class InterruptController(Module, AutoCSR):
+    """Collect up to ``n_irqs`` interrupt lines behind CSRs.
+
+    Incoming irqs are memorized into ``_vector`` until software clears
+    them through ``_clear``; ``source.stb`` is asserted while any
+    enabled interrupt is pending.
+    """
+    def __init__(self, n_irqs=32):
+        self.irqs = Signal(n_irqs)
+        self.source = Source(interrupt_layout())
+
+        self._enable = CSRStorage(n_irqs)
+        self._clear = CSR(n_irqs)
+        self._vector = CSRStatus(n_irqs)
+
+        # # #
+
+        enable = self._enable.storage
+        # clear is only non-zero for the single cycle of a CSR write
+        clear = Signal(n_irqs)
+        self.comb += If(self._clear.re, clear.eq(self._clear.r))
+
+        # memorize and clear irqs
+        vector = self._vector.status
+        self.sync += vector.eq(~clear & (vector | self.irqs))
+
+        self.comb += self.source.stb.eq((vector & enable) != 0)
--- /dev/null
+from migen.fhdl.std import *
+from migen.genlib.record import *
+from migen.flow.actor import EndpointDescription, Sink, Source
+
+from misoclib.com.litepcie.common import *
+
+# constants
+# TLP fmt/type field encodings, stored as {fmt[1:0], type[4:0]}
+# (see Cat(type, fmt) usage in the packetizer/depacketizer).
+fmt_type_dict = {
+    "mem_rd32": 0b0000000,
+    "mem_wr32": 0b1000000,
+    "mem_rd64": 0b0100000,
+    "mem_wr64": 0b1100000,
+
+    "cpld": 0b1001010,
+    "cpl": 0b0001010
+}
+
+# completion status codes (Successful / Unsupported Request /
+# Configuration Retry / Completer Abort)
+cpl_dict = {
+    "sc": 0b000,
+    "ur": 0b001,
+    "crs": 0b010,
+    "ca": 0b011
+}
+
+# maximum request size in bytes, used to size reordering buffers
+max_request_size = 512
+
+
+# headers
+class HField():
+ def __init__(self, word, offset, width):
+ self.word = word
+ self.offset = offset
+ self.width = width
+
+# raw header width carried on tlp_raw_layout streams (4 x 32-bit words)
+tlp_header_w = 128
+
+# fields common to all TLPs, used to route by fmt/type
+tlp_common_header = {
+    "fmt": HField(0, 29, 2),
+    "type": HField(0, 24, 5),
+}
+
+# memory read/write request header fields
+tlp_request_header = {
+    "fmt": HField(0, 29, 2),
+    "type": HField(0, 24, 5),
+    "tc": HField(0, 20, 3),
+    "td": HField(0, 15, 1),
+    "ep": HField(0, 14, 1),
+    "attr": HField(0, 12, 2),
+    "length": HField(0, 0, 10),
+
+    "requester_id": HField(1, 16, 16),
+    "tag": HField(1, 8, 8),
+    "last_be": HField(1, 4, 4),
+    "first_be": HField(1, 0, 4),
+
+    "address": HField(2, 2, 30),
+}
+
+# completion header fields
+tlp_completion_header = {
+    "fmt": HField(0, 29, 2),
+    "type": HField(0, 24, 5),
+    "tc": HField(0, 20, 3),
+    "td": HField(0, 15, 1),
+    "ep": HField(0, 14, 1),
+    "attr": HField(0, 12, 2),
+    "length": HField(0, 0, 10),
+
+    "completer_id": HField(1, 16, 16),
+    "status": HField(1, 13, 3),
+    "bcm": HField(1, 12, 1),
+    "byte_count": HField(1, 0, 12),
+
+    "requester_id": HField(2, 16, 16),
+    "tag": HField(2, 8, 8),
+    "lower_address": HField(2, 0, 7),
+}
+
+
+# layouts
+def _layout_from_header(header):
+ _layout = []
+ for k, v in sorted(header.items()):
+ _layout.append((k, v.width))
+ return _layout
+
+
+def tlp_raw_layout(dw):
+    """Layout carrying an undecoded 128-bit TLP header next to data."""
+    layout = [
+        ("header", tlp_header_w),
+        ("dat", dw),
+        ("be", dw//8)
+    ]
+    return EndpointDescription(layout, packetized=True)
+
+
+def tlp_common_layout(dw):
+    """Layout with only the fmt/type fields decoded (for dispatching)."""
+    layout = _layout_from_header(tlp_common_header) + [
+        ("dat", dw),
+        ("be", dw//8)
+    ]
+    return EndpointDescription(layout, packetized=True)
+
+
+def tlp_request_layout(dw):
+    """Layout with all memory request header fields decoded."""
+    layout = _layout_from_header(tlp_request_header) + [
+        ("dat", dw),
+        ("be", dw//8)
+    ]
+    return EndpointDescription(layout, packetized=True)
+
+
+def tlp_completion_layout(dw):
+    """Layout with all completion header fields decoded."""
+    layout = _layout_from_header(tlp_completion_header) + [
+        ("dat", dw),
+        ("be", dw//8)
+    ]
+    return EndpointDescription(layout, packetized=True)
--- /dev/null
+from migen.fhdl.std import *
+from migen.actorlib.structuring import *
+from migen.genlib.fsm import FSM, NextState
+
+from misoclib.com.litepcie.core.packet.common import *
+from misoclib.com.litepcie.core.switch.dispatcher import Dispatcher
+
+
+def _decode_header(h_dict, h_signal, obj):
+    """Return comb statements copying each header field out of the flat
+    header signal ``h_signal`` into the same-named attribute of ``obj``."""
+    r = []
+    for k, v in sorted(h_dict.items()):
+        start = v.word*32+v.offset
+        end = start+v.width
+        r.append(getattr(obj, k).eq(h_signal[start:end]))
+    return r
+
+
+class HeaderExtracter(Module):
+    """Strip the TLP header from the incoming PHY stream.
+
+    The first two ``dw``-bit words of each packet are accumulated into
+    ``source.header``; the remaining payload is realigned (one-word
+    shift with per-32-bit byte reversal) and forwarded on ``source``.
+    TERMINATE handles the short-packet case where eop arrives on the
+    second header word.
+    """
+    def __init__(self, dw):
+        self.sink = Sink(phy_layout(dw))
+        self.source = Source(tlp_raw_layout(dw))
+
+        ###
+
+        sink, source = self.sink, self.source
+
+        sop = Signal()
+        shift = Signal()
+
+        # previous-cycle data/be, needed to realign the payload by one word
+        sink_dat_r = Signal(dw)
+        sink_be_r = Signal(dw//8)
+
+        fsm = FSM(reset_state="HEADER1")
+        self.submodules += fsm
+
+        fsm.act("HEADER1",
+            sink.ack.eq(1),
+            If(sink.stb,
+                shift.eq(1),
+                NextState("HEADER2")
+            )
+        )
+        fsm.act("HEADER2",
+            sink.ack.eq(1),
+            If(sink.stb,
+                shift.eq(1),
+                If(sink.eop,
+                    sink.ack.eq(0),
+                    NextState("TERMINATE"),
+                ).Else(
+                    NextState("COPY")
+                )
+            )
+        )
+        self.sync += [
+            # shift each accepted header word into the upper half of header
+            If(shift, self.source.header.eq(Cat(self.source.header[64:], sink.dat))),
+            If(sink.stb & sink.ack,
+                sink_dat_r.eq(sink.dat),
+                sink_be_r.eq(sink.be)
+            )
+        ]
+        fsm.act("COPY",
+            sink.ack.eq(source.ack),
+            source.stb.eq(sink.stb),
+            source.sop.eq(sop),
+            source.eop.eq(sink.eop),
+            # recombine previous upper word with current lower word,
+            # byte-reversing each 32-bit half
+            source.dat.eq(Cat(reverse_bytes(sink_dat_r[32:]), reverse_bytes(sink.dat[:32]))),
+            source.be.eq(Cat(reverse_bits(sink_be_r[4:]), reverse_bits(sink.be[:4]))),
+            If(source.stb & source.ack & source.eop,
+                NextState("HEADER1")
+            )
+        )
+        self.sync += \
+            If(fsm.before_entering("COPY"),
+                sop.eq(1)
+            ).Elif(source.stb & source.ack,
+                sop.eq(0)
+            )
+        fsm.act("TERMINATE",
+            # packet ended inside the header: emit a single-beat packet
+            # from the last captured word
+            sink.ack.eq(source.ack),
+            source.stb.eq(1),
+            source.sop.eq(1),
+            source.eop.eq(1),
+            source.dat.eq(reverse_bytes(sink.dat[32:])),
+            source.be.eq(reverse_bits(sink.be[4:])),
+            If(source.stb & source.ack & source.eop,
+                NextState("HEADER1")
+            )
+        )
+
+
+class Depacketizer(Module):
+    """Decode incoming TLPs into local requests and completions.
+
+    The raw header is extracted by HeaderExtracter, the stream is then
+    dispatched on fmt/type: memory read/write requests go to
+    ``req_source``, completions (cpl/cpld) to ``cmp_source``.
+    ``address_mask`` is cleared from decoded request addresses
+    (presumably the BAR mask from the PHY — confirm against Endpoint).
+    """
+    def __init__(self, dw, address_mask=0):
+        self.sink = Sink(phy_layout(dw))
+
+        self.req_source = Source(request_layout(dw))
+        self.cmp_source = Source(completion_layout(dw))
+
+        ###
+
+        # extract raw header
+        header_extracter = HeaderExtracter(dw)
+        self.submodules += header_extracter
+        self.comb += Record.connect(self.sink, header_extracter.sink)
+        header = header_extracter.source.header
+
+
+        # dispatch data according to fmt/type
+        dispatch_source = Source(tlp_common_layout(dw))
+        dispatch_sinks = [Sink(tlp_common_layout(dw)) for i in range(2)]
+
+        self.comb += [
+            dispatch_source.stb.eq(header_extracter.source.stb),
+            header_extracter.source.ack.eq(dispatch_source.ack),
+            dispatch_source.sop.eq(header_extracter.source.sop),
+            dispatch_source.eop.eq(header_extracter.source.eop),
+            dispatch_source.dat.eq(header_extracter.source.dat),
+            dispatch_source.be.eq(header_extracter.source.be),
+            _decode_header(tlp_common_header, header, dispatch_source)
+        ]
+
+        self.submodules.dispatcher = Dispatcher(dispatch_source, dispatch_sinks)
+
+        # sel 0: memory requests, sel 1: completions
+        fmt_type = Cat(dispatch_source.type, dispatch_source.fmt)
+        self.comb += \
+            If((fmt_type == fmt_type_dict["mem_rd32"]) | (fmt_type == fmt_type_dict["mem_wr32"]),
+                self.dispatcher.sel.eq(0),
+            ).Elif((fmt_type == fmt_type_dict["cpld"]) | (fmt_type == fmt_type_dict["cpl"]),
+                self.dispatcher.sel.eq(1),
+            )
+
+        # decode TLP request and format local request
+        tlp_req = Source(tlp_request_layout(dw))
+        self.comb += Record.connect(dispatch_sinks[0], tlp_req)
+        self.comb += _decode_header(tlp_request_header, header, tlp_req)
+
+        req_source = self.req_source
+        self.comb += [
+            req_source.stb.eq(tlp_req.stb),
+            req_source.we.eq(tlp_req.stb & (Cat(tlp_req.type, tlp_req.fmt) == fmt_type_dict["mem_wr32"])),
+            tlp_req.ack.eq(req_source.ack),
+            req_source.sop.eq(tlp_req.sop),
+            req_source.eop.eq(tlp_req.eop),
+            # restore the 2 implicit LSBs of the dword-aligned address
+            req_source.adr.eq(Cat(Signal(2), tlp_req.address & (~address_mask))),
+            req_source.len.eq(tlp_req.length),
+            req_source.req_id.eq(tlp_req.requester_id),
+            req_source.tag.eq(tlp_req.tag),
+            req_source.dat.eq(tlp_req.dat),
+        ]
+
+        # decode TLP completion and format local completion
+        tlp_cmp = Source(tlp_completion_layout(dw))
+        self.comb += Record.connect(dispatch_sinks[1], tlp_cmp)
+        self.comb += _decode_header(tlp_completion_header, header, tlp_cmp)
+
+        cmp_source = self.cmp_source
+        self.comb += [
+            cmp_source.stb.eq(tlp_cmp.stb),
+            tlp_cmp.ack.eq(cmp_source.ack),
+            cmp_source.sop.eq(tlp_cmp.sop),
+            cmp_source.eop.eq(tlp_cmp.eop),
+            cmp_source.len.eq(tlp_cmp.length),
+            # last completion of a split request: remaining byte_count
+            # (in dwords) equals this completion's length
+            cmp_source.last.eq(tlp_cmp.length == (tlp_cmp.byte_count[2:])),
+            cmp_source.adr.eq(tlp_cmp.lower_address),
+            cmp_source.req_id.eq(tlp_cmp.requester_id),
+            cmp_source.cmp_id.eq(tlp_cmp.completer_id),
+            cmp_source.err.eq(tlp_cmp.status != 0),
+            cmp_source.tag.eq(tlp_cmp.tag),
+            cmp_source.dat.eq(tlp_cmp.dat)
+        ]
--- /dev/null
+from migen.fhdl.std import *
+from migen.actorlib.structuring import *
+from migen.genlib.fsm import FSM, NextState
+from migen.genlib.misc import chooser
+
+from misoclib.com.litepcie.core.packet.common import *
+from misoclib.com.litepcie.core.switch.arbiter import Arbiter
+
+
+def _encode_header(h_dict, h_signal, obj):
+    """Return comb statements packing each same-named attribute of
+    ``obj`` into its field position in the flat header signal."""
+    r = []
+    for k, v in sorted(h_dict.items()):
+        start = v.word*32+v.offset
+        end = start+v.width
+        r.append(h_signal[start:end].eq(getattr(obj, k)))
+    return r
+
+
+class HeaderInserter(Module):
+    """Prepend the raw TLP header to the outgoing payload stream.
+
+    The 96 header bits are sent over the first one-and-a-half output
+    words; the payload is then realigned by one 32-bit word with
+    per-32-bit byte reversal (mirror of HeaderExtracter).
+    """
+    def __init__(self, dw):
+        self.sink = sink = Sink(tlp_raw_layout(dw))
+        self.source = source = Source(phy_layout(dw))
+
+        ###
+
+        fsm = FSM(reset_state="HEADER1")
+        self.submodules += fsm
+
+        # previous-cycle data/eop, needed for the one-word realignment
+        sink_dat_r = Signal(dw)
+        sink_eop_r = Signal()
+        self.sync += \
+            If(sink.stb & sink.ack,
+                sink_dat_r.eq(sink.dat),
+                sink_eop_r.eq(sink.eop)
+            )
+
+        fsm.act("HEADER1",
+            sink.ack.eq(1),
+            If(sink.stb & sink.sop,
+                sink.ack.eq(0),
+                source.stb.eq(1),
+                source.sop.eq(1),
+                source.eop.eq(0),
+                source.dat.eq(sink.header[:64]),
+                source.be.eq(0xff),
+                If(source.stb & source.ack,
+                    NextState("HEADER2"),
+                )
+            )
+        )
+        fsm.act("HEADER2",
+            # second beat: last header word plus first payload word
+            source.stb.eq(1),
+            source.sop.eq(0),
+            source.eop.eq(sink.eop),
+            source.dat.eq(Cat(sink.header[64:96], reverse_bytes(sink.dat[:32]))),
+            source.be.eq(Cat(Signal(4, reset=0xf), reverse_bits(sink.be[:4]))),
+            If(source.stb & source.ack,
+                sink.ack.eq(1),
+                If(source.eop,
+                    NextState("HEADER1")
+                ).Else(
+                    NextState("COPY")
+                )
+            )
+        )
+        fsm.act("COPY",
+            # sink_eop_r flags the trailing half-word still to be flushed
+            source.stb.eq(sink.stb | sink_eop_r),
+            source.sop.eq(0),
+            source.eop.eq(sink_eop_r),
+            source.dat.eq(Cat(reverse_bytes(sink_dat_r[32:64]), reverse_bytes(sink.dat[:32]))),
+            If(sink_eop_r,
+                source.be.eq(0x0f)
+            ).Else(
+                source.be.eq(0xff)
+            ),
+            If(source.stb & source.ack,
+                sink.ack.eq(~sink_eop_r),
+                If(source.eop,
+                    NextState("HEADER1")
+                )
+            )
+        )
+
+
+class Packetizer(Module):
+    """Encode local requests and completions into outgoing TLPs.
+
+    Requests from ``req_sink`` and completions from ``cmp_sink`` are
+    formatted into TLP headers, arbitrated, then the header is inserted
+    in front of the payload stream by HeaderInserter.
+    """
+    def __init__(self, dw):
+        self.req_sink = req_sink = Sink(request_layout(dw))
+        self.cmp_sink = cmp_sink = Sink(completion_layout(dw))
+
+        self.source = Source(phy_layout(dw))
+
+        ###
+
+        # format TLP request and encode it
+        tlp_req = Sink(tlp_request_layout(dw))
+        self.comb += [
+            tlp_req.stb.eq(req_sink.stb),
+            req_sink.ack.eq(tlp_req.ack),
+            tlp_req.sop.eq(req_sink.sop),
+            tlp_req.eop.eq(req_sink.eop),
+
+            If(req_sink.we,
+                Cat(tlp_req.type, tlp_req.fmt).eq(fmt_type_dict["mem_wr32"])
+            ).Else(
+                Cat(tlp_req.type, tlp_req.fmt).eq(fmt_type_dict["mem_rd32"])
+            ),
+
+            tlp_req.tc.eq(0),
+            tlp_req.td.eq(0),
+            tlp_req.ep.eq(0),
+            tlp_req.attr.eq(0),
+            tlp_req.length.eq(req_sink.len),
+
+            tlp_req.requester_id.eq(req_sink.req_id),
+            tlp_req.tag.eq(req_sink.tag),
+            # last_be must be 0 for single-dword requests
+            If(req_sink.len > 1,
+                tlp_req.last_be.eq(0xf)
+            ).Else(
+                tlp_req.last_be.eq(0x0)
+            ),
+            tlp_req.first_be.eq(0xf),
+            tlp_req.address.eq(req_sink.adr[2:]),
+
+            tlp_req.dat.eq(req_sink.dat),
+            If(req_sink.we,
+                tlp_req.be.eq(0xff)
+            ).Else(
+                tlp_req.be.eq(0x00)
+            ),
+        ]
+
+        tlp_raw_req = Sink(tlp_raw_layout(dw))
+        self.comb += [
+            tlp_raw_req.stb.eq(tlp_req.stb),
+            tlp_req.ack.eq(tlp_raw_req.ack),
+            tlp_raw_req.sop.eq(tlp_req.sop),
+            tlp_raw_req.eop.eq(tlp_req.eop),
+            _encode_header(tlp_request_header, tlp_raw_req.header, tlp_req),
+            tlp_raw_req.dat.eq(tlp_req.dat),
+            tlp_raw_req.be.eq(tlp_req.be),
+        ]
+
+        # format TLP completion and encode it
+        tlp_cmp = Sink(tlp_completion_layout(dw))
+        self.comb += [
+            tlp_cmp.stb.eq(cmp_sink.stb),
+            cmp_sink.ack.eq(tlp_cmp.ack),
+            tlp_cmp.sop.eq(cmp_sink.sop),
+            tlp_cmp.eop.eq(cmp_sink.eop),
+
+            tlp_cmp.tc.eq(0),
+            tlp_cmp.td.eq(0),
+            tlp_cmp.ep.eq(0),
+            tlp_cmp.attr.eq(0),
+            tlp_cmp.length.eq(cmp_sink.len),
+
+            tlp_cmp.completer_id.eq(cmp_sink.cmp_id),
+            # errored completions are sent without data (cpl) and marked
+            # Unsupported Request
+            If(cmp_sink.err,
+                Cat(tlp_cmp.type, tlp_cmp.fmt).eq(fmt_type_dict["cpl"]),
+                tlp_cmp.status.eq(cpl_dict["ur"])
+            ).Else(
+                Cat(tlp_cmp.type, tlp_cmp.fmt).eq(fmt_type_dict["cpld"]),
+                tlp_cmp.status.eq(cpl_dict["sc"])
+            ),
+            tlp_cmp.bcm.eq(0),
+            tlp_cmp.byte_count.eq(cmp_sink.len*4),
+
+            tlp_cmp.requester_id.eq(cmp_sink.req_id),
+            tlp_cmp.tag.eq(cmp_sink.tag),
+            tlp_cmp.lower_address.eq(cmp_sink.adr),
+
+            tlp_cmp.dat.eq(cmp_sink.dat),
+            tlp_cmp.be.eq(0xff)
+        ]
+
+        tlp_raw_cmp = Sink(tlp_raw_layout(dw))
+        self.comb += [
+            tlp_raw_cmp.stb.eq(tlp_cmp.stb),
+            tlp_cmp.ack.eq(tlp_raw_cmp.ack),
+            tlp_raw_cmp.sop.eq(tlp_cmp.sop),
+            tlp_raw_cmp.eop.eq(tlp_cmp.eop),
+            _encode_header(tlp_completion_header, tlp_raw_cmp.header, tlp_cmp),
+            tlp_raw_cmp.dat.eq(tlp_cmp.dat),
+            tlp_raw_cmp.be.eq(tlp_cmp.be),
+        ]
+
+        # arbitrate
+        # NOTE(review): attribute name "arbitrer" is misspelled
+        # ("arbiter"); kept as-is since external code may reference it.
+        tlp_raw = Sink(tlp_raw_layout(dw))
+        self.submodules.arbitrer = Arbiter([tlp_raw_req, tlp_raw_cmp], tlp_raw)
+
+        # insert header
+        header_inserter = HeaderInserter(dw)
+        self.submodules += header_inserter
+        self.comb += [
+            Record.connect(tlp_raw, header_inserter.sink),
+            Record.connect(header_inserter.source, self.source)
+        ]
--- /dev/null
+from migen.fhdl.std import *
+from migen.genlib.roundrobin import *
+from migen.genlib.record import *
+
+
+class Arbiter(Module):
+    """Round-robin packet arbiter: connects one of ``sources`` to
+    ``sink``, switching only on packet boundaries (sop..eop)."""
+    def __init__(self, sources, sink):
+        if len(sources) == 0:
+            # nothing to arbitrate
+            pass
+        else:
+            self.submodules.rr = RoundRobin(len(sources))
+            self.grant = self.rr.grant
+            cases = {}
+            for i, source in enumerate(sources):
+                sop = Signal()
+                eop = Signal()
+                # ongoing holds the request between sop and eop so the
+                # grant cannot move mid-packet
+                ongoing = Signal()
+                self.comb += [
+                    sop.eq(source.stb & source.sop),
+                    eop.eq(source.stb & source.eop & source.ack),
+                ]
+                self.sync += ongoing.eq((sop | ongoing) & ~eop)
+                self.comb += self.rr.request[i].eq((sop | ongoing) & ~eop)
+                cases[i] = [Record.connect(source, sink)]
+            self.comb += Case(self.grant, cases)
--- /dev/null
+from migen.fhdl.std import *
+from migen.bank.description import *
+
+from misoclib.com.litepcie.common import *
+
+
+class SlaveInternalPort:
+    """Crossbar-side slave port: receives requests on ``source``,
+    returns completions on ``sink``; ``address_decoder`` selects it."""
+    def __init__(self, dw, address_decoder=None):
+        self.address_decoder = address_decoder
+        self.sink = Sink(completion_layout(dw))
+        self.source = Source(request_layout(dw))
+
+
+class MasterInternalPort:
+    """Crossbar-side master port: emits requests on ``sink``, receives
+    completions on ``source``; ``channel`` tags it for routing."""
+    def __init__(self, dw, channel=None, write_only=False, read_only=False):
+        self.channel = channel
+        self.write_only = write_only
+        self.read_only = read_only
+        self.sink = Sink(request_layout(dw))
+        self.source = Source(completion_layout(dw))
+
+
+class SlavePort:
+    """User-facing view of a SlaveInternalPort (sink/source swapped)."""
+    def __init__(self, port):
+        self.address_decoder = port.address_decoder
+        self.sink = port.source
+        self.source = port.sink
+
+
+class MasterPort:
+    """User-facing view of a MasterInternalPort (sink/source swapped)."""
+    def __init__(self, port):
+        self.channel = port.channel
+        self.sink = port.source
+        self.source = port.sink
--- /dev/null
+from migen.fhdl.std import *
+from migen.bank.description import *
+
+from misoclib.com.litepcie.common import *
+from misoclib.com.litepcie.core.switch.common import *
+from misoclib.com.litepcie.core.switch.arbiter import Arbiter
+from misoclib.com.litepcie.core.switch.dispatcher import Dispatcher
+from misoclib.com.litepcie.core.switch.request_controller import RequestController
+
+
+class Crossbar(Module, AutoCSR):
+    """Route requests/completions between the PHY ports and user ports.
+
+    User slave ports are selected by address decoders; user master
+    ports are identified by an auto-assigned ``channel``. The actual
+    interconnect is built in ``do_finalize`` once all ports are known.
+    """
+    def __init__(self, dw, max_pending_requests, with_reordering=False):
+        self.dw = dw
+        self.max_pending_requests = max_pending_requests
+        self.with_reordering = with_reordering
+
+        self.master = MasterInternalPort(dw)
+        self.slave = SlaveInternalPort(dw)
+        self.phy_master = MasterPort(self.master)
+        self.phy_slave = SlavePort(self.slave)
+
+        self.user_masters = []
+        self.user_masters_channel = 0
+        self.user_slaves = []
+
+    def get_slave_port(self, address_decoder):
+        """Allocate a user slave port selected by ``address_decoder``."""
+        s = SlaveInternalPort(self.dw, address_decoder)
+        self.user_slaves.append(s)
+        return SlavePort(s)
+
+    def get_master_port(self, write_only=False, read_only=False):
+        """Allocate a user master port with the next free channel."""
+        m = MasterInternalPort(self.dw, self.user_masters_channel, write_only, read_only)
+        self.user_masters_channel += 1
+        self.user_masters.append(m)
+        return MasterPort(m)
+
+    def filter_masters(self, write_only, read_only):
+        """Return user masters matching the given write_only/read_only flags."""
+        masters = []
+        for m in self.user_masters:
+            if m.write_only == write_only and m.read_only == read_only:
+                masters.append(m)
+        return masters
+
+    def slave_dispatch_arbitrate(self, slaves, slave):
+        """Dispatch requests by address decoder, arbitrate completions."""
+        # dispatch
+        s_sources = [s.source for s in slaves]
+        s_dispatcher = Dispatcher(slave.source, s_sources, one_hot=True)
+        self.submodules += s_dispatcher
+        for i, s in enumerate(slaves):
+            self.comb += s_dispatcher.sel[i].eq(s.address_decoder(slave.source.adr))
+
+        # arbitrate
+        s_sinks = [s.sink for s in slaves]
+        s_arbiter = Arbiter(s_sinks, slave.sink)
+        self.submodules += s_arbiter
+
+    def master_arbitrate_dispatch(self, masters, master):
+        """Arbitrate requests, dispatch completions by channel."""
+        # arbitrate
+        m_sinks = [m.sink for m in masters]
+        m_arbiter = Arbiter(m_sinks, master.sink)
+        self.submodules += m_arbiter
+
+        # dispatch
+        m_sources = [m.source for m in masters]
+        m_dispatcher = Dispatcher(master.source, m_sources)
+        self.submodules += m_dispatcher
+        self.comb += m_dispatcher.sel.eq(master.source.channel)
+
+    def do_finalize(self):
+        # Slave path
+        # Dispatch request to user sources (according to address decoder)
+        # Arbitrate completion from user sinks
+        if self.user_slaves != []:
+            self.slave_dispatch_arbitrate(self.user_slaves, self.slave)
+
+        # Master path
+        # Arbitrate requests from user sinks
+        # Dispatch completion to user sources (according to channel)
+
+        # +-------+
+        # reqs---> |  RD   |
+        # cmps<--- | PORTS |---------+
+        # +-------+  +---+----+  +----------+
+        #             |Arb/Disp|-->|Controller|--+
+        # +-------+  +---+----+  +----------+  |
+        # reqs---> |  RW   |         |                          |
+        # cmps<--- | PORTS |---------+                          |
+        # +-------+                                      +---+----+
+        #                                                 |Arb/Disp|<--> to/from Packetizer/
+        # +-------+                     +---+----+   Depacketizer
+        # reqs---> |  WR   |        +--------+          |
+        # cmps<--- | PORTS |-----|Arb/Disp|-----------------+
+        # +-------+              +--------+
+        #
+        # The controller blocks RD requests when the max number of pending
+        # requests have been sent (max_pending_requests parameters).
+        # To avoid blocking write_only ports when RD requests are blocked,
+        # a separate arbitration stage is used.
+
+        if self.user_masters != []:
+            masters = []
+
+            # Arbitrate / dispatch read_only / read_write ports
+            # and insert controller
+            rd_rw_masters = self.filter_masters(False, True)
+            rd_rw_masters += self.filter_masters(False, False)
+            if rd_rw_masters != []:
+                # NOTE(review): rd_rw_master is created but never used;
+                # controller.master_in/master_out are used instead.
+                rd_rw_master = MasterInternalPort(self.dw)
+                controller = RequestController(self.dw, self.max_pending_requests, self.with_reordering)
+                self.submodules += controller
+                self.master_arbitrate_dispatch(rd_rw_masters, controller.master_in)
+                masters.append(controller.master_out)
+
+            # Arbitrate / dispatch write_only ports
+            wr_masters = self.filter_masters(True, False)
+            if wr_masters != []:
+                wr_master = MasterInternalPort(self.dw)
+                self.master_arbitrate_dispatch(wr_masters, wr_master)
+                masters.append(wr_master)
+
+            # Final Arbitrate / dispatch stage
+            self.master_arbitrate_dispatch(masters, self.master)
--- /dev/null
+from migen.fhdl.std import *
+from migen.genlib.record import *
+
+
+class Dispatcher(Module):
+    """Packet dispatcher: routes ``source`` to one of ``sinks`` chosen
+    by ``sel`` (binary index, or one-hot when ``one_hot``). The
+    selection is latched at sop so it cannot change mid-packet; an
+    unmatched selection acks and drops the packet."""
+    def __init__(self, source, sinks, one_hot=False):
+        if len(sinks) == 0:
+            self.sel = Signal()
+        elif len(sinks) == 1:
+            self.comb += Record.connect(source, sinks[0])
+            self.sel = Signal()
+        else:
+            if one_hot:
+                self.sel = Signal(len(sinks))
+            else:
+                self.sel = Signal(max=len(sinks))
+            ###
+            sop = Signal()
+            self.comb += sop.eq(source.stb & source.sop)
+            # sel is sampled at sop and held (sel_r) for the rest of
+            # the packet
+            sel = Signal(flen(self.sel))
+            sel_r = Signal(flen(self.sel))
+            self.sync += \
+                If(sop,
+                    sel_r.eq(self.sel)
+                )
+            self.comb += \
+                If(sop,
+                    sel.eq(self.sel)
+                ).Else(
+                    sel.eq(sel_r)
+                )
+            cases = {}
+            for i, sink in enumerate(sinks):
+                if one_hot:
+                    idx = 2**i
+                else:
+                    idx = i
+                cases[idx] = [Record.connect(source, sink)]
+            cases["default"] = [source.ack.eq(1)]
+            self.comb += Case(sel, cases)
--- /dev/null
+from migen.fhdl.std import *
+from migen.actorlib.structuring import *
+from migen.genlib.fifo import SyncFIFO
+from migen.genlib.fsm import FSM, NextState
+from migen.actorlib.fifo import SyncFIFO as SyncFlowFIFO
+
+from misoclib.com.litepcie.common import *
+from misoclib.com.litepcie.core.packet.common import *
+from misoclib.com.litepcie.core.switch.common import *
+
+
+class Reordering(Module):
+    """Re-order completions into request-issue order.
+
+    Issued tags are pushed into ``tag_buffer`` via req_we/req_tag;
+    incoming completions are stored in a per-tag FIFO and read back in
+    the order the tags were issued.
+    """
+    def __init__(self, dw, max_pending_requests):
+        self.sink = Sink(completion_layout(dw))
+        self.source = Source(completion_layout(dw))
+
+        self.req_we = Signal()
+        self.req_tag = Signal(log2_int(max_pending_requests))
+
+        # # #
+
+        tag_buffer = SyncFIFO(log2_int(max_pending_requests), 2*max_pending_requests)
+        self.submodules += tag_buffer
+        self.comb += [
+            tag_buffer.we.eq(self.req_we),
+            tag_buffer.din.eq(self.req_tag)
+        ]
+
+        # one completion FIFO per possible tag, sized for two maximum
+        # size requests
+        reorder_buffers = [SyncFlowFIFO(completion_layout(dw), 2*max_request_size//(dw//8), buffered=True)
+            for i in range(max_pending_requests)]
+        self.submodules += iter(reorder_buffers)
+
+        # store incoming completion in "sink.tag" buffer
+        cases = {}
+        for i in range(max_pending_requests):
+            cases[i] = [Record.connect(self.sink, reorder_buffers[i].sink)]
+        cases["default"] = [self.sink.ack.eq(1)]
+        self.comb += Case(self.sink.tag, cases)
+
+        # read buffer according to tag_buffer order; pop the tag once
+        # the last completion of the request has gone through
+        cases = {}
+        for i in range(max_pending_requests):
+            cases[i] = [Record.connect(reorder_buffers[i].source, self.source)]
+        cases["default"] = []
+        self.comb += [
+            Case(tag_buffer.dout, cases),
+            If(self.source.stb & self.source.eop & self.source.last,
+                tag_buffer.re.eq(self.source.ack)
+            )
+        ]
+ ]
+
+
+class RequestController(Module):
+    """Limit in-flight read requests and restore routing information.
+
+    Read requests are tagged from a free-tag FIFO (blocking when all
+    ``max_pending_requests`` tags are in flight); channel/user_id are
+    remembered in ``info_mem`` and re-attached to the corresponding
+    completions. Optionally inserts a Reordering stage on completions.
+    """
+    def __init__(self, dw, max_pending_requests, with_reordering=False):
+        self.master_in = MasterInternalPort(dw)
+        self.master_out = MasterInternalPort(dw)
+
+        # # #
+
+        req_sink, req_source = self.master_in.sink, self.master_out.sink
+        cmp_sink, cmp_source = self.master_out.source, self.master_in.source
+
+        # FIFO of free tags
+        tag_fifo = SyncFIFO(log2_int(max_pending_requests), max_pending_requests)
+        self.submodules += tag_fifo
+
+        # per-tag channel (bits 0:8) and user_id (bits 8:16) storage
+        info_mem = Memory(16, max_pending_requests)
+        info_mem_wr_port = info_mem.get_port(write_capable=True)
+        info_mem_rd_port = info_mem.get_port(async_read=False)
+        self.specials += info_mem, info_mem_wr_port, info_mem_rd_port
+
+        req_tag = Signal(max=max_pending_requests)
+        self.sync += \
+            If(tag_fifo.re,
+                req_tag.eq(tag_fifo.dout)
+            )
+
+        # requests mgt
+        req_fsm = FSM(reset_state="IDLE")
+        self.submodules += req_fsm
+
+        req_fsm.act("IDLE",
+            req_sink.ack.eq(0),
+            # reads need a free tag; writes pass through unconditionally
+            If(req_sink.stb & req_sink.sop & ~req_sink.we & tag_fifo.readable,
+                tag_fifo.re.eq(1),
+                NextState("SEND_READ")
+            ).Elif(req_sink.stb & req_sink.sop & req_sink.we,
+                NextState("SEND_WRITE")
+            )
+        )
+        req_fsm.act("SEND_READ",
+            Record.connect(req_sink, req_source),
+            req_sink.ack.eq(0),
+            req_source.tag.eq(req_tag),
+            If(req_source.stb & req_source.eop & req_source.ack,
+                NextState("UPDATE_INFO_MEM")
+            )
+        )
+        req_fsm.act("SEND_WRITE",
+            Record.connect(req_sink, req_source),
+            # tag 32 marks writes (no completion expected) — presumably
+            # outside the 0..max_pending_requests-1 read-tag range;
+            # confirm against PHY tag handling
+            req_source.tag.eq(32),
+            If(req_source.stb & req_source.eop & req_source.ack,
+                NextState("IDLE")
+            )
+        )
+        req_fsm.act("UPDATE_INFO_MEM",
+            # remember routing info for the tag just issued
+            info_mem_wr_port.we.eq(1),
+            info_mem_wr_port.adr.eq(req_tag),
+            info_mem_wr_port.dat_w[0:8].eq(req_sink.channel),
+            info_mem_wr_port.dat_w[8:16].eq(req_sink.user_id),
+            req_sink.ack.eq(1),
+            NextState("IDLE")
+        )
+
+
+        # completions mgt
+        if with_reordering:
+            self.submodules.reordering = Reordering(dw, max_pending_requests)
+            self.comb += [
+                self.reordering.req_we.eq(info_mem_wr_port.we),
+                self.reordering.req_tag.eq(info_mem_wr_port.adr),
+                Record.connect(self.reordering.source, cmp_source)
+            ]
+            # reroute the FSM's completion output through the reorderer
+            cmp_source = self.reordering.sink
+
+        cmp_fsm = FSM(reset_state="INIT")
+        self.submodules += cmp_fsm
+
+        tag_cnt = Signal(max=max_pending_requests)
+        inc_tag_cnt = Signal()
+        self.sync += \
+            If(inc_tag_cnt,
+                tag_cnt.eq(tag_cnt+1)
+            )
+
+        cmp_fsm.act("INIT",
+            # seed the tag FIFO with all tags after reset
+            inc_tag_cnt.eq(1),
+            tag_fifo.we.eq(1),
+            tag_fifo.din.eq(tag_cnt),
+            If(tag_cnt == (max_pending_requests-1),
+                NextState("IDLE")
+            )
+        )
+        cmp_fsm.act("IDLE",
+            cmp_sink.ack.eq(1),
+            info_mem_rd_port.adr.eq(cmp_sink.tag),
+            If(cmp_sink.stb & cmp_sink.sop,
+                cmp_sink.ack.eq(0),
+                NextState("COPY"),
+            )
+        )
+        cmp_fsm.act("COPY",
+            info_mem_rd_port.adr.eq(cmp_sink.tag),
+            # when the request's final completion ends, recycle the tag
+            If(cmp_sink.stb & cmp_sink.eop & cmp_sink.last,
+                cmp_sink.ack.eq(0),
+                NextState("UPDATE_TAG_FIFO"),
+            ).Else(
+                Record.connect(cmp_sink, cmp_source),
+                If(cmp_sink.stb & cmp_sink.eop & cmp_sink.ack,
+                    NextState("IDLE")
+                )
+            ),
+            cmp_source.channel.eq(info_mem_rd_port.dat_r[0:8]),
+            cmp_source.user_id.eq(info_mem_rd_port.dat_r[8:16]),
+        )
+        cmp_fsm.act("UPDATE_TAG_FIFO",
+            # return the tag to the free pool while forwarding the last beat
+            tag_fifo.we.eq(1),
+            tag_fifo.din.eq(cmp_sink.tag),
+            info_mem_rd_port.adr.eq(cmp_sink.tag),
+            Record.connect(cmp_sink, cmp_source),
+            If(cmp_sink.stb & cmp_sink.ack,
+                NextState("IDLE")
+            ),
+            cmp_source.channel.eq(info_mem_rd_port.dat_r[0:8]),
+            cmp_source.user_id.eq(info_mem_rd_port.dat_r[8:16]),
+        )
--- /dev/null
+#!/usr/bin/env python3
+
+import sys
+import os
+import argparse
+import subprocess
+import struct
+import importlib
+
+from mibuild.tools import write_to_file
+from migen.util.misc import autotype
+from migen.fhdl import verilog, edif
+from migen.fhdl.structure import _Fragment
+from migen.bank.description import CSRStatus
+from mibuild import tools
+from mibuild.xilinx.common import *
+
+from misoclib.soc import cpuif
+from misoclib.com.litepcie.common import *
+
+
+def _import(default, name):
+ return importlib.import_module(default + "." + name)
+
+
+def _get_args():
+ parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
+ description="""\
+LitePCIe - based on Migen.
+
+This program builds and/or loads LitePCIe components.
+One or several actions can be specified:
+
+clean delete previous build(s).
+build-rtl build verilog rtl.
+build-bitstream build-bitstream build FPGA bitstream.
+build-csr-csv save CSR map into CSV file.
+build-csr-header save CSR map into C header file.
+
+load-bitstream load bitstream into volatile storage.
+
+all clean, build-csr-csv, build-bitstream, load-bitstream.
+""")
+
+ parser.add_argument("-t", "--target", default="dma", help="Core type to build")
+ parser.add_argument("-s", "--sub-target", default="", help="variant of the Core type to build")
+ parser.add_argument("-p", "--platform", default=None, help="platform to build for")
+ parser.add_argument("-Ot", "--target-option", default=[], nargs=2, action="append", help="set target-specific option")
+ parser.add_argument("-Op", "--platform-option", default=[], nargs=2, action="append", help="set platform-specific option")
+ parser.add_argument("--csr_csv", default="./test/csr.csv", help="CSV file to save the CSR map into")
+ parser.add_argument("--csr_header", default="../software/linux/kernel/csr.h", help="C header file to save the CSR map into")
+ parser.add_argument("action", nargs="+", help="specify an action")
+
+ return parser.parse_args()
+
+# Note: misoclib need to be installed as a python library
+
+if __name__ == "__main__":
+ args = _get_args()
+
+ # create top-level Core object
+ target_module = _import("targets", args.target)
+ if args.sub_target:
+ top_class = getattr(target_module, args.sub_target)
+ else:
+ top_class = target_module.default_subtarget
+
+ if args.platform is None:
+ platform_name = top_class.default_platform
+ else:
+ platform_name = args.platform
+ platform_module = _import("mibuild.platforms", platform_name)
+ platform_kwargs = dict((k, autotype(v)) for k, v in args.platform_option)
+ platform = platform_module.Platform(**platform_kwargs)
+
+ build_name = top_class.__name__.lower() + "-" + platform_name
+ top_kwargs = dict((k, autotype(v)) for k, v in args.target_option)
+ soc = top_class(platform, **top_kwargs)
+ soc.finalize()
+ memory_regions = soc.get_memory_regions()
+ csr_regions = soc.get_csr_regions()
+
+ # decode actions
+ action_list = ["clean", "build-csr-csv", "build-csr-header", "build-bitstream", "load-bitstream", "all"]
+ actions = {k: False for k in action_list}
+ for action in args.action:
+ if action in actions:
+ actions[action] = True
+ else:
+ print("Unknown action: "+action+". Valid actions are:")
+ for a in action_list:
+ print(" "+a)
+ sys.exit(1)
+
+ print("""
+ __ _ __ ______ __
+ / / (_) /____ / __/ /_/ /
+ / /__/ / __/ -_) _// __/ _ \\
+ /____/_/\__/\__/___/\__/_//_/
+
+ A small footprint and configurable PCIe
+ core powered by Migen
+====== Building options: ======
+Platform: {}
+Target: {}
+Subtarget: {}
+System Clk: {} MHz
+===============================""".format(
+ platform_name,
+ args.target,
+ top_class.__name__,
+ soc.clk_freq/1000000
+ )
+)
+
+ # dependencies
+ if actions["all"]:
+ actions["build-csr-csv"] = True
+ actions["build-csr-header"] = True
+ actions["build-bitstream"] = True
+ actions["load-bitstream"] = True
+
+ if actions["build-bitstream"]:
+ actions["build-csr-csv"] = True
+ actions["build-csr-header"] = True
+ actions["build-bitstream"] = True
+ actions["load-bitstream"] = True
+
+ if actions["clean"]:
+ subprocess.call(["rm", "-rf", "build/*"])
+
+ if actions["build-csr-csv"]:
+ csr_csv = cpuif.get_csr_csv(csr_regions)
+ write_to_file(args.csr_csv, csr_csv)
+
+ if actions["build-csr-header"]:
+ csr_header = cpuif.get_csr_header(csr_regions, soc.get_constants(), with_access_functions=False)
+ write_to_file(args.csr_header, csr_header)
+
+ if actions["build-bitstream"]:
+ vns = platform.build(soc, build_name=build_name)
+ if hasattr(soc, "do_exit") and vns is not None:
+ if hasattr(soc.do_exit, '__call__'):
+ soc.do_exit(vns)
+
+ if actions["load-bitstream"]:
+ prog = platform.create_programmer()
+ prog.load_bitstream("build/" + build_name + platform.bitstream_ext)
--- /dev/null
+from migen.bus import wishbone
+from migen.bank.description import *
+from migen.genlib.io import CRG
+from migen.genlib.resetsync import AsyncResetSynchronizer
+from migen.genlib.misc import timeline
+
+from misoclib.soc import SoC
+from misoclib.tools.litescope.common import *
+from misoclib.tools.litescope.bridge.uart2wb import LiteScopeUART2WB
+
+from misoclib.com.litepcie.phy.s7pciephy import S7PCIEPHY
+from misoclib.com.litepcie.core import Endpoint
+from misoclib.com.litepcie.core.irq.interrupt_controller import InterruptController
+from misoclib.com.litepcie.frontend.dma import DMA
+from misoclib.com.litepcie.frontend.bridge.wishbone import WishboneBridge
+
+
+class _CRG(Module, AutoCSR):
+    """Clock/reset generation.
+
+    Drives the "sys" clock domain from the PCIe 125MHz user clock and
+    provides a CSR-triggered soft reset plus a scratch test register.
+    """
+    def __init__(self, platform):
+        self.clock_domains.cd_sys = ClockDomain("sys")
+        self.clock_domains.cd_clk125 = ClockDomain("clk125")
+
+        # soft reset generation
+        self._soft_rst = CSR()
+        soft_rst = Signal()
+        # trigger soft reset 1us (125 cycles @ 125MHz) after CSR access
+        # to terminate the Wishbone access when reseting from PCIe
+        self.sync += [
+            timeline(self._soft_rst.re & self._soft_rst.r, [(125, [soft_rst.eq(1)])]),
+        ]
+
+        # sys_clk / sys_rst (from PCIe)
+        self.comb += self.cd_sys.clk.eq(self.cd_clk125.clk)
+        self.specials += AsyncResetSynchronizer(self.cd_sys, self.cd_clk125.rst | soft_rst)
+
+        # scratch register (host-visible read/write test register)
+        self._scratch = CSR(32)
+        self.sync += If(self._scratch.re, self._scratch.w.eq(self._scratch.r))
+
+
+class PCIeDMASoC(SoC, AutoCSR):
+ default_platform = "kc705"
+ csr_map = {
+ "crg": 16,
+ "pcie_phy": 17,
+ "dma": 18,
+ "irq_controller": 19
+ }
+ csr_map.update(SoC.csr_map)
+ interrupt_map = {
+ "dma_writer": 0,
+ "dma_reader": 1
+ }
+ interrupt_map.update(SoC.interrupt_map)
+ mem_map = {
+ "csr": 0x00000000, # (shadow @0x80000000)
+ }
+ mem_map.update(SoC.csr_map)
+
+ def __init__(self, platform, with_uart_bridge=True):
+ clk_freq = 125*1000000
+ SoC.__init__(self, platform, clk_freq,
+ cpu_type="none",
+ shadow_address=0x00000000,
+ with_csr=True, csr_data_width=32,
+ with_uart=False,
+ with_identifier=True,
+ with_timer=False
+ )
+ self.submodules.crg = _CRG(platform)
+
+ # PCIe endpoint
+ self.submodules.pcie_phy = S7PCIEPHY(platform, link_width=2)
+ self.submodules.pcie_endpoint = Endpoint(self.pcie_phy, with_reordering=True)
+
+ # PCIe Wishbone bridge
+ self.add_cpu_or_bridge(WishboneBridge(self.pcie_endpoint, lambda a: 1))
+ self.add_wb_master(self.cpu_or_bridge.wishbone)
+
+ # PCIe DMA
+ self.submodules.dma = DMA(self.pcie_phy, self.pcie_endpoint, with_loopback=True)
+ self.dma.source.connect(self.dma.sink)
+
+ if with_uart_bridge:
+ self.submodules.uart_bridge = LiteScopeUART2WB(platform.request("serial"), clk_freq, baudrate=115200)
+ self.add_wb_master(self.uart_bridge.wishbone)
+
+ # IRQs
+ self.submodules.irq_controller = InterruptController()
+ self.comb += self.irq_controller.source.connect(self.pcie_phy.interrupt)
+ self.interrupts = {
+ "dma_writer": self.dma.writer.table.irq,
+ "dma_reader": self.dma.reader.table.irq
+ }
+ for k, v in sorted(self.interrupts.items()):
+ self.comb += self.irq_controller.irqs[self.interrupt_map[k]].eq(v)
+
+default_subtarget = PCIeDMASoC
--- /dev/null
+#!/usr/bin/env python3
+import argparse
+import importlib
+
+
+def _get_args():
+    """Parse bridge selection (uart/etherbone), its parameters and the
+    list of test modules to run."""
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-b", "--bridge", default="uart", help="Bridge to use")
+    parser.add_argument("--port", default="2", help="UART port")
+    parser.add_argument("--baudrate", default=115200, help="UART baudrate")
+    parser.add_argument("--ip_address", default="192.168.0.42", help="Etherbone IP address")
+    parser.add_argument("--udp_port", default=20000, help="Etherbone UDP port")
+    parser.add_argument("--busword", default=32, help="CSR busword")
+
+    parser.add_argument("test", nargs="+", help="specify a test")
+
+    return parser.parse_args()
+
+if __name__ == "__main__":
+ args = _get_args()
+ if args.bridge == "uart":
+ from misoclib.tools.litescope.host.driver.uart import LiteScopeUARTDriver
+ port = args.port if not args.port.isdigit() else int(args.port)
+ wb = LiteScopeUARTDriver(port, args.baudrate, "./csr.csv", int(args.busword), debug=False)
+ elif args.bridge == "etherbone":
+ from misoclib.tools.litescope.host.driver.etherbone import LiteScopeEtherboneDriver
+ wb = LiteScopeEtherboneDriver(args.ip_address, int(args.udp_port), "./csr.csv", int(args.busword), debug=False)
+ else:
+ ValueError("Invalid bridge {}".format(args.bridge))
+
+ def _import(name):
+ return importlib.import_module(name)
+
+ for test in args.test:
+ t = _import(test)
+ t.main(wb)
--- /dev/null
+def main(wb):
+ wb.open()
+ regs = wb.regs
+ # # #
+ print("sysid : 0x{:04x}".format(regs.identifier_sysid.read()))
+ print("revision : 0x{:04x}".format(regs.identifier_revision.read()))
+ print("frequency : {}MHz".format(int(regs.identifier_frequency.read()/1000000)))
+ print("link up : {}".format(regs.pcie_phy_lnk_up.read()))
+ print("bus_master_enable : {}".format(regs.pcie_phy_bus_master_enable.read()))
+ print("msi_enable : {}".format(regs.pcie_phy_msi_enable.read()))
+ print("max_req_request_size : {}".format(regs.pcie_phy_max_request_size.read()))
+ print("max_payload_size : {}".format(regs.pcie_phy_max_payload_size.read()))
+ # # #
+ wb.close()
--- /dev/null
+from migen.fhdl.std import *
+from migen.genlib.fsm import FSM, NextState
+from migen.bus import wishbone
+
+from misoclib.com.litepcie.common import *
+
+
+class WishboneBridge(Module):
+    """Bridge PCIe MMAP requests to a Wishbone master.
+
+    Requests arriving on the endpoint slave port selected by
+    address_decoder are turned into single 32-bit Wishbone accesses;
+    reads generate a one-dword completion back to the host.
+    """
+    def __init__(self, endpoint, address_decoder):
+        self.wishbone = wishbone.Interface()
+
+        # # #
+
+        port = endpoint.crossbar.get_slave_port(address_decoder)
+        self.submodules.fsm = fsm = FSM()
+
+        fsm.act("IDLE",
+            If(port.sink.stb & port.sink.sop,
+                If(port.sink.we,
+                    NextState("WRITE"),
+                ).Else(
+                    NextState("READ")
+                )
+            ).Else(
+                # flush beats that are not a start of packet
+                port.sink.ack.eq(port.sink.stb)
+            )
+        )
+        fsm.act("WRITE",
+            # PCIe byte address -> Wishbone 32-bit word address
+            self.wishbone.adr.eq(port.sink.adr[2:]),
+            self.wishbone.dat_w.eq(port.sink.dat[:32]),
+            self.wishbone.sel.eq(0xf),
+            self.wishbone.stb.eq(1),
+            self.wishbone.we.eq(1),
+            self.wishbone.cyc.eq(1),
+            If(self.wishbone.ack,
+                port.sink.ack.eq(1),
+                NextState("IDLE")
+            )
+        )
+        fsm.act("READ",
+            self.wishbone.adr.eq(port.sink.adr[2:]),
+            self.wishbone.stb.eq(1),
+            self.wishbone.we.eq(0),
+            self.wishbone.cyc.eq(1),
+            If(self.wishbone.ack,
+                NextState("COMPLETION")
+            )
+        )
+        # latch read data when the Wishbone access completes
+        self.sync += \
+            If(self.wishbone.stb & self.wishbone.ack,
+                port.source.dat.eq(self.wishbone.dat_r),
+            )
+        fsm.act("COMPLETION",
+            # single-dword completion carrying the latched read data
+            port.source.stb.eq(1),
+            port.source.sop.eq(1),
+            port.source.eop.eq(1),
+            port.source.len.eq(1),
+            port.source.err.eq(0),
+            port.source.tag.eq(port.sink.tag),
+            port.source.adr.eq(port.sink.adr),
+            port.source.cmp_id.eq(endpoint.phy.id),
+            port.source.req_id.eq(port.sink.req_id),
+            If(port.source.ack,
+                port.sink.ack.eq(1),
+                NextState("IDLE")
+            )
+        )
--- /dev/null
+from migen.fhdl.std import *
+from migen.bank.description import *
+from migen.actorlib.fifo import SyncFIFO as FIFO
+
+from misoclib.com.litepcie.common import *
+from misoclib.com.litepcie.frontend.dma.common import *
+from misoclib.com.litepcie.frontend.dma.writer import DMAWriter
+from misoclib.com.litepcie.frontend.dma.reader import DMAReader
+
+
+class DMALoopback(Module, AutoCSR):
+    """Optional loopback stage of the DMA datapath.
+
+    When the _enable CSR is set, sink is fed directly back to source;
+    otherwise the stream passes through to next_source/next_sink so the
+    following stage sees it unchanged.
+    """
+    def __init__(self, dw):
+        self._enable = CSRStorage()
+
+        self.sink = Sink(dma_layout(dw))
+        self.source = Source(dma_layout(dw))
+
+        # pass-through endpoints for the next stage in the chain
+        self.next_source = Source(dma_layout(dw))
+        self.next_sink = Sink(dma_layout(dw))
+
+        # # #
+
+        enable = self._enable.storage
+        self.comb += \
+            If(enable,
+                Record.connect(self.sink, self.source)
+            ).Else(
+                Record.connect(self.sink, self.next_source),
+                Record.connect(self.next_sink, self.source)
+            )
+
+
+class DMASynchronizer(Module, AutoCSR):
+    """Gate the DMA datapath on an external event.
+
+    The stream is blocked until, with the _enable CSR set, a valid beat
+    arrives while pps is high (or immediately when _bypass is set).
+    Until then the sink is stalled and next_sink is drained. External
+    logic may hold self.ready low to delay the start further.
+    """
+    def __init__(self, dw):
+        self._bypass = CSRStorage()
+        self._enable = CSRStorage()
+        self.ready = Signal(reset=1)
+        self.pps = Signal()
+
+        self.sink = Sink(dma_layout(dw))
+        self.source = Source(dma_layout(dw))
+
+        # pass-through endpoints for the next stage in the chain
+        self.next_source = Source(dma_layout(dw))
+        self.next_sink = Sink(dma_layout(dw))
+
+        # # #
+
+        bypass = self._bypass.storage
+        enable = self._enable.storage
+        synced = Signal()
+
+        # latch "synced" on the first qualifying beat; cleared when
+        # the synchronizer is disabled
+        self.sync += \
+            If(~enable,
+                synced.eq(0)
+            ).Else(
+                If(self.ready & self.sink.stb & (self.pps | bypass),
+                    synced.eq(1)
+                )
+            )
+
+        self.comb += \
+            If(synced,
+                Record.connect(self.sink, self.next_source),
+                Record.connect(self.next_sink, self.source),
+            ).Else(
+                # Block sink
+                self.next_source.stb.eq(0),
+                self.sink.ack.eq(0),
+
+                # Ack next_sink (drain downstream data while unsynced)
+                self.source.stb.eq(0),
+                self.next_sink.ack.eq(1),
+            )
+
+
+class DMABuffering(Module, AutoCSR):
+ def __init__(self, dw, depth):
+ tx_fifo = FIFO(dma_layout(dw), depth//(dw//8), buffered=True)
+ rx_fifo = FIFO(dma_layout(dw), depth//(dw//8), buffered=True)
+ self.submodules += tx_fifo, rx_fifo
+
+ self.sink = tx_fifo.sink
+ self.source = rx_fifo.source
+
+ self.next_source = tx_fifo.source
+ self.next_sink = rx_fifo.sink
+
+
+class DMA(Module, AutoCSR):
+    """Bidirectional DMA.
+
+    Combines a writer (FPGA -> host) and a reader (host -> FPGA) on the
+    endpoint crossbar; optional loopback, synchronizer and buffering
+    stages are chained between reader.source and writer.sink.
+    """
+    def __init__(self, phy, endpoint,
+                 with_buffering=False, buffering_depth=256*8,
+                 with_loopback=False,
+                 with_synchronizer=False):
+
+        # Writer, Reader
+        self.submodules.writer = DMAWriter(endpoint, endpoint.crossbar.get_master_port(write_only=True))
+        self.submodules.reader = DMAReader(endpoint, endpoint.crossbar.get_master_port(read_only=True))
+        self.sink, self.source = self.writer.sink, self.reader.source
+
+        # Loopback
+        if with_loopback:
+            self.submodules.loopback = DMALoopback(phy.dw)
+            self.insert_optional_module(self.loopback)
+
+        # Synchronizer
+        if with_synchronizer:
+            self.submodules.synchronizer = DMASynchronizer(phy.dw)
+            self.insert_optional_module(self.synchronizer)
+
+        # Buffering
+        if with_buffering:
+            self.submodules.buffering = DMABuffering(phy.dw, buffering_depth)
+            self.insert_optional_module(self.buffering)
+
+    def insert_optional_module(self, m):
+        # splice m between the current endpoints and expose its
+        # pass-through endpoints as the new DMA sink/source
+        self.comb += [
+            Record.connect(self.source, m.sink),
+            Record.connect(m.source, self.sink)
+        ]
+        self.sink, self.source = m.next_sink, m.next_source
--- /dev/null
+from migen.fhdl.std import *
+from migen.bank.description import *
+from migen.genlib.fifo import SyncFIFOBuffered as SyncFIFO
+from migen.genlib.fsm import FSM, NextState
+from migen.genlib.misc import chooser, displacer
+from migen.flow.plumbing import Buffer
+
+from misoclib.com.litepcie.common import *
+
+
+def descriptor_layout(with_user_id=False):
+ layout = [
+ ("address", 32),
+ ("length", 16)
+ ]
+ if with_user_id:
+ layout += [("user_id", 8)]
+ return EndpointDescription(layout, packetized=True)
+
+
+class DMARequestTable(Module, AutoCSR):
+    """Descriptor table for DMA requests.
+
+    Descriptors (address, length) are programmed through CSRs into a
+    FIFO. In "loop" mode each descriptor read out is written back so the
+    table replays forever; the _index CSR lets software track the replay
+    position. irq pulses each time a descriptor is consumed.
+    """
+    def __init__(self, depth):
+        self.source = source = Source(descriptor_layout())
+
+        aw = flen(source.address)
+        lw = flen(source.length)
+
+        self._value = CSRStorage(aw+lw)
+        self._we = CSR()
+        self._loop_prog_n = CSRStorage()
+        self._index = CSRStatus(log2_int(depth))
+        self._level = CSRStatus(log2_int(depth))
+        self._flush = CSR()
+
+        self.irq = Signal()
+
+        # # #
+
+        # CSR signals
+        value = self._value.storage
+        we = self._we.r & self._we.re
+        loop_prog_n = self._loop_prog_n.storage
+        index = self._index.status
+        level = self._level.status
+        flush = self._flush.r & self._flush.re
+
+        # FIFO
+        # instance
+        fifo_layout = [("address", aw), ("length", lw), ("start", 1)]
+        fifo = InsertReset(SyncFIFO(fifo_layout, depth))
+        self.submodules += fifo
+        self.comb += [
+            fifo.reset.eq(flush),
+            level.eq(fifo.level)
+        ]
+
+        # write part
+        self.sync += [
+            # in "loop" mode, each data output of the fifo is
+            # written back
+            If(loop_prog_n,
+                fifo.din.address.eq(fifo.dout.address),
+                fifo.din.length.eq(fifo.dout.length),
+                fifo.din.start.eq(fifo.dout.start),
+                fifo.we.eq(fifo.re)
+            # in "program" mode, fifo input is connected
+            # to registers
+            ).Else(
+                fifo.din.address.eq(value[:aw]),
+                fifo.din.length.eq(value[aw:aw+lw]),
+                # the first descriptor written into an empty table is
+                # flagged as the table start
+                fifo.din.start.eq(~fifo.readable),
+                fifo.we.eq(we)
+            )
+        ]
+
+        # read part
+        self.comb += [
+            source.stb.eq(fifo.readable),
+            fifo.re.eq(source.stb & source.ack),
+            source.address.eq(fifo.dout.address),
+            source.length.eq(fifo.dout.length)
+        ]
+
+        # index
+        # used by the software for synchronization in
+        # "loop" mode
+        self.sync += \
+            If(flush,
+                index.eq(0)
+            ).Elif(source.stb & source.ack,
+                If(fifo.dout.start,
+                    index.eq(0)
+                ).Else(
+                    index.eq(index+1)
+                )
+            )
+
+        # IRQ: pulse on each consumed descriptor
+        self.comb += self.irq.eq(source.stb & source.ack)
+
+
+class DMARequestSplitter(Module, AutoCSR):
+    """Split DMA descriptors into chunks of at most max_size bytes.
+
+    Each input descriptor is emitted as a sequence of (address, length)
+    chunks; all chunks of one descriptor carry the same incrementing
+    user_id so downstream logic can regroup them.
+    """
+    def __init__(self, max_size, buffered=True):
+        self.sink = sink = Sink(descriptor_layout())
+        if buffered:
+            self.submodules.buffer = Buffer(descriptor_layout(True))
+            source = self.buffer.d
+            self.source = self.buffer.q
+        else:
+            self.source = source = Source(descriptor_layout(True))
+
+        # # #
+
+        # byte offset inside the current descriptor
+        offset = Signal(32)
+        clr_offset = Signal()
+        inc_offset = Signal()
+        self.sync += \
+            If(clr_offset,
+                offset.eq(0)
+            ).Elif(inc_offset,
+                offset.eq(offset + max_size)
+            )
+        # user_id increments once per consumed descriptor (wraps at 256)
+        user_id = Signal(8)
+        self.sync += \
+            If(sink.stb & sink.ack,
+                user_id.eq(user_id+1)
+            )
+
+        fsm = FSM(reset_state="IDLE")
+        self.submodules += fsm
+
+        # latch the descriptor length on acceptance
+        length = Signal(16)
+        update_length = Signal()
+        self.sync += If(update_length, length.eq(sink.length))
+
+        fsm.act("IDLE",
+            sink.ack.eq(1),
+            clr_offset.eq(1),
+            If(sink.stb,
+                update_length.eq(1),
+                sink.ack.eq(0),
+                NextState("RUN")
+            )
+        )
+        fsm.act("RUN",
+            source.stb.eq(1),
+            source.address.eq(sink.address + offset),
+            source.user_id.eq(user_id),
+            If((length - offset) > max_size,
+                # full-size chunk, more to come
+                source.length.eq(max_size),
+                inc_offset.eq(source.ack)
+            ).Else(
+                # last (possibly partial) chunk
+                source.length.eq(length - offset),
+                If(source.ack,
+                    NextState("ACK")
+                )
+            )
+        )
+        fsm.act("ACK",
+            # consume the descriptor and return to IDLE
+            sink.ack.eq(1),
+            NextState("IDLE")
+        )
--- /dev/null
+from migen.fhdl.std import *
+from migen.bank.description import *
+from migen.genlib.fsm import FSM, NextState
+from migen.actorlib.fifo import SyncFIFO as FIFO
+
+from misoclib.com.litepcie.common import *
+from misoclib.com.litepcie.core.packet.common import *
+from misoclib.com.litepcie.frontend.dma.common import *
+
+
+class DMAReader(Module, AutoCSR):
+    """Host memory -> FPGA DMA.
+
+    Descriptors from the request table are split into read requests of
+    at most the PHY's max_request_size; completion data is collected in
+    a FIFO exposed on self.source. Requests are throttled so the FIFO
+    always has room for all outstanding completion data.
+    """
+    def __init__(self, endpoint, port, table_depth=256):
+        self.source = Source(dma_layout(endpoint.phy.dw))
+        self._enable = CSRStorage()
+
+        # # #
+
+        enable = self._enable.storage
+
+        max_words_per_request = max_request_size//(endpoint.phy.dw//8)
+        max_pending_words = endpoint.max_pending_requests*max_words_per_request
+
+        # 2x margin over worst-case outstanding completion data
+        fifo_depth = 2*max_pending_words
+
+        # Request generation
+        # requests from table are splitted in chunks of "max_size"
+        self.table = table = DMARequestTable(table_depth)
+        splitter = InsertReset(DMARequestSplitter(endpoint.phy.max_request_size))
+        self.submodules += table, splitter
+        self.comb += splitter.reset.eq(~enable)
+        self.comb += table.source.connect(splitter.sink)
+
+        # Request FSM
+        self.submodules.fsm = fsm = FSM(reset_state="IDLE")
+
+        request_ready = Signal()
+        fsm.act("IDLE",
+            If(request_ready,
+                NextState("REQUEST"),
+            )
+        )
+        fsm.act("REQUEST",
+            # single-beat read request (no payload)
+            port.source.stb.eq(1),
+            port.source.channel.eq(port.channel),
+            port.source.user_id.eq(splitter.source.user_id),
+            port.source.sop.eq(1),
+            port.source.eop.eq(1),
+            port.source.we.eq(0),
+            port.source.adr.eq(splitter.source.address),
+            # length in bytes -> length in dwords
+            port.source.len.eq(splitter.source.length[2:]),
+            port.source.req_id.eq(endpoint.phy.id),
+            port.source.dat.eq(0),
+            If(port.source.ack,
+                splitter.source.ack.eq(1),
+                NextState("IDLE"),
+            )
+        )
+
+        # Data FIFO
+        # issue read requests when enough space available in fifo
+        fifo = InsertReset(FIFO(dma_layout(endpoint.phy.dw), fifo_depth, buffered=True))
+        self.submodules += fifo
+        self.comb += fifo.reset.eq(~enable)
+
+        # regenerate sop only on the first completion beat of each
+        # request (tracked via user_id changes)
+        last_user_id = Signal(8, reset=255)
+        self.sync += \
+            If(port.sink.stb & port.sink.sop & port.sink.ack,
+                last_user_id.eq(port.sink.user_id)
+            )
+        self.comb += [
+            fifo.sink.stb.eq(port.sink.stb),
+            fifo.sink.sop.eq(port.sink.sop & (port.sink.user_id != last_user_id)),
+            fifo.sink.dat.eq(port.sink.dat),
+            # when disabled, completions are drained instead of stored
+            port.sink.ack.eq(fifo.sink.ack | ~enable),
+        ]
+        self.comb += Record.connect(fifo.source, self.source)
+
+        # throttle: only request while the FIFO is less than half full
+        fifo_ready = fifo.fifo.level < (fifo_depth//2)
+        self.comb += request_ready.eq(splitter.source.stb & fifo_ready)
--- /dev/null
+from migen.fhdl.std import *
+from migen.bank.description import *
+from migen.genlib.fifo import SyncFIFOBuffered as SyncFIFO
+from migen.genlib.fsm import FSM, NextState
+
+from misoclib.com.litepcie.common import *
+from misoclib.com.litepcie.core.packet.common import *
+from misoclib.com.litepcie.frontend.dma.common import *
+
+
+class DMAWriter(Module, AutoCSR):
+    """FPGA -> host memory DMA.
+
+    Data from self.sink is buffered in a FIFO; once a full request worth
+    of data is available, a write request described by the table and
+    splitter is emitted on the crossbar port.
+    """
+    def __init__(self, endpoint, port, table_depth=256):
+        self.sink = sink = Sink(dma_layout(endpoint.phy.dw))
+        self._enable = CSRStorage()
+
+        # # #
+
+        enable = self._enable.storage
+
+        max_words_per_request = max_request_size//(endpoint.phy.dw//8)
+        fifo_depth = 4*max_words_per_request
+
+        # Data FIFO
+        # store data until we have enough data to issue a
+        # write request
+        fifo = InsertReset(SyncFIFO(endpoint.phy.dw, fifo_depth))
+        self.submodules += fifo
+        self.comb += [
+            fifo.we.eq(sink.stb & enable),
+            sink.ack.eq(fifo.writable & sink.stb & enable),
+            fifo.din.eq(sink.dat),
+            fifo.reset.eq(~enable)
+        ]
+
+        # Request generation
+        # requests from table are splitted in chunks of "max_size"
+        self.table = table = DMARequestTable(table_depth)
+        splitter = InsertReset(DMARequestSplitter(endpoint.phy.max_payload_size))
+        self.submodules += table, splitter
+        self.comb += splitter.reset.eq(~enable)
+        self.comb += table.source.connect(splitter.sink)
+
+        # Request FSM
+        # word counter inside the current request
+        cnt = Signal(max=(2**flen(endpoint.phy.max_payload_size))/8)
+        clr_cnt = Signal()
+        inc_cnt = Signal()
+        self.sync += \
+            If(clr_cnt,
+                cnt.eq(0)
+            ).Elif(inc_cnt,
+                cnt.eq(cnt + 1)
+            )
+
+        self.submodules.fsm = fsm = FSM(reset_state="IDLE")
+
+        request_ready = Signal()
+        fsm.act("IDLE",
+            clr_cnt.eq(1),
+            If(request_ready,
+                NextState("REQUEST"),
+            )
+        )
+        fsm.act("REQUEST",
+            inc_cnt.eq(port.source.stb & port.source.ack),
+
+            port.source.stb.eq(1),
+            port.source.channel.eq(port.channel),
+            port.source.user_id.eq(splitter.source.user_id),
+            port.source.sop.eq(cnt == 0),
+            # length[3:] = request length in bus words
+            port.source.eop.eq(cnt == splitter.source.length[3:]-1),
+            port.source.we.eq(1),
+            port.source.adr.eq(splitter.source.address),
+            port.source.req_id.eq(endpoint.phy.id),
+            port.source.tag.eq(0),
+            port.source.len.eq(splitter.source.length[2:]),
+            port.source.dat.eq(fifo.dout),
+
+            If(port.source.ack,
+                fifo.re.eq(1),
+                If(port.source.eop,
+                    splitter.source.ack.eq(1),
+                    NextState("IDLE"),
+                )
+            )
+        )
+
+        # NOTE(review): request_ready is registered here (sync) while
+        # DMAReader computes it combinatorially -- presumably to ease
+        # timing on the level comparison; confirm the extra cycle of
+        # latency is intended.
+        fifo_ready = fifo.level >= splitter.source.length[3:]
+        self.sync += request_ready.eq(splitter.source.stb & fifo_ready)
--- /dev/null
+import os
+from migen.fhdl.std import *
+from migen.bank.description import *
+
+from misoclib.com.litepcie.common import *
+
+
+def get_gt(device):
+ if device[:4] == "xc7k":
+ return "GTX"
+ elif device[:4] == "xc7a":
+ return "GTP"
+ else:
+ raise ValueError("Unsupported device"+device)
+
+
+class S7PCIEPHY(Module, AutoCSR):
+    """Xilinx 7-series PCIe PHY wrapper.
+
+    Instantiates the "pcie_phy" Verilog wrapper (sources added from the
+    extcores directory) and exposes the AXI-ST TX/RX interfaces as
+    sink/source endpoints, an MSI interrupt sink, and status CSRs
+    (link up, MSI/bus-master enable, negotiated max request/payload
+    sizes).
+    """
+    def __init__(self, platform, dw=64, link_width=2, bar0_size=1*MB):
+        pads = platform.request("pcie_x"+str(link_width))
+        device = platform.device
+        self.dw = dw
+        self.link_width = link_width
+
+        self.sink = Sink(phy_layout(dw))
+        self.source = Source(phy_layout(dw))
+        self.interrupt = Sink(interrupt_layout())
+
+        # completer id: {bus, device, function}
+        self.id = Signal(16)
+
+        self.tx_buf_av = Signal(8)
+        self.tx_terr_drop = Signal()
+        self.tx_cfg_req = Signal()
+        self.tx_cfg_gnt = Signal(reset=1)
+
+        self.rx_np_ok = Signal(reset=1)
+        self.rx_np_req = Signal(reset=1)
+
+        self.cfg_to_turnoff = Signal()
+
+        self._lnk_up = CSRStatus()
+        self._msi_enable = CSRStatus()
+        self._bus_master_enable = CSRStatus()
+        self._max_request_size = CSRStatus(16)
+        self._max_payload_size = CSRStatus(16)
+        self.max_request_size = self._max_request_size.status
+        self.max_payload_size = self._max_payload_size.status
+
+        self.bar0_size = bar0_size
+        self.bar0_mask = get_bar_mask(bar0_size)
+
+        # SHARED clock
+        # In case we want to use the second QPLL of the quad
+        self.shared_qpll_pd = Signal(reset=1)
+        self.shared_qpll_rst = Signal(reset=1)
+        self.shared_qpll_refclk = Signal()
+        self.shared_qpll_outclk = Signal()
+        self.shared_qpll_outrefclk = Signal()
+        self.shared_qpll_lock = Signal()
+
+        # # #
+
+        # 100MHz PCIe reference clock input buffer
+        clk100 = Signal()
+        self.specials += Instance("IBUFDS_GTE2",
+            i_CEB=0,
+            i_I=pads.clk_p,
+            i_IB=pads.clk_n,
+            o_O=clk100,
+            o_ODIV2=Signal()
+        )
+
+        bus_number = Signal(8)
+        device_number = Signal(5)
+        function_number = Signal(3)
+        command = Signal(16)
+        dcommand = Signal(16)
+
+        self.specials += Instance("pcie_phy",
+            p_C_DATA_WIDTH=dw,
+            p_C_PCIE_GT_DEVICE=get_gt(device),
+            p_C_BAR0=get_bar_mask(self.bar0_size),
+
+            i_sys_clk=clk100,
+            i_sys_rst_n=pads.rst_n,
+
+            o_pci_exp_txp=pads.tx_p,
+            o_pci_exp_txn=pads.tx_n,
+
+            i_pci_exp_rxp=pads.rx_p,
+            i_pci_exp_rxn=pads.rx_n,
+
+            o_user_clk=ClockSignal("clk125"),
+            o_user_reset=ResetSignal("clk125"),
+            o_user_lnk_up=self._lnk_up.status,
+
+            o_tx_buf_av=self.tx_buf_av,
+            o_tx_terr_drop=self.tx_terr_drop,
+            o_tx_cfg_req=self.tx_cfg_req,
+            i_tx_cfg_gnt=self.tx_cfg_gnt,
+
+            # TX AXI-ST (endpoint -> host)
+            i_s_axis_tx_tvalid=self.sink.stb,
+            i_s_axis_tx_tlast=self.sink.eop,
+            o_s_axis_tx_tready=self.sink.ack,
+            i_s_axis_tx_tdata=self.sink.dat,
+            i_s_axis_tx_tkeep=self.sink.be,
+            i_s_axis_tx_tuser=0,
+
+            i_rx_np_ok=self.rx_np_ok,
+            i_rx_np_req=self.rx_np_req,
+
+            # RX AXI-ST (host -> endpoint)
+            o_m_axis_rx_tvalid=self.source.stb,
+            o_m_axis_rx_tlast=self.source.eop,
+            i_m_axis_rx_tready=self.source.ack,
+            o_m_axis_rx_tdata=self.source.dat,
+            o_m_axis_rx_tkeep=self.source.be,
+            o_m_axis_rx_tuser=Signal(4),
+
+            o_cfg_to_turnoff=self.cfg_to_turnoff,
+            o_cfg_bus_number=bus_number,
+            o_cfg_device_number=device_number,
+            o_cfg_function_number=function_number,
+            o_cfg_command=command,
+            o_cfg_dcommand=dcommand,
+            o_cfg_interrupt_msienable=self._msi_enable.status,
+
+            i_cfg_interrupt=self.interrupt.stb,
+            o_cfg_interrupt_rdy=self.interrupt.ack,
+            i_cfg_interrupt_di=self.interrupt.dat,
+
+            i_SHARED_QPLL_PD=self.shared_qpll_pd,
+            i_SHARED_QPLL_RST=self.shared_qpll_rst,
+            i_SHARED_QPLL_REFCLK=self.shared_qpll_refclk,
+            o_SHARED_QPLL_OUTCLK=self.shared_qpll_outclk,
+            o_SHARED_QPLL_OUTREFCLK=self.shared_qpll_outrefclk,
+            o_SHARED_QPLL_LOCK=self.shared_qpll_lock,
+        )
+
+        # id
+        self.comb += self.id.eq(Cat(function_number, device_number, bus_number))
+
+        # config
+        def convert_size(command, size):
+            # decode the 3-bit encoded size field: 0 -> 128 bytes,
+            # doubling for each increment
+            cases = {}
+            value = 128
+            for i in range(6):
+                cases[i] = size.eq(value)
+                value = value*2
+            return Case(command, cases)
+
+        self.sync += [
+            self._bus_master_enable.status.eq(command[2]),
+            convert_size(dcommand[12:15], self.max_request_size),
+            convert_size(dcommand[5:8], self.max_payload_size)
+        ]
+
+        # add the phy wrapper verilog sources
+        extcores_path = "extcores"
+        # XXX find a better way to do this?
+        current_path = os.getcwd()
+        current_path = current_path.replace("\\", "/")
+        if "litepcie/example_designs" in current_path:
+            extcores_path = os.path.join("..", "..", "..", "..", extcores_path)
+        platform.add_source_dir(os.path.join(extcores_path, "litepcie_phy_wrappers", "xilinx", "7-series", "common"))
+        if device[:4] == "xc7k":
+            platform.add_source_dir(os.path.join(extcores_path, "litepcie_phy_wrappers", "xilinx", "7-series", "kintex7"))
+        elif device[:4] == "xc7a":
+            platform.add_source_dir(os.path.join(extcores_path, "litepcie_phy_wrappers", "xilinx", "7-series", "artix7"))
--- /dev/null
+# Run the LitePCIe testbenches with the misoclib tree on the python path.
+MSCDIR = ../../../../
+PYTHON = python3
+
+CMD = PYTHONPATH=$(MSCDIR) $(PYTHON)
+
+wishbone_tb:
+	$(CMD) wishbone_tb.py
+
+dma_tb:
+	$(CMD) dma_tb.py
--- /dev/null
+import random
+
+
+def print_with_prefix(s, prefix=""):
+ if not isinstance(s, str):
+ s = s.__repr__()
+ s = s.split("\n")
+ for l in s:
+ print(prefix + l)
+
+
+def seed_to_data(seed, random=True):
+ if random:
+ return (seed * 0x31415979 + 1) & 0xffffffff
+ else:
+ return seed
+
+
+def check(ref, res):
+ if isinstance(ref, int):
+ return 0, 1, int(ref != res)
+ else:
+ shift = 0
+ while((ref[0] != res[0]) and (len(res) > 1)):
+ res.pop(0)
+ shift += 1
+ length = min(len(ref), len(res))
+ errors = 0
+ for i in range(length):
+ if ref.pop(0) != res.pop(0):
+ errors += 1
+ return shift, length, errors
+
+
+def randn(max_n):
+ return random.randint(0, max_n-1)
--- /dev/null
+import random
+from migen.fhdl.std import *
+from migen.sim.generic import run_simulation
+from migen.actorlib.structuring import Converter
+
+from misoclib.com.litepcie.common import *
+from misoclib.com.litepcie.core import Endpoint
+from misoclib.com.litepcie.core.irq import interrupt_controller
+from misoclib.com.litepcie.frontend.dma import writer, reader
+
+from misoclib.com.litepcie.test.common import *
+from misoclib.com.litepcie.test.model.host import *
+
+# IRQ bit masks, matching the wiring in the TB below
+DMA_READER_IRQ = 1
+DMA_WRITER_IRQ = 2
+
+# simulated host/endpoint PCIe requester ids and DMA parameters
+root_id = 0x100
+endpoint_id = 0x400
+max_length = Signal(8, reset=128)
+dma_size = 1024
+
+
+class DMADriver():
+    """Simulation driver for a DMA's CSRs.
+
+    dma is the attribute name of the DMA on the testbench ("dma_reader"
+    or "dma_writer"); selfp is the simulation proxy. Every method is a
+    generator meant to be yielded from gen_simulation.
+    """
+    def __init__(self, dma, selfp):
+        self.dma = dma
+        self.selfp = selfp
+
+    def set_prog_mode(self):
+        # descriptor table filled by software
+        dma = getattr(self.selfp, self.dma)
+        dma.table._loop_prog_n.storage = 0
+        yield
+
+    def set_loop_mode(self):
+        # descriptor table replays itself
+        dma = getattr(self.selfp, self.dma)
+        dma.table._loop_prog_n.storage = 1
+        yield
+
+    def flush(self):
+        # pulse the flush CSR to empty the descriptor table
+        dma = getattr(self.selfp, self.dma)
+        dma.table._flush.re = 1
+        yield
+        dma.table._flush.re = 0
+        yield
+
+    def program_descriptor(self, address, length):
+        # pack address (low 32 bits) with length above, then pulse we
+        value = address
+        value |= (length << 32)
+
+        dma = getattr(self.selfp, self.dma)
+
+        dma.table._value.storage = value
+        dma.table._we.r = 1
+        dma.table._we.re = 1
+        yield
+        dma.table._we.re = 0
+        yield
+
+    def enable(self):
+        dma = getattr(self.selfp, self.dma)
+        dma._enable.storage = 1
+        yield
+
+    def disable(self):
+        dma = getattr(self.selfp, self.dma)
+        dma._enable.storage = 0
+        yield
+
+
+class InterruptHandler(Module):
+    """Simulation model of a host interrupt handler.
+
+    Acks MSIs from the interrupt controller, reads the vector CSR and
+    clears the serviced bits. dma_writer_irq flags a serviced DMA writer
+    interrupt for one simulation cycle.
+    """
+    def __init__(self, debug=False):
+        self.debug = debug
+        self.sink = Sink(interrupt_layout())
+        self.dma_writer_irq = 0
+
+    def set_tb_selfp(self, tb_selfp):
+        # keep a handle on the testbench proxy to reach its CSRs
+        self.tb_selfp = tb_selfp
+
+    def do_simulation(self, selfp):
+        tb_selfp = self.tb_selfp
+        tb_selfp.irq_controller._clear.r = 0
+        tb_selfp.irq_controller._clear.re = 0
+        selfp.sink.ack = 1
+        self.dma_writer_irq = 0
+        # service interrupts at a reduced rate (every 4th cycle)
+        if selfp.sink.stb and (selfp.simulator.cycle_counter%4 == 0):
+            # get vector
+            irq_vector = tb_selfp.irq_controller._vector.status
+
+            # handle irq
+            if irq_vector & DMA_READER_IRQ:
+                if self.debug:
+                    print("DMA_READER IRQ : {}".format(tb_selfp.dma_reader.table._index.status))
+                # clear irq_controller
+                tb_selfp.irq_controller._clear.re = 1
+                tb_selfp.irq_controller._clear.r |= DMA_READER_IRQ
+
+            if irq_vector & DMA_WRITER_IRQ:
+                if self.debug:
+                    print("DMA_WRITER IRQ : {}".format(tb_selfp.dma_writer.table._index.status))
+                # clear irq_controller
+                tb_selfp.irq_controller._clear.re = 1
+                tb_selfp.irq_controller._clear.r |= DMA_WRITER_IRQ
+                self.dma_writer_irq = 1
+
+
+test_size = 16*1024
+
+
class TB(Module):
    """DMA loopback testbench.

    Datapath: host memory -> dma_reader -> (optional 64/16-bit converters)
    -> dma_writer -> host memory; completion is tracked via interrupts.
    """
    def __init__(self, with_converter=False):
        self.submodules.host = Host(64, root_id, endpoint_id,
                                    phy_debug=False,
                                    chipset_debug=False, chipset_split=True, chipset_reordering=True,
                                    host_debug=True)
        self.submodules.endpoint = Endpoint(self.host.phy, max_pending_requests=8, with_reordering=True)
        self.submodules.dma_reader = reader.DMAReader(self.endpoint, self.endpoint.crossbar.get_master_port(read_only=True))
        self.submodules.dma_writer = writer.DMAWriter(self.endpoint, self.endpoint.crossbar.get_master_port(write_only=True))

        if with_converter:
            # Stress the datapath through a 64 -> 16 -> 64 bit conversion.
            self.submodules.up_converter = Converter(dma_layout(16), dma_layout(64))
            self.submodules.down_converter = Converter(dma_layout(64), dma_layout(16))

            self.comb += [
                self.dma_reader.source.connect(self.down_converter.sink),
                self.down_converter.source.connect(self.up_converter.sink),
                self.up_converter.source.connect(self.dma_writer.sink)
            ]
        else:
            # Plain loopback: reader output feeds the writer directly.
            self.comb += self.dma_reader.source.connect(self.dma_writer.sink)

        self.submodules.irq_controller = interrupt_controller.InterruptController(2)
        self.comb += [
            self.irq_controller.irqs[log2_int(DMA_READER_IRQ)].eq(self.dma_reader.table.irq),
            self.irq_controller.irqs[log2_int(DMA_WRITER_IRQ)].eq(self.dma_writer.table.irq)
        ]
        self.submodules.irq_handler = InterruptHandler()
        self.comb += self.irq_controller.source.connect(self.irq_handler.sink)

    def gen_simulation(self, selfp):
        # Fill the first half of host memory with a known pattern.
        self.host.malloc(0x00000000, test_size*2)
        self.host.chipset.enable()
        host_datas = [seed_to_data(i, True) for i in range(test_size//4)]
        self.host.write_mem(0x00000000, host_datas)

        dma_reader_driver = DMADriver("dma_reader", selfp)
        dma_writer_driver = DMADriver("dma_writer", selfp)

        self.irq_handler.set_tb_selfp(selfp)

        # Program 8 reader descriptors covering the source buffer.
        yield from dma_reader_driver.set_prog_mode()
        yield from dma_reader_driver.flush()
        for i in range(8):
            yield from dma_reader_driver.program_descriptor((test_size//8)*i, test_size//8)

        # Program 8 writer descriptors covering the destination (2nd half).
        yield from dma_writer_driver.set_prog_mode()
        yield from dma_writer_driver.flush()
        for i in range(8):
            yield from dma_writer_driver.program_descriptor(test_size + (test_size//8)*i, test_size//8)

        selfp.irq_controller._enable.storage = DMA_READER_IRQ | DMA_WRITER_IRQ

        yield from dma_reader_driver.enable()
        yield from dma_writer_driver.enable()

        # Wait for 8 writer-completion irqs (one per descriptor).
        i = 0
        while i != 8:
            i += self.irq_handler.dma_writer_irq
            yield

        # Let the pipeline drain before checking the destination buffer.
        for i in range(100):
            yield
        loopback_datas = self.host.read_mem(test_size, test_size)

        s, l, e = check(host_datas, loopback_datas)
        print("shift " + str(s) + " / length " + str(l) + " / errors " + str(e))
+
if __name__ == "__main__":
    # Run the DMA loopback testbench and dump a VCD trace for inspection.
    run_simulation(TB(with_converter=False), ncycles=4000, vcd_name="my.vcd", keep_files=True)
--- /dev/null
+from misoclib.com.litepcie.common import *
+from misoclib.com.litepcie.core.packet.common import *
+from misoclib.com.litepcie.test.common import *
+from misoclib.com.litepcie.test.model.tlp import *
+
+
def print_chipset(s):
    """Print *s* prefixed with the chipset model's log tag."""
    print_with_prefix(s, "[CHIPSET] ")
+
+
def find_cmp_tags(queue):
    """Return the distinct completion tags in *queue*, in first-seen order."""
    return list(dict.fromkeys(tag for tag, _dwords in queue))
+
+
def find_first_cmp_msg(queue, msg_tag):
    """Return the index of the first queued completion carrying *msg_tag*
    (None if the tag is absent)."""
    return next((i for i, (tag, _dwords) in enumerate(queue) if tag == msg_tag), None)
+
+
# Chipset model
class Chipset(Module):
    """Behavioral model of the host chipset / root complex.

    Encodes and sends WR32/RD32 requests to the endpoint, forwards the
    endpoint's own requests to a host callback, and returns completions —
    optionally split into random chunks and/or reordered across tags to
    stress the endpoint's reordering logic.
    """
    def __init__(self, phy, root_id, debug=False, with_reordering=False):
        self.phy = phy
        self.root_id = root_id
        self.debug = debug
        self.with_reordering = with_reordering
        ###
        self.rd32_data = []   # payload of the most recent rd32 completion
        self.cmp_queue = []   # pending completions, as (tag, dwords) tuples
        self.en = False       # when False, gen_simulation ignores the phy

    def set_host_callback(self, callback):
        # callback(msg) is invoked for every TLP received from the endpoint.
        self.host_callback = callback

    def enable(self):
        """Start processing endpoint traffic."""
        self.en = True

    def disable(self):
        """Stop processing endpoint traffic."""
        self.en = False

    def wr32(self, adr, data):
        """Send a 32-bit memory write of *data* (list of dwords) at *adr*.

        Generator: blocks (yields) until the packet has been transmitted.
        """
        wr32 = WR32()
        wr32.fmt = 0b10
        wr32.type = 0b00000
        wr32.length = len(data)
        wr32.first_be = 0xf
        wr32.address = adr
        wr32.requester_id = self.root_id
        dwords = wr32.encode_dwords(data)
        if self.debug:
            print_chipset(">>>>>>>>")
            print_chipset(parse_dwords(dwords))
        yield from self.phy.send_blocking(dwords)

    def rd32(self, adr, length=1):
        """Send a 32-bit memory read and wait for its completion.

        The completion payload is left in ``self.rd32_data``.
        """
        rd32 = RD32()
        rd32.fmt = 0b00
        rd32.type = 0b00000
        rd32.length = length
        rd32.first_be = 0xf
        rd32.address = adr
        rd32.requester_id = self.root_id
        dwords = rd32.encode_dwords()
        if self.debug:
            print_chipset(">>>>>>>>")
            print_chipset(parse_dwords(dwords))
        yield from self.phy.send_blocking(dwords)
        # Poll the phy until the completion arrives.
        dwords = None
        while dwords is None:
            dwords = self.phy.receive()
            yield
        cpld = CPLD(dwords)
        self.rd32_data = cpld.data
        if self.debug:
            print_chipset("<<<<<<<<")
            print_chipset(cpld)

    def cmp(self, req_id, data, byte_count=None, lower_address=0, tag=0, with_split=False):
        """Queue a completion for requester *req_id*.

        With *with_split*, the payload is recursively split into chunks of a
        randomly chosen size (64/128/256 bytes) to model real chipsets.
        """
        if with_split:
            # NOTE(review): relies on `random` being re-exported by one of
            # the star imports at the top of the file — confirm.
            d = random.choice([64, 128, 256])
            n = byte_count//d
            if n == 0:
                # Request smaller than the split size: send it whole.
                self.cmp(req_id, data, byte_count=byte_count, tag=tag)
            else:
                for i in range(n):
                    # i-th chunk of the dword payload; byte_count field
                    # carries the bytes still remaining, per PCIe semantics.
                    cmp_data = data[i*byte_count//(4*n):(i+1)*byte_count//(4*n)]
                    self.cmp(req_id, cmp_data, byte_count=byte_count-i*byte_count//n, tag=tag)
        else:
            # CPL without data for empty payloads, CPLD otherwise.
            if len(data) == 0:
                fmt = 0b00
                cpl = CPL()
            else:
                fmt = 0b10
                cpl = CPLD()
            cpl.fmt = fmt
            cpl.type = 0b01010
            cpl.length = len(data)
            cpl.lower_address = lower_address
            cpl.requester_id = req_id
            cpl.completer_id = self.root_id
            if byte_count is None:
                cpl.byte_count = len(data)*4
            else:
                cpl.byte_count = byte_count
            cpl.tag = tag
            if len(data) == 0:
                dwords = cpl.encode_dwords()
            else:
                dwords = cpl.encode_dwords(data)
            self.cmp_queue.append((tag, dwords))

    def cmp_callback(self):
        # Send one queued completion per received TLP.  With reordering,
        # pick a random tag and send its oldest chunk — chunks belonging to
        # the same tag therefore stay in order, as PCIe requires.
        if len(self.cmp_queue):
            if self.with_reordering:
                tags = find_cmp_tags(self.cmp_queue)
                tag = random.choice(tags)
                n = find_first_cmp_msg(self.cmp_queue, tag)
                tag, dwords = self.cmp_queue.pop(n)
            else:
                tag, dwords = self.cmp_queue.pop(0)
            if self.debug:
                print_chipset(">>>>>>>>")
                print_chipset(parse_dwords(dwords))
            self.phy.send(dwords)

    def gen_simulation(self, selfp):
        # Poll the phy every cycle: forward each received TLP to the host
        # callback, then push out any pending completion.
        while True:
            if self.en:
                dwords = self.phy.receive()
                if dwords is not None:
                    msg = parse_dwords(dwords)
                    if self.debug:
                        print_chipset(" <<<<<<<< (Callback)")
                        print_chipset(msg)
                    self.host_callback(msg)
                    self.cmp_callback()
            yield
--- /dev/null
+from misoclib.com.litepcie.common import *
+from misoclib.com.litepcie.core.packet.common import *
+from misoclib.com.litepcie.test.common import *
+from misoclib.com.litepcie.test.model.phy import PHY
+from misoclib.com.litepcie.test.model.tlp import *
+from misoclib.com.litepcie.test.model.chipset import Chipset
+
+
def print_host(s):
    """Print *s* prefixed with the host model's log tag."""
    print_with_prefix(s, "[HOST] ")
+
+
# Host model
class Host(Module):
    """Behavioral model of the host PC: a chipset front-end plus a block of
    host memory that the endpoint's WR32/RD32 requests operate on."""
    def __init__(self, dw, root_id, endpoint_id, bar0_size=1*MB,
                 phy_debug=False,
                 chipset_debug=False, chipset_split=False, chipset_reordering=False,
                 host_debug=False):
        self.debug = host_debug
        self.chipset_split = chipset_split
        ###
        self.submodules.phy = PHY(dw, endpoint_id, bar0_size, phy_debug)
        self.submodules.chipset = Chipset(self.phy, root_id, chipset_debug, chipset_reordering)
        self.chipset.set_host_callback(self.callback)

        # Endpoint read requests waiting to be completed.
        self.rd32_queue = []

    def malloc(self, base, length):
        """Allocate *length* bytes of host memory at byte address *base*."""
        self.base = base
        self.buffer = [0]*(length//4)   # backing store, one int per dword

    def write_mem(self, adr, data):
        """Write *data* (list of dwords) starting at byte address *adr*."""
        if self.debug:
            print_host("Writing {} bytes at 0x{:08x}".format(len(data)*4, adr))
        current_adr = (adr-self.base)//4
        for i in range(len(data)):
            self.buffer[current_adr+i] = data[i]

    def read_mem(self, adr, length=1):
        """Read *length* bytes at byte address *adr*; returns a dword list.

        NOTE(review): *length* is in bytes, so the default of 1 reads
        nothing (1//4 == 0); callers always pass multiples of 4 — confirm
        the intended default.
        """
        if self.debug:
            print_host("Reading {} bytes at 0x{:08x}".format(length, adr))
        current_adr = (adr-self.base)//4
        data = []
        for i in range(length//4):
            data.append(self.buffer[current_adr+i])
        return data

    def callback(self, msg):
        """Chipset callback: apply endpoint writes, queue endpoint reads."""
        if isinstance(msg, WR32):
            address = msg.address*4
            self.write_mem(address, msg.data)
        elif isinstance(msg, RD32):
            self.rd32_queue.append(msg)

    def gen_simulation(self, selfp):
        # Service queued endpoint reads; the completion itself is queued on
        # the chipset (optionally split) and sent from its own generator.
        while True:
            if len(self.rd32_queue):
                msg = self.rd32_queue.pop(0)
                address = msg.address*4
                length = msg.length*4
                data = self.read_mem(address, length)
                self.chipset.cmp(msg.requester_id, data, byte_count=length, tag=msg.tag, with_split=self.chipset_split)
            else:
                yield
--- /dev/null
+import math
+from misoclib.com.litepcie.common import *
+from misoclib.com.litepcie.core.packet.common import *
+from misoclib.com.litepcie.test.common import *
+
+
def print_chipset(s):
    """Print *s* prefixed with the PHY model's log tag.

    NOTE(review): the function name looks copy-pasted from the chipset
    model (the prefix is "[PHY] "); consider renaming to ``print_phy`` —
    kept as-is here for interface stability.
    """
    print_with_prefix(s, "[PHY] ")
+
+
# PHY Layer model
class PHYPacket():
    """One packet on the PHY link: parallel lists of data beats and
    byte-enables, plus start/done flags used for simulation handshaking.

    Fix: the original used mutable default arguments (``dat=[], be=[]``),
    so every packet built without arguments shared the same two lists and
    an ``append`` on one instance leaked into all the others (PHYSink
    appends into its initial default-constructed packet).
    """
    def __init__(self, dat=None, be=None):
        self.dat = [] if dat is None else dat   # data beats
        self.be = [] if be is None else be      # byte-enable per beat
        self.start = 1   # transmission not yet begun
        self.done = 0    # set once the last beat has been transferred
+
+
class PHYSource(Module):
    """Drives the PHY source endpoint from a FIFO of PHYPacket objects."""
    def __init__(self, dw):
        self.source = Source(phy_layout(dw))
        ###
        self.packets = []          # pending packets, sent in FIFO order
        self.packet = PHYPacket()
        self.packet.done = 1       # start idle: nothing in flight

    def send(self, packet):
        """Queue *packet* for transmission (non-blocking)."""
        self.packets.append(packet)

    def send_blocking(self, packet):
        """Queue *packet* and yield until it has been fully transmitted."""
        self.send(packet)
        while packet.done == 0:
            yield

    def do_simulation(self, selfp):
        # Pop the next packet once the current one has finished.
        if len(self.packets) and self.packet.done:
            self.packet = self.packets.pop(0)
        if self.packet.start and not self.packet.done:
            # First beat: assert stb/sop and present the first word.
            selfp.source.stb = 1
            selfp.source.sop = 1
            selfp.source.dat = self.packet.dat.pop(0)
            selfp.source.be = self.packet.be.pop(0)
            self.packet.start = 0
        elif selfp.source.stb == 1 and selfp.source.ack == 1:
            # Subsequent beats, advanced on each accepted transfer.
            selfp.source.sop = 0
            # NOTE(review): eop is only driven here, so a one-beat packet
            # never asserts eop.  All TLPs in these tests span at least two
            # beats at dw=64 — confirm before reusing with wider datapaths.
            selfp.source.eop = (len(self.packet.dat) == 1)
            if len(self.packet.dat) > 0:
                selfp.source.stb = 1
                selfp.source.dat = self.packet.dat.pop(0)
                selfp.source.be = self.packet.be.pop(0)
            else:
                self.packet.done = 1
                selfp.source.stb = 0
+
+
class PHYSink(Module):
    """Captures packets from the PHY sink endpoint into a PHYPacket."""
    def __init__(self, dw):
        self.sink = Sink(phy_layout(dw))
        ###
        self.packet = PHYPacket()

    def receive(self):
        """Yield until a complete packet has been captured."""
        self.packet.done = 0
        while self.packet.done == 0:
            yield

    def do_simulation(self, selfp):
        # done is asserted only on the cycle carrying eop; consumers must
        # sample it every cycle (PHY.receive polls it).
        self.packet.done = 0
        selfp.sink.ack = 1   # always ready to accept a beat
        if selfp.sink.stb == 1 and selfp.sink.sop == 1:
            # Start of packet: begin a fresh capture.
            self.packet.start = 1
            self.packet.dat = [selfp.sink.dat]
            self.packet.be = [selfp.sink.be]
        elif selfp.sink.stb:
            # Continuation beat: accumulate data and byte enables.
            self.packet.start = 0
            self.packet.dat.append(selfp.sink.dat)
            self.packet.be.append(selfp.sink.be)
        if (selfp.sink.stb == 1 and selfp.sink.eop == 1):
            self.packet.done = 1
+
+
class PHY(Module):
    """Behavioral PHY model: packs TLP dwords into dw-bit link beats (and
    back) and exposes streaming source/sink endpoints."""
    def __init__(self, dw, id, bar0_size, debug):
        self.dw = dw   # link data width in bits (multiple of 32)

        self.id = id   # PCIe id presented for this endpoint

        self.bar0_size = bar0_size
        self.bar0_mask = get_bar_mask(bar0_size)

        # Advertised PCIe capability values (bytes).
        self.max_request_size = 512
        self.max_payload_size = 128

        self.submodules.phy_source = PHYSource(dw)
        self.submodules.phy_sink = PHYSink(dw)

        self.source = self.phy_source.source
        self.sink = self.phy_sink.sink

    def dwords2packet(self, dwords):
        """Pack 32-bit dwords into dw-bit beats plus 4-bit/dword byte enables."""
        ratio = self.dw//32
        length = math.ceil(len(dwords)/ratio)
        dat = [0]*length
        be = [0]*length
        for n in range(length):
            for i in reversed(range(ratio)):
                dat[n] = dat[n] << 32
                be[n] = be[n] << 4
                try:
                    # Fix: index with ratio*n+i — the original hardcoded
                    # 2*n+i, which is only correct for dw == 64 (ratio 2).
                    dat[n] |= dwords[ratio*n+i]
                    be[n] |= 0xF
                except IndexError:
                    # Last beat may be partially filled; leave its be nibble 0.
                    pass
        return dat, be

    def send(self, dwords):
        """Queue *dwords* for transmission (non-blocking)."""
        dat, be = self.dwords2packet(dwords)
        packet = PHYPacket(dat, be)
        self.phy_source.send(packet)

    def send_blocking(self, dwords):
        """Queue *dwords* and yield until the packet has been fully sent."""
        dat, be = self.dwords2packet(dwords)
        packet = PHYPacket(dat, be)
        yield from self.phy_source.send_blocking(packet)

    def packet2dwords(self, p_dat, p_be):
        """Unpack dw-bit beats back into the valid 32-bit dwords (be == 0xf)."""
        ratio = self.dw//32
        dwords = []
        for dat, be in zip(p_dat, p_be):
            for i in range(ratio):
                dword_be = (be >> (4*i)) & 0xf
                dword_dat = (dat >> (32*i)) & 0xffffffff
                if dword_be == 0xf:
                    dwords.append(dword_dat)
        return dwords

    def receive(self):
        """Return the dwords of a completed packet, or None if none pending."""
        if self.phy_sink.packet.done:
            self.phy_sink.packet.done = 0
            return self.packet2dwords(self.phy_sink.packet.dat, self.phy_sink.packet.be)
        else:
            return None
+
--- /dev/null
+from misoclib.com.litepcie.common import *
+from misoclib.com.litepcie.core.packet.common import *
+
+
# TLP Layer model
def get_field_data(field, dwords):
    """Extract *field* (described by word/offset/width) from header dwords."""
    mask = (1 << field.width) - 1
    return (dwords[field.word] >> field.offset) & mask
+
# Maps a TLP model name to the header field layout used for encode/decode.
tlp_headers_dict = {
    "RD32": tlp_request_header,
    "WR32": tlp_request_header,
    "CPLD": tlp_completion_header,
    "CPL": tlp_completion_header
}
+
+
class TLP():
    """Generic TLP model: a 3-dword header plus an optional data payload.

    Field encode/decode is driven by the header layout registered for
    *name* in ``tlp_headers_dict``; one attribute is created per field.
    """
    def __init__(self, name, dwords=[0, 0, 0]):
        # The default list is only sliced, never mutated, so the shared
        # mutable default is harmless here.
        self.name = name
        self.header = dwords[:3]
        self.data = dwords[3:]
        self.dwords = self.header + self.data
        self.decode_dwords()

    def decode_dwords(self):
        """Populate one attribute per header field from ``self.header``."""
        for k, v in tlp_headers_dict[self.name].items():
            setattr(self, k, get_field_data(v, self.header))

    def encode_dwords(self, data=[]):
        """Rebuild header dwords from the field attributes, append *data*."""
        self.header = [0, 0, 0]
        for k, v in tlp_headers_dict[self.name].items():
            field = tlp_headers_dict[self.name][k]
            self.header[field.word] |= (getattr(self, k) << field.offset)
        self.data = data
        self.dwords = self.header + self.data
        return self.dwords

    def __repr__(self):
        r = self.name + "\n"
        r += "--------\n"
        for k in sorted(tlp_headers_dict[self.name].keys()):
            # Fix: the original wrote .format(getattr(self, k) + "\n"),
            # adding an int to a str and raising TypeError on any repr().
            r += k + " : 0x{:x}\n".format(getattr(self, k))
        if len(self.data) != 0:
            r += "data:\n"
            for d in self.data:
                r += "{:08x}\n".format(d)
        return r


class RD32(TLP):
    """Memory read request (32-bit addressing)."""
    def __init__(self, dwords=[0, 0, 0]):
        TLP.__init__(self, "RD32", dwords)


class WR32(TLP):
    """Memory write request (32-bit addressing)."""
    def __init__(self, dwords=[0, 0, 0]):
        TLP.__init__(self, "WR32", dwords)


class CPLD(TLP):
    """Completion with data."""
    def __init__(self, dwords=[0, 0, 0]):
        TLP.__init__(self, "CPLD", dwords)


class CPL(TLP):
    """Completion without data.

    Fix: the original declared ``class CPL()`` without inheriting from TLP,
    so ``CPL().encode_dwords()`` (used by the chipset model) raised
    AttributeError and ``TLP.__init__`` ran on a non-TLP instance.
    """
    def __init__(self, dwords=[0, 0, 0]):
        TLP.__init__(self, "CPL", dwords)
+
+
class Unknown():
    """Fallback model for TLPs whose fmt/type is not recognized."""
    def __repr__(self):
        return "UNKNOWN\n"
+
# Maps a raw (fmt << 5 | type) value to (model class, minimum dword count).
# NOTE(review): this rebinds the name fmt_type_dict — the right-hand-side
# keys come from the dict of the same name brought in by the star imports,
# which is then shadowed by this new mapping; consider a distinct name.
fmt_type_dict = {
    fmt_type_dict["mem_rd32"]: (RD32, 3),
    fmt_type_dict["mem_wr32"]: (WR32, 4),
    fmt_type_dict["cpld"]: (CPLD, 4),
    fmt_type_dict["cpl"]: (CPL, 3)
}
+
+
def parse_dwords(dwords):
    """Decode raw TLP dwords into the matching model (RD32/WR32/CPLD/CPL).

    Returns Unknown() when the fmt/type combination is not recognized or
    the packet is shorter than the minimum length for its type.
    """
    f = get_field_data(tlp_common_header["fmt"], dwords)
    t = get_field_data(tlp_common_header["type"], dwords)
    fmt_type = (f << 5) | t
    # Fix: the original used a bare `except:`, which also swallowed
    # unrelated errors (even KeyboardInterrupt); only the expected lookup
    # and short-packet failures are caught now.
    try:
        tlp, min_len = fmt_type_dict[fmt_type]
        if len(dwords) >= min_len:
            return tlp(dwords)
        else:
            return Unknown()
    except (KeyError, IndexError):
        return Unknown()
--- /dev/null
+from migen.fhdl.std import *
+from migen.bus import wishbone
+from migen.sim.generic import run_simulation
+
+from misoclib.com.litepcie.core import Endpoint
+from misoclib.com.litepcie.frontend.bridge.wishbone import WishboneBridge
+
+from misoclib.com.litepcie.test.common import *
+from misoclib.com.litepcie.test.model.host import *
+
# PCIe requester/completer ids used by the host and endpoint models.
root_id = 0x100
endpoint_id = 0x400
+
+
class TB(Module):
    """Wishbone bridge testbench: the host writes 64 dwords through the
    PCIe->Wishbone bridge into an SRAM, reads them back and compares."""
    def __init__(self):
        self.submodules.host = Host(64, root_id, endpoint_id,
                                    phy_debug=False,
                                    chipset_debug=False,
                                    host_debug=False)
        self.submodules.endpoint = Endpoint(self.host.phy)

        # Decode every address (lambda a: 1) to the single SRAM slave.
        self.submodules.wishbone_bridge = WishboneBridge(self.endpoint, lambda a: 1)
        self.submodules.sram = wishbone.SRAM(1024, bus=self.wishbone_bridge.wishbone)

    def gen_simulation(self, selfp):
        # Write a known pattern, one dword per wr32.
        wr_datas = [seed_to_data(i, True) for i in range(64)]
        for i in range(64):
            yield from self.host.chipset.wr32(i, [wr_datas[i]])

        # Read the pattern back through the bridge.
        rd_datas = []
        for i in range(64):
            yield from self.host.chipset.rd32(i)
            rd_datas.append(self.host.chipset.rd32_data[0])

        s, l, e = check(wr_datas, rd_datas)
        print("shift " + str(s) + " / length " + str(l) + " / errors " + str(e))
+
if __name__ == "__main__":
    # Run the wishbone bridge testbench and dump a VCD trace for inspection.
    run_simulation(TB(), ncycles=1000, vcd_name="my.vcd", keep_files=True)