From: Raptor Engineering Development Team Date: Tue, 1 Mar 2022 21:20:59 +0000 (-0600) Subject: [WIP] Add initial version of Aquila LPC slave core X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=37fc2da1fb436ee306ccdad30a68310c07f19f15;p=microwatt.git [WIP] Add initial version of Aquila LPC slave core Significant modifications needed to enable DMA on 64-bit Microwatt system. Waiting on hardware to test, updates may be required after test results available. --- diff --git a/LICENSE.aquila b/LICENSE.aquila new file mode 100644 index 0000000..92284d3 --- /dev/null +++ b/LICENSE.aquila @@ -0,0 +1,3 @@ +The Aquila core is licensed under the terms of the GNU GPL v3 +The included third party asynchronous FIFO is licensed under the terms of the Apache License 2.0 + diff --git a/Makefile b/Makefile index 4623300..5cc2a05 100644 --- a/Makefile +++ b/Makefile @@ -178,7 +178,7 @@ CLK_INPUT=125000000 CLK_FREQUENCY=48000000 LPF=constraints/arctic-tern.lpf PACKAGE=CABGA381 -NEXTPNR_FLAGS=--um5g-85k --freq 48 +NEXTPNR_FLAGS=--um5g-85k --freq 48 --ignore-loops OPENOCD_JTAG_CONFIG=openocd/olimex-arm-usb-tiny-h.cfg OPENOCD_DEVICE_CONFIG=openocd/LFE5UM5G-85F.cfg toplevel=fpga/top-rcs-arctic-tern-bmc-card.vhdl @@ -188,6 +188,19 @@ soc_extra_v += litedram/generated/rcs-arctic-tern-bmc-card/litedram_core.v soc_extra_v += liteeth/generated/rcs-arctic-tern-bmc-card/liteeth_core.v soc_extra_v += tercel/phy.v soc_extra_v += tercel/wishbone_spi_master.v +soc_extra_v += aquila/io_blocks.v +soc_extra_v += aquila/lpc_slave.v +soc_extra_v += aquila/wishbone_lpc_slave_interface.v +soc_extra_v += aquila/third_party/async_fifo/async_bidir_fifo.v +soc_extra_v += aquila/third_party/async_fifo/async_bidir_ramif_fifo.v +soc_extra_v += aquila/third_party/async_fifo/async_fifo.v +soc_extra_v += aquila/third_party/async_fifo/fifo_2mem.v +soc_extra_v += aquila/third_party/async_fifo/fifomem_dp.v +soc_extra_v += aquila/third_party/async_fifo/rptr_empty.v +soc_extra_v += 
aquila/third_party/async_fifo/sync_ptr.v +soc_extra_v += aquila/third_party/async_fifo/sync_r2w.v +soc_extra_v += aquila/third_party/async_fifo/sync_w2r.v +soc_extra_v += aquila/third_party/async_fifo/wptr_full.v endif GHDL_IMAGE_GENERICS=-gMEMORY_SIZE=$(MEMORY_SIZE) -gRAM_INIT_FILE=$(RAM_INIT_FILE) \ diff --git a/README.aquila.md b/README.aquila.md new file mode 100644 index 0000000..7aa5b6a --- /dev/null +++ b/README.aquila.md @@ -0,0 +1,345 @@ +# ARCHITECTURE + +Aquila is a Wishbone-compatible, 32-bit, LPC slave device with 64-bit DMA support. + +Aquila provides two interfaces to the system: +1. A 32-bit Wishbone slave interface with IRQ support. All functions are supported on this interface in a CPU-interactive mode. +2. A 64-bit Wishbone master (DMA) interface, providing high speed data access and configurable DMA access protection ranges. + +# USAGE + +## General Usage + +??? + +# REGISTER MAP + +## [0x00 - 0x07] Device ID + + Device make/model unique identifier for PnP functionality + Fixed value: 0x7c5250545350494d + +## [0x08 - 0x0b] Device version + + Device revision (stepping) + + | Bits | Description | + |-------|---------------| + | 31:16 | Major version | + | 15:8 | Minor version | + | 7:0 | Patch level | + +## [0x0c - 0x0f] System clock frequency + + Can be used to set divisor to meet specific SPI Flash clock frequency requirements + +## [0x10 - 0x13] Control register 1 + + Default: 0x00000000 + + Definitions: + - CIRQ: Interrupt request as wired to Wishbone-attached internal CPU + - HIRQ: Interrupt request as wired to external host platform over LPC serial IRQ line + + | Bits | Description | + |-------|---------------------------------------------------------------------------------------------------| + | 31:20 | Reserved | + | 19 | Fire CIRQ on LPC I/O cycle access | + | 18 | Fire CIRQ on LPC TPM cycle access | + | 17 | Fire CIRQ on LPC firmware cycle access | + | 16 | Enable BMC BT interface CIRQ | + | 15:8 | IPMI BT I/O port address | + | 7 | Use 
alternate IPMI BT HIRQ (IRQ #11) instead of standard IPMI BT HIRQ (IRQ #10)                  | +    | 6     | Enable IPMI BT host interface                                                                     | +    | 5     | Enable VUART2 host interface                                                                      | +    | 4     | Enable VUART1 host interface                                                                      | +    | 3     | Allow LPC I/O cycles from host                                                                    | +    | 2     | Allow LPC TPM cycles from host                                                                    | +    | 1     | Allow LPC firmware cycles from host                                                               | +    | 0     | Global CIRQ enable, 0 disables all CIRQs, 1 allows any enabled CIRQs to assert main LPC core CIRQ | + +## [0x14 - 0x17] Control register 2 + +    Default: 0x00000000 + +    This register is used only in the CPU-interactive transfer mode. Any active DMA ranges will take precedence over this register for HOST firmware cycles. + +    Definitions: +    - CPU: Wishbone-attached internal CPU +    - HOST: External host platform attached via LPC + +    | Bits  | Description                                                                                        | +    |-------|----------------------------------------------------------------------------------------------------| +    | 31:16 | Reserved                                                                                           | +    | 15:8  | LPC cycle data out (CPU to HOST)                                                                   | +    | 7:2   | Reserved                                                                                           | +    | 1     | Signal LPC bus error to HOST if asserted when bit 0 asserted                                       | +    | 0     | Assert to transfer data in bits [15:8], [1] to HOST. Completes the active LPC cycle on assertion. 
| + +## [0x18 - 0x1b] LPC address range 1 configuration register 1 + + Default: 0x00000000 + + | Bits | Description | + |------|-------------------------------------| + | 31 | Enable this LPC slave address range | + | 30 | Allow I/O cycles for this range | + | 29 | Allow TPM cycles for this range | + | 28 | Reserved | + | 27:0 | LPC range start address | + +## [0x1c - 0x1f] LPC address range 1 configuration register 2 + + Default: 0x00000000 + + | Bits | Description | + |-------|-----------------------| + | 31:28 | Reserved | + | 27:0 | LPC range end address | + +## [0x20 - 0x23] LPC address range 2 configuration register 1 + + Default: 0x00000000 + + Same bit mapping as "LPC address range 1 configuration register 1" + +## [0x24 - 0x27] LPC address range 2 configuration register 2 + + Default: 0x00000000 + + Same bit mapping as "LPC address range 1 configuration register 2" + +## [0x28 - 0x2b] LPC address range 3 configuration register 1 + + Default: 0x00000000 + + Same bit mapping as "LPC address range 1 configuration register 1" + +## [0x2c - 0x2f] LPC address range 3 configuration register 2 + + Default: 0x00000000 + + Same bit mapping as "LPC address range 1 configuration register 2" + +## [0x30 - 0x33] LPC address range 4 configuration register 1 + + Default: 0x00000000 + + Same bit mapping as "LPC address range 1 configuration register 1" + +## [0x34 - 0x37] LPC address range 4 configuration register 2 + + Default: 0x00000000 + + Same bit mapping as "LPC address range 1 configuration register 2" + +## [0x38 - 0x3b] LPC address range 5 configuration register 1 + + Default: 0x00000000 + + Same bit mapping as "LPC address range 1 configuration register 1" + +## [0x3c - 0x3f] LPC address range 5 configuration register 2 + + Default: 0x00000000 + + Same bit mapping as "LPC address range 1 configuration register 2" + +## [0x40 - 0x43] LPC address range 6 configuration register 1 + + Default: 0x00000000 + + Same bit mapping as "LPC address range 1 configuration 
register 1" + +## [0x44 - 0x47] LPC address range 6 configuration register 2 + + Default: 0x00000000 + + Same bit mapping as "LPC address range 1 configuration register 2" + +## [0x48 - 0x4b] DMA configuration register 1 + + Default: 0x00000000 + + | Bits | Description | + |------|---------------------------------------------------------------------------------------------------------------------------| + | 31:8 | Reserved | + | 7:4 | LPC IDSEL filter | + | 3 | Reserved | + | 2 | IDSEL filter enable. When asserted, the DMA engine will require the LPC IDSEL to match the configured filter IDSEL value | + | 1 | Enable DMA for LPC firmware write cycles | + | 0 | Enable DMA for LPC firmware read cycles | + +## [0x4c - 0x4f] DMA configuration register 2 + + Default: 0x00000000 + + Definitions: + - CPU: Wishbone-attached internal CPU + - HOST: External host platform attached via LPC + + | Bits | Description | + |------|------------------------------------| + | 31:0 | CPU DMA window base address [31:0] | + + NOTE: The DMA engine only supports full word length (64 bit) CPU bus alignment, therefore bits [3:0] of this register are hardwired to zero. + +## [0x4c - 0x4f] DMA configuration register 3 + + Default: 0x00000000 + + Definitions: + - CPU: Wishbone-attached internal CPU + - HOST: External host platform attached via LPC + + | Bits | Description | + |------|-------------------------------------| + | 31:0 | CPU DMA window base address [63:32] | + +## [0x50 - 0x53] DMA configuration register 4 + + Default: 0x00000000 + + Definitions: + - CPU: Wishbone-attached internal CPU + - HOST: External host platform attached via LPC + + | Bits | Description | + |------|------------------------------------| + | 31:0 | LPC firmware window length (bytes) | + + NOTE: The DMA engine only supports full word length (64 bit) CPU bus alignment, therefore bits [3:0] of this register are hardwired to zero. 
+ +## [0x54 - 0x57] DMA configuration register 5 + + Default: 0x00000000 + + Definitions: + - CPU: Wishbone-attached internal CPU + - HOST: External host platform attached via LPC + + | Bits | Description | + |------|------------------------------------------| + | 31:0 | LPC firmware window start offset (bytes) | + + This register defines the start address (DMA window offset) of the active LPC firmware access window. + + All LPC firmware transfers start with an implicit LPC base address of 0x0, which corresponds to offset 0x0 in the configured CPU DMA window (see "DMA configuration register 2"). + This register allows remapping of the LPC base address within the CPU DMA window, thus allowing LPC address 0x0 to be placed anywhere within the configured CPU DMA memory region. In effect, it is the offset into DMA memory space where the LPC memory space origin is placed. + + Together with the "DMA configuration register 6" register, a defined region of LPC firmware memory space can be set up for DMA access, which is then mapped onto an equivalent region of CPU DMA memory. + +## [0x58 - 0x5b] DMA configuration register 6 + + Default: 0x00000000 + + Definitions: + - CPU: Wishbone-attached internal CPU + - HOST: External host platform attached via LPC + + | Bits | Description | + |------|----------------------------------------| + | 31:0 | LPC firmware window end offset (bytes) | + + This register defines the end address of the active LPC firmware access window. + + Together with the "DMA configuration register 5" register, a defined region of LPC firmware memory space can be set up for DMA access, which is then mapped onto an equivalent region of CPU DMA memory. 
+ +## [0x5c - 0x5f] DMA configuration register 7 + + Default: 0x00000000 + + Definitions: + - CPU: Wishbone-attached internal CPU + - HOST: External host platform attached via LPC + + | Bits | Description | + |------|---------------------------| + | 31:0 | LPC firmware address mask | + + This register defines the mask applied to all inbound LPC firmware space addresses, prior to any mapping of those addresses into the DMA region. + + This design allows a specific section of CPU DMA memory to be effectively replicated through the entire LPC address space. In particular, it helps to ensure the DMA window data is available at the end of the LPC firmware address space, as expected by various HOST access patterns. + +## [0x60 - 0x63] Status register 1 + + Default: 0x00000000 + + Definitions: + - CPU: Wishbone-attached internal CPU + - HOST: External host platform attached via LPC + + | Bits | Description | + |-------|------------------------------------------------------------------------------| + | 31:24 | Reserved | + | 23:20 | IDSEL of pending LPC firmware cycle | + | 19:16 | MSIZE of pending LPC firmware cycle | + | 15:5 | Reserved | + | 4 | Asserted when LPC bus is in external HOST-driven reset | + | 3:2 | LPC cycle type from host -- 0 == I/O, 1 == TPM, 2 == firmware, 3 == reserved | + | 1 | LPC cycle direction from HOST -- 0 == read, 1 == write | + | 0 | Attention flag from LPC core | + +## [0x64 - 0x67] Status register 2 + + Default: 0x00000000 + + Definitions: + - CPU: Wishbone-attached internal CPU + - HOST: External host platform attached via LPC + + | Bits | Description | + |-------|------------------------------| + | 31:28 | Reserved | + | 27:0 | Address of pending LPC cycle | + + This register contains the target LPC address of any pending LPC transaction initiated by the HOST. 
+ +## [0x68 - 0x6b] Status register 3 + + Default: 0x00000000 + + Definitions: + - CPU: Wishbone-attached internal CPU + - HOST: External host platform attached via LPC + + | Bits | Description | + |------|-----------------------------------------| + | 31:8 | Reserved | + | 7:0 | HOST-provided data of pending LPC cycle | + + This register contains the HOST-provided data of any pending LPC transaction initiated by the HOST. + + The contents of this register are only defined when the LPC cycle type is WRITE; the contents are undefined for all other cycle types. + +## [0x6c - 0x6f] Status register 4 + + Default: 0x00000000 + + Definitions: + - CPU: Wishbone-attached internal CPU + - HOST: External host platform attached via LPC + - CIRQ: Interrupt request as wired to Wishbone-attached internal CPU + - HIRQ: Interrupt request as wired to external host platform over LPC serial IRQ line + + | Bits | Description | + |-------|--------------------------------------------------------------------------------------------------| + | 31:12 | Reserved | + | 11:10 | Reason for VUART2 IRQ assert -- 0 == undefined, 1 == queue threshold reached, 2 == queue timeout | + | 9:8 | Reason for VUART1 IRQ assert -- 0 == undefined, 1 == queue threshold reached, 2 == queue timeout | + | 7 | Reserved | + | 6 | LPC I/O cycle CIRQ asserted | + | 5 | LPC TPM cycle CIRQ asserted | + | 4 | LPC firmware cycle CIRQ asserted | + | 3 | IPMI BT CIRQ asserted | + | 2 | VUART2 CIRQ asserted | + | 1 | VUART1 CIRQ asserted | + | 0 | LPC global CIRQ asserted | + +# LICENSE + +Aquila is licensed under the terms of the GNU LGPLv3+, with included third party components licensed under Apache 2.0. See LICENSE.aquila for details. 
+ +# DOCUMENTATION CREDITS + +(c) 2022 Raptor Engineering, LLC diff --git a/aquila/io_blocks.v b/aquila/io_blocks.v new file mode 100644 index 0000000..86b3ef3 --- /dev/null +++ b/aquila/io_blocks.v @@ -0,0 +1,58 @@ +// © 2017 - 2022 Raptor Engineering, LLC +// +// Released under the terms of the GPL v3 +// See the LICENSE file for full details +// +// Generic I/O blocks required for the LPC I/O signals +// Currently implemented for the ECP5 (Trellis) only, other implementations welcome! + +`define FPGA_TYPE_ECP5 + +module aquila_lpc_sdr_tristate( + output wire i, + input wire oe, + input wire o, + input wire clk, + inout p + ); + + `ifdef FPGA_TYPE_ECP5 + + wire buffer_i; + reg buffer_oe = 0; + wire buffer_o; + + always @(posedge clk) begin + buffer_oe <= oe; + end + + // SDR input buffer + IFS1P3BX ib0( + .SCLK(clk), + .PD(0), + .SP(1), + .D(buffer_i), + .Q(i) + ); + + // SDR output buffer + OFS1P3BX ob0( + .SCLK(clk), + .PD(0), + .SP(1), + .D(o), + .Q(buffer_o) + ); + + TRELLIS_IO #( + .DIR("BIDIR") + ) io0( + .B(p), + .I(buffer_o), + .O(buffer_i), + .T(~buffer_oe) + ); + + `endif // FPGA_TYPE_ECP5 + +endmodule \ No newline at end of file diff --git a/aquila/lpc_slave.v b/aquila/lpc_slave.v new file mode 100644 index 0000000..d407b9e --- /dev/null +++ b/aquila/lpc_slave.v @@ -0,0 +1,2279 @@ +// © 2017 - 2020 Raptor Engineering, LLC +// +// Released under the terms of the GPL v3 +// See the LICENSE file for full details + +module lpc_slave_interface( + output wire [27:0] address, + input wire [7:0] tx_data, + output reg [7:0] rx_data, + output reg tpm_cycle, + output reg firmware_cycle, + input wire continue, + input wire data_ack, + input wire transfer_error, + input wire exception_ack, + output reg address_ready, + output reg data_ready, + output reg data_ready_cont, + output reg continue_cont, + output reg [2:0] exception, + output wire data_direction, // 0 == read from slave, 1 == write to slave + input wire [16:0] irq_request, + input wire irq_tx_ready, + 
output reg irq_tx_queued, + + input wire [8:0] lpc_fw_input_xfer_write_addr, + input wire [7:0] lpc_fw_input_xfer_write_data, + input wire lpc_fw_input_xfer_write_clk, + input wire lpc_fw_input_xfer_write_wren, + input wire [8:0] lpc_fw_output_xfer_read_addr, + output wire [7:0] lpc_fw_output_xfer_read_data, + input wire lpc_fw_output_xfer_read_clk, + + input wire [8:0] ipmi_bt_input_xfer_write_addr, + input wire [7:0] ipmi_bt_input_xfer_write_data, + input wire ipmi_bt_input_xfer_write_clk, + input wire ipmi_bt_input_xfer_write_wren, + input wire [8:0] ipmi_bt_output_xfer_read_addr, + output wire [7:0] ipmi_bt_output_xfer_read_data, + input wire ipmi_bt_output_xfer_read_clk, + + input wire [15:0] range1_start, + input wire [15:0] range1_end, + input wire [15:0] range2_start, + input wire [15:0] range2_end, + input wire [15:0] range3_start, + input wire [15:0] range3_end, + input wire [15:0] range4_start, + input wire [15:0] range4_end, + input wire [15:0] range5_start, + input wire [15:0] range5_end, + input wire [15:0] range6_start, + input wire [15:0] range6_end, + + input wire enable_vuart1, + output wire [31:0] vuart1_status_register, + input wire [31:0] vuart1_control_register, + output wire vuart1_assert_b2h_break_clear, + output wire vuart1_tx_fifo_reset, + output wire vuart1_tx_fifo_wren, + output wire [7:0] vuart1_tx_fifo_data, + input wire vuart1_tx_fifo_full, + input wire vuart1_tx_fifo_almost_full, + input wire vuart1_tx_fifo_empty, + + output wire vuart1_rx_fifo_reset, + output wire vuart1_rx_fifo_rpop, + input wire [7:0] vuart1_rx_fifo_data, + input wire vuart1_rx_fifo_empty, + input wire vuart1_rx_fifo_almost_empty, + input wire vuart1_rx_fifo_full, + input wire [3:0] vuart1_rx_data_available_count, + + input wire enable_vuart2, + output wire [31:0] vuart2_status_register, + input wire [31:0] vuart2_control_register, + output wire vuart2_assert_b2h_break_clear, + output wire vuart2_tx_fifo_reset, + output wire vuart2_tx_fifo_wren, + output wire 
[7:0] vuart2_tx_fifo_data, + input wire vuart2_tx_fifo_full, + input wire vuart2_tx_fifo_almost_full, + input wire vuart2_tx_fifo_empty, + + output wire vuart2_rx_fifo_reset, + output wire vuart2_rx_fifo_rpop, + input wire [7:0] vuart2_rx_fifo_data, + input wire vuart2_rx_fifo_empty, + input wire vuart2_rx_fifo_almost_empty, + input wire vuart2_rx_fifo_full, + input wire [3:0] vuart2_rx_data_available_count, + + input wire enable_ipmi_bt, + input wire ipmi_bt_alt_irq, + input wire [15:0] ipmi_bt_port_base_address, + + output wire ipmi_bt_bmc_to_host_ctl_sms_ack, + output wire ipmi_bt_bmc_to_host_ctl_attn_ack, + output wire ipmi_bt_host_to_bmc_ctl_attn_req, + output wire ipmi_bt_host_to_bmc_ctl_oem0_req, + output wire ipmi_bt_irq_ack, + output wire ipmi_bt_irq_bmc_reset, + output wire ipmi_bt_host_to_bmc_ctl_h_busy, + output wire ipmi_bt_irq_enable, + + input wire ipmi_bt_bmc_to_host_ctl_sms_req, + input wire ipmi_bt_bmc_to_host_ctl_attn_req, + input wire ipmi_bt_bmc_to_host_ctl_sms_ack_cont, + input wire ipmi_bt_bmc_to_host_ctl_attn_ack_cont, + input wire ipmi_bt_host_to_bmc_ctl_attn_req_cont, + input wire ipmi_bt_host_to_bmc_ctl_oem0_req_cont, + input wire ipmi_bt_irq_ack_cont, + input wire ipmi_bt_irq_bmc_reset_cont, + input wire ipmi_bt_bmc_to_host_ctl_b_busy, + input wire ipmi_bt_irq_req, + + output wire [3:0] fw_idsel, + output wire [3:0] fw_msize, + + output wire [15:0] debug_port, + + output reg [3:0] lpc_data_out, // These three signals must have I/O output register enabled in top level SB_IO or equivalent + input wire [3:0] lpc_data_in, + output reg lpc_data_direction, // 0 == tristate (input), 1 == driven (output) + output reg lpc_irq_out, + input wire lpc_irq_in, + output wire lpc_irq_direction, // 0 == tristate (input), 1 == driven (output) + + input wire lpc_frame_n, + input wire lpc_reset_n, + input wire lpc_clock + ); + + parameter VUART1_BASE_ADDRESS = 16'h03f8; + parameter VUART1_IRQ = 4; + parameter VUART2_BASE_ADDRESS = 16'h02f8; + parameter 
VUART2_IRQ = 3; + + parameter IPMI_BT_IRQ = 10; + parameter IPMI_BT_ALT_IRQ = 11; + + parameter LPC_CODEWORD_ISA_START = 4'b0000; + parameter LPC_CODEWORD_FWR_START = 4'b1101; + parameter LPC_CODEWORD_FWW_START = 4'b1110; + parameter LPC_CODEWORD_TPM_START = 4'b0101; + + parameter LPC_CODEWORD_SYNC_READY = 4'b0000; + parameter LPC_CODEWORD_SYNC_SWAIT = 4'b0101; + parameter LPC_CODEWORD_SYNC_LWAIT = 4'b0110; + parameter LPC_CODEWORD_SYNC_ERROR = 4'b1010; + parameter LPC_CODEWORD_TURNAROUND = 4'b1111; + + parameter LPC_CYCLE_TYPE_IO = 2'b00; + + parameter LPC_RX_TRANSFER_STATE_IDLE = 0; + parameter LPC_RX_TRANSFER_STATE_TR01 = 1; + parameter LPC_RX_TRANSFER_STATE_TR02 = 2; + parameter LPC_RX_TRANSFER_STATE_TR03 = 3; + parameter LPC_RX_TRANSFER_STATE_TR04 = 4; + parameter LPC_RX_TRANSFER_STATE_TR05 = 5; + parameter LPC_RX_TRANSFER_STATE_TR06 = 6; + parameter LPC_RX_TRANSFER_STATE_TR07 = 7; + parameter LPC_RX_TRANSFER_STATE_TR08 = 8; + parameter LPC_RX_TRANSFER_STATE_TR09 = 9; + + parameter LPC_RX_TRANSFER_STATE_FR01 = 10; + parameter LPC_RX_TRANSFER_STATE_FR02 = 11; + parameter LPC_RX_TRANSFER_STATE_FR03 = 12; + parameter LPC_RX_TRANSFER_STATE_FR04 = 13; + parameter LPC_RX_TRANSFER_STATE_FR05 = 14; + parameter LPC_RX_TRANSFER_STATE_FR06 = 15; + parameter LPC_RX_TRANSFER_STATE_FR07 = 16; + parameter LPC_RX_TRANSFER_STATE_FR08 = 17; + parameter LPC_RX_TRANSFER_STATE_FR09 = 18; + parameter LPC_RX_TRANSFER_STATE_FR10 = 19; + + parameter LPC_RX_TRANSFER_STATE_IW01 = 20; + + parameter LPC_TX_TRANSFER_STATE_IDLE = 0; + parameter LPC_TX_TRANSFER_STATE_TR01 = 1; + parameter LPC_TX_TRANSFER_STATE_TR02 = 2; + parameter LPC_TX_TRANSFER_STATE_TR03 = 3; + parameter LPC_TX_TRANSFER_STATE_TR04 = 4; + parameter LPC_TX_TRANSFER_STATE_TR05 = 5; + parameter LPC_TX_TRANSFER_STATE_TR06 = 6; + parameter LPC_TX_TRANSFER_STATE_TR07 = 7; + parameter LPC_TX_TRANSFER_STATE_TR08 = 8; + parameter LPC_TX_TRANSFER_STATE_TR09 = 9; + parameter LPC_TX_TRANSFER_STATE_TR10 = 10; + parameter 
LPC_TX_TRANSFER_STATE_TR11 = 11; + + parameter LPC_TX_TRANSFER_STATE_FR01 = 12; + parameter LPC_TX_TRANSFER_STATE_FR02 = 13; + parameter LPC_TX_TRANSFER_STATE_FR03 = 14; + parameter LPC_TX_TRANSFER_STATE_FR04 = 15; + parameter LPC_TX_TRANSFER_STATE_FR05 = 16; + + parameter LPC_SERIRQ_STATE_IDLE = 0; + parameter LPC_SERIRQ_STATE_TR01 = 1; + parameter LPC_SERIRQ_STATE_TR02 = 2; + parameter LPC_SERIRQ_STATE_TR03 = 3; + parameter LPC_SERIRQ_STATE_TR04 = 4; + + reg [4:0] rx_transfer_state = 0; + reg [4:0] tx_transfer_state = 0; + reg [2:0] serirq_state = 0; + reg start_tx_cycle = 0; + reg abort_tx_cycle = 0; + reg tx_cycle_done = 0; + reg lpc_frame_n_prev = 1; + reg [1:0] cycle_type = 0; + reg cycle_direction; // 0 == read, 1 == write + reg [27:0] io_address = 0; // Lower 16 bits I/O cycles only, full 28 bits used for FW cycles + + reg [3:0] fw_cycle_idsel = 0; + reg [3:0] fw_cycle_msize = 0; + + reg vuart1_cycle = 0; + reg vuart2_cycle = 0; + reg ipmi_bt_cycle = 0; + reg range_select_cycle = 0; + + reg [3:0] vuart1_ier = 0; + wire [7:0] vuart1_iir; + reg [7:0] vuart1_lcr = 0; + reg [4:0] vuart1_mcr = 0; + wire [7:0] vuart1_lsr; + reg [7:0] vuart1_msr = 0; + reg [7:0] vuart1_scr = 0; + reg [7:0] vuart1_dll = 0; + reg [7:0] vuart1_dlm = 0; + reg [2:0] vuart1_interrupt_id = 0; + reg vuart1_interrupt_pending = 0; + reg vuart1_iir_read_tx_empty_assert = 0; + reg vuart1_lsr_read_assert = 0; + reg vuart1_rx_break_irq_pending = 0; + reg vuart1_rx_break_request_prev = 0; + reg vuart1_tx_fifo_empty_prev = 0; + reg vuart1_tx_fifo_empty_irq_pending = 0; + reg vuart1_fifos_enabled = 0; + reg [1:0] vuart1_rcvr_trigger = 0; + reg vuart1_assert_b2h_break_clear_reg = 0; + reg [8:0] vuart1_rx_fifo_read_timeout_counter = 0; + reg vuart1_rx_data_queue_contents_read_timeout = 0; + reg vuart1_rx_data_queue_contents_past_trigger = 0; + + assign vuart1_iir[7] = vuart1_fifos_enabled; + assign vuart1_iir[6] = vuart1_fifos_enabled; + assign vuart1_iir[5:4] = 0; + assign vuart1_iir[3:1] = 
vuart1_interrupt_id; + assign vuart1_iir[0] = !vuart1_interrupt_pending; + + assign vuart1_lsr[7] = 0; + assign vuart1_lsr[6] = vuart1_tx_fifo_empty; + assign vuart1_lsr[5] = vuart1_tx_fifo_empty; + assign vuart1_lsr[4] = 0; // BREAK is implemented via an external signal from the BMC, ORed over this bit + assign vuart1_lsr[3] = 0; + assign vuart1_lsr[2] = 0; + assign vuart1_lsr[1] = 0; + assign vuart1_lsr[0] = !vuart1_rx_fifo_empty; + + assign vuart1_assert_b2h_break_clear = vuart1_assert_b2h_break_clear_reg; + + reg vuart1_tx_fifo_reset_reg = 0; + reg vuart1_tx_fifo_wren_reg = 0; + reg [7:0] vuart1_tx_fifo_data_reg = 0; + reg vuart1_rx_fifo_reset_reg = 0; + reg vuart1_rx_fifo_rpop_reg = 0; + + assign vuart1_tx_fifo_reset = vuart1_tx_fifo_reset_reg; + assign vuart1_tx_fifo_wren = vuart1_tx_fifo_wren_reg; + assign vuart1_tx_fifo_data = vuart1_tx_fifo_data_reg; + assign vuart1_rx_fifo_reset = vuart1_rx_fifo_reset_reg; + assign vuart1_rx_fifo_rpop = vuart1_rx_fifo_rpop_reg; + + reg [3:0] vuart2_ier = 0; + wire [7:0] vuart2_iir; + reg [7:0] vuart2_lcr = 0; + reg [4:0] vuart2_mcr = 0; + wire [7:0] vuart2_lsr; + reg [7:0] vuart2_msr = 0; + reg [7:0] vuart2_scr = 0; + reg [7:0] vuart2_dll = 0; + reg [7:0] vuart2_dlm = 0; + reg [2:0] vuart2_interrupt_id = 0; + reg vuart2_interrupt_pending = 0; + reg vuart2_iir_read_tx_empty_assert = 0; + reg vuart2_lsr_read_assert = 0; + reg vuart2_rx_break_irq_pending = 0; + reg vuart2_rx_break_request_prev = 0; + reg vuart2_tx_fifo_empty_prev = 0; + reg vuart2_tx_fifo_empty_irq_pending = 0; + reg vuart2_fifos_enabled = 0; + reg [1:0] vuart2_rcvr_trigger = 0; + reg vuart2_assert_b2h_break_clear_reg = 0; + reg [8:0] vuart2_rx_fifo_read_timeout_counter = 0; + reg vuart2_rx_data_queue_contents_read_timeout = 0; + reg vuart2_rx_data_queue_contents_past_trigger = 0; + + assign vuart2_iir[7] = vuart2_fifos_enabled; + assign vuart2_iir[6] = vuart2_fifos_enabled; + assign vuart2_iir[5:4] = 0; + assign vuart2_iir[3:1] = vuart2_interrupt_id; + 
assign vuart2_iir[0] = !vuart2_interrupt_pending; + + assign vuart2_lsr[7] = 0; + assign vuart2_lsr[6] = vuart2_tx_fifo_empty; + assign vuart2_lsr[5] = vuart2_tx_fifo_empty; + assign vuart2_lsr[4] = 0; // BREAK is implemented via an external signal from the BMC, ORed over this bit + assign vuart2_lsr[3] = 0; + assign vuart2_lsr[2] = 0; + assign vuart2_lsr[1] = 0; + assign vuart2_lsr[0] = !vuart2_rx_fifo_empty; + + assign vuart2_assert_b2h_break_clear = vuart2_assert_b2h_break_clear_reg; + + reg vuart2_tx_fifo_reset_reg = 0; + reg vuart2_tx_fifo_wren_reg = 0; + reg [7:0] vuart2_tx_fifo_data_reg = 0; + reg vuart2_rx_fifo_reset_reg = 0; + reg vuart2_rx_fifo_rpop_reg = 0; + + assign vuart2_tx_fifo_reset = vuart2_tx_fifo_reset_reg; + assign vuart2_tx_fifo_wren = vuart2_tx_fifo_wren_reg; + assign vuart2_tx_fifo_data = vuart2_tx_fifo_data_reg; + assign vuart2_rx_fifo_reset = vuart2_rx_fifo_reset_reg; + assign vuart2_rx_fifo_rpop = vuart2_rx_fifo_rpop_reg; + + assign vuart1_status_register = {16'h00, vuart1_fifos_enabled, 1'b0, vuart1_rcvr_trigger, vuart1_mcr, vuart1_lcr}; + assign vuart2_status_register = {16'h00, vuart2_fifos_enabled, 1'b0, vuart2_rcvr_trigger, vuart2_mcr, vuart2_lcr}; + + reg [16:0] active_irq_request = 0; + reg [3:0] irq_delay_counter = 0; + reg [4:0] irq_frame_number = 0; + reg lpc_irq_in_prev_1 = 1; + reg lpc_irq_in_prev_2 = 1; + reg lpc_irq_in_prev_3 = 1; + reg irq_tx_ready_prev = 0; + reg irq_quiet_mode = 0; + + reg lpc_irq_direction_reg = 0; + + reg lpc_slave_write_complete = 0; + + assign address = io_address; + assign data_direction = cycle_direction; + + assign fw_idsel = fw_cycle_idsel; + assign fw_msize = fw_cycle_msize; + +`ifdef LPC_SLAVE_DEBUG + // Debug port + assign debug_port[3:0] = lpc_data_in; + assign debug_port[4] = lpc_frame_n; + assign debug_port[5] = lpc_reset_n; + assign debug_port[6] = cycle_direction; + assign debug_port[7] = lpc_clock; +// assign debug_port[11:8] = rx_transfer_state[3:0]; +// assign debug_port[9:8] = 
rx_transfer_state[1:0]; +// assign debug_port[11] = vuart1_cycle; +// assign debug_port[10] = ipmi_bt_cycle; +// assign debug_port[15:12] = tx_transfer_state[3:0]; +// assign debug_port[15:8] = lpc_fw_input_xfer_read_data; + assign debug_port[12] = lpc_irq_in; + assign debug_port[11] = lpc_irq_direction; + assign debug_port[10:8] = serirq_state[2:0]; +// assign debug_port[12:11] = irq_delay_counter[1:0]; +// assign debug_port[12] = lpc_irq_out; +// assign debug_port[14:13] = irq_frame_number[1:0]; +// assign debug_port[14:12] = serirq_state; +// assign debug_port[15] = irq_quiet_mode; +// assign debug_port[8] = 0; +// assign debug_port[9] = firmware_cycle; +// assign debug_port[10] = data_ready; +// assign debug_port[11] = address_ready; +// assign debug_port[15:12] = fw_cycle_msize; +`else + assign debug_port = 16'h0000; +`endif + + reg tx_cycle_done_reg_rx = 0; + + reg [16:0] irq_request_reg = 0; + reg irq_tx_ready_reg = 0; + + reg [8:0] lpc_fw_input_xfer_read_addr; + wire [7:0] lpc_fw_input_xfer_read_data; + reg [8:0] lpc_fw_output_xfer_write_addr; + reg [7:0] lpc_fw_output_xfer_write_data; + reg lpc_fw_output_xfer_write_wren; + + reg [8:0] ipmi_bt_input_xfer_read_addr; + wire [7:0] ipmi_bt_input_xfer_read_data; + reg [8:0] ipmi_bt_output_xfer_write_addr; + reg [7:0] ipmi_bt_output_xfer_write_data; + reg ipmi_bt_output_xfer_write_wren; + + reg [8:0] fw_cycle_rx_nibble_counter; + reg [7:0] fw_cycle_tx_byte_counter; + + reg rx_special_data_ack = 0; + reg rx_special_continue = 0; + reg [7:0] special_tx_data = 0; + + reg ipmi_bt_bmc_to_host_ctl_sms_ack_reg = 0; + reg ipmi_bt_bmc_to_host_ctl_attn_ack_reg = 0; + reg ipmi_bt_host_to_bmc_ctl_attn_req_reg = 0; + reg ipmi_bt_host_to_bmc_ctl_oem0_req_reg = 0; + reg ipmi_bt_irq_ack_reg = 0; + reg ipmi_bt_irq_bmc_reset_reg = 0; + reg ipmi_bt_host_to_bmc_ctl_h_busy_reg = 0; + reg ipmi_bt_irq_enable = 0; + + assign ipmi_bt_bmc_to_host_ctl_sms_ack = ipmi_bt_bmc_to_host_ctl_sms_ack_reg; + assign ipmi_bt_bmc_to_host_ctl_attn_ack 
= ipmi_bt_bmc_to_host_ctl_attn_ack_reg; + assign ipmi_bt_host_to_bmc_ctl_attn_req = ipmi_bt_host_to_bmc_ctl_attn_req_reg; + assign ipmi_bt_host_to_bmc_ctl_oem0_req = ipmi_bt_host_to_bmc_ctl_oem0_req_reg; + assign ipmi_bt_irq_ack = ipmi_bt_irq_ack_reg; + assign ipmi_bt_irq_bmc_reset = ipmi_bt_irq_bmc_reset_reg; + assign ipmi_bt_host_to_bmc_ctl_h_busy = ipmi_bt_host_to_bmc_ctl_h_busy_reg; + + assign lpc_irq_direction = lpc_irq_direction_reg; + + wire [16:0] vuart_irq_request_overlay; + assign vuart_irq_request_overlay = (vuart2_interrupt_pending << VUART2_IRQ) | (vuart1_interrupt_pending << VUART1_IRQ); + + wire [16:0] ipmi_bt_irq_request_overlay; + assign ipmi_bt_irq_request_overlay = (ipmi_bt_alt_irq)?(ipmi_bt_irq_req << IPMI_BT_ALT_IRQ):(ipmi_bt_irq_req << IPMI_BT_IRQ); + + always @(posedge lpc_clock) begin + // Avoid logic glitches due to these signals crossing clock domains + irq_request_reg <= irq_request; + irq_tx_ready_reg <= irq_tx_ready; + + if (!lpc_reset_n) begin + irq_quiet_mode <= 0; + irq_tx_queued <= 0; + lpc_irq_in_prev_1 <= 1; + lpc_irq_in_prev_2 <= 1; + lpc_irq_in_prev_3 <= 1; + lpc_irq_out <= 1; + lpc_irq_direction_reg <= 0; + serirq_state <= LPC_SERIRQ_STATE_IDLE; + end else begin + case (serirq_state) + LPC_SERIRQ_STATE_IDLE: begin + if (irq_quiet_mode && irq_tx_ready_reg && !irq_tx_ready_prev) begin + active_irq_request <= active_irq_request | irq_request_reg | vuart_irq_request_overlay | ipmi_bt_irq_request_overlay; + irq_tx_queued <= 1; + irq_delay_counter <= 0; + + // Initiate quiet mode transfer + lpc_irq_out <= 0; + lpc_irq_direction_reg <= 1; + serirq_state <= LPC_SERIRQ_STATE_TR01; + end else begin + // Detect potential start signal from host + // This can occur in either quiet or continuous mode + if (!lpc_irq_in) begin + if (irq_delay_counter > 2) begin + // Latch current IRQ requests + active_irq_request <= active_irq_request | irq_request_reg | vuart_irq_request_overlay | ipmi_bt_irq_request_overlay; + serirq_state <= 
LPC_SERIRQ_STATE_TR02; + end else begin + irq_delay_counter <= irq_delay_counter + 1; + end + end else begin + irq_delay_counter <= 0; + end + end + end + LPC_SERIRQ_STATE_TR01: begin + // Tristate bus + lpc_irq_out <= 0; + lpc_irq_direction_reg <= 0; + serirq_state <= LPC_SERIRQ_STATE_TR02; + end + LPC_SERIRQ_STATE_TR02: begin + // Wait for completion of start signal from host + if (lpc_irq_in) begin + // IRQ0 needs to be asserted nearly immediately after the end of the start pulse + // if it is to be asserted at all. Handle IRQ0 start pulse assertion here, as the + // heavy pipelining of the IRQ transmitter will not allow a short enough delay to + // launch IRQ0 in the next state... + if (active_irq_request[0]) begin + // Drive IRQ assert for IRQ0 + lpc_irq_out <= 0; + lpc_irq_direction_reg <= 1; + end + irq_delay_counter <= 1; + irq_frame_number <= 0; + serirq_state <= LPC_SERIRQ_STATE_TR03; + end + end + LPC_SERIRQ_STATE_TR03: begin + if (irq_frame_number < 17) begin + if (irq_delay_counter == 0) begin + if (active_irq_request[irq_frame_number]) begin + // Drive IRQ assert + lpc_irq_out <= 0; + lpc_irq_direction_reg <= 1; + end + end else if (irq_delay_counter == 1) begin + if (active_irq_request[irq_frame_number]) begin + // Drive line back high to prepare for TAR cycle. + // This avoids the line floating low / undetermined for an extended period of time + // after we stop driving it; i.e. not relying solely on pullup resistor response. 
+ lpc_irq_out <= 1; + lpc_irq_direction_reg <= 1; + end + end else begin + lpc_irq_out <= 1; + lpc_irq_direction_reg <= 0; + end + end else begin + lpc_irq_out <= 1; + serirq_state <= LPC_SERIRQ_STATE_TR04; + end + + if (irq_delay_counter > 1) begin + irq_frame_number <= irq_frame_number + 1; + irq_delay_counter <= 0; + end else begin + irq_delay_counter <= irq_delay_counter + 1; + end + end + LPC_SERIRQ_STATE_TR04: begin + // Wait for rising edge + if (!lpc_irq_in_prev_1 && lpc_irq_in) begin + if (!lpc_irq_in_prev_3 && !lpc_irq_in_prev_2 && !lpc_irq_in_prev_1) begin + irq_quiet_mode <= 0; + end else begin + irq_quiet_mode <= 1; + end + active_irq_request <= 0; + serirq_state <= LPC_SERIRQ_STATE_IDLE; + end + + // Ensure bus is tristated + lpc_irq_direction_reg <= 0; + end + default: begin + // Should never reach this state + serirq_state <= LPC_SERIRQ_STATE_IDLE; + end + endcase + end + + lpc_irq_in_prev_1 <= lpc_irq_in; + lpc_irq_in_prev_2 <= lpc_irq_in_prev_1; + lpc_irq_in_prev_3 <= lpc_irq_in_prev_2; + irq_tx_ready_prev <= irq_tx_ready_reg; + + if ((serirq_state != LPC_SERIRQ_STATE_IDLE) && !irq_tx_ready_reg) begin + irq_tx_queued <= 0; + end + end + + always @(posedge lpc_clock) begin + // Avoid logic glitches due to this signal crossing clock domains + tx_cycle_done_reg_rx = tx_cycle_done; + + if (!lpc_reset_n) begin + rx_transfer_state <= LPC_RX_TRANSFER_STATE_IDLE; + + lpc_data_direction <= 0; + abort_tx_cycle <= 1; + + rx_special_data_ack <= 0; + rx_special_continue <= 0; + + vuart1_lcr <= 0; + vuart1_fifos_enabled <= 0; + vuart1_interrupt_pending <= 0; + vuart1_iir_read_tx_empty_assert <= 0; + vuart1_tx_fifo_empty_irq_pending <= 0; + vuart1_lsr_read_assert <= 0; + vuart1_rx_break_irq_pending <= 0; + vuart1_rx_break_request_prev <= 0; + vuart1_interrupt_id <= 0; + vuart1_rcvr_trigger <= 0; + vuart1_tx_fifo_reset_reg <= 0; + vuart1_rx_fifo_reset_reg <= 0; + vuart1_rx_fifo_rpop_reg <= 0; + vuart1_rx_data_queue_contents_read_timeout <= 0; + 
vuart1_rx_data_queue_contents_past_trigger <= 0; + vuart2_lcr <= 0; + vuart2_fifos_enabled <= 0; + vuart2_interrupt_pending <= 0; + vuart2_iir_read_tx_empty_assert <= 0; + vuart2_tx_fifo_empty_irq_pending <= 0; + vuart2_lsr_read_assert <= 0; + vuart2_rx_break_irq_pending <= 0; + vuart2_rx_break_request_prev <= 0; + vuart2_interrupt_id <= 0; + vuart2_rcvr_trigger <= 0; + vuart2_tx_fifo_reset_reg <= 0; + vuart2_rx_fifo_reset_reg <= 0; + vuart2_rx_fifo_rpop_reg <= 0; + vuart2_rx_data_queue_contents_read_timeout <= 0; + vuart2_rx_data_queue_contents_past_trigger <= 0; + + ipmi_bt_bmc_to_host_ctl_sms_ack_reg <= 0; + ipmi_bt_bmc_to_host_ctl_attn_ack_reg <= 0; + ipmi_bt_host_to_bmc_ctl_attn_req_reg <= 0; + ipmi_bt_host_to_bmc_ctl_oem0_req_reg <= 0; + ipmi_bt_irq_ack_reg <= 0; + ipmi_bt_irq_bmc_reset_reg <= 0; + ipmi_bt_host_to_bmc_ctl_h_busy_reg <= 0; + ipmi_bt_irq_enable <= 0; + + // Signal exception to CPU + if (!exception_ack) begin + exception[1] <= 1; + end + end else begin + if (!lpc_frame_n) begin + if ((rx_transfer_state == LPC_RX_TRANSFER_STATE_IDLE) + || (rx_transfer_state == LPC_RX_TRANSFER_STATE_TR01)) begin + cycle_type <= 0; + io_address <= 0; + data_ready <= 0; + address_ready <= 0; + + vuart1_cycle <= 0; + vuart2_cycle <= 0; + ipmi_bt_cycle <= 0; + range_select_cycle <= 0; + + abort_tx_cycle <= 1; + if (lpc_data_in == LPC_CODEWORD_ISA_START) begin + cycle_direction <= 0; + tpm_cycle <= 0; + firmware_cycle <= 0; + rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR01; + end else begin + if (lpc_data_in == LPC_CODEWORD_TPM_START) begin + cycle_direction <= 0; + tpm_cycle <= 1; + firmware_cycle <= 0; + rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR01; + end else begin + if ((lpc_data_in == LPC_CODEWORD_FWR_START) || (lpc_data_in == LPC_CODEWORD_FWW_START)) begin +`ifdef ENABLE_FIRMWARE_MEMORY_CYCLES + tpm_cycle <= 0; + firmware_cycle <= 1; + if (lpc_data_in == LPC_CODEWORD_FWW_START) begin + cycle_direction <= 1; + end else begin + cycle_direction <= 0; + end + 
rx_transfer_state <= LPC_RX_TRANSFER_STATE_FR01; +`else + cycle_direction <= 0; + tpm_cycle <= 0; + firmware_cycle <= 0; + rx_transfer_state <= LPC_RX_TRANSFER_STATE_IDLE; +`endif + end else begin + rx_transfer_state <= LPC_RX_TRANSFER_STATE_IDLE; + end + end + end + end else begin + if (!lpc_frame_n_prev) begin + // Host requested active cycle abort + lpc_data_direction <= 0; + abort_tx_cycle <= 1; + rx_transfer_state <= LPC_RX_TRANSFER_STATE_IDLE; + + // Signal exception to CPU + if (!exception_ack) begin + exception[0] <= 1; + end + end + end + end else begin + case (rx_transfer_state) + LPC_RX_TRANSFER_STATE_IDLE: begin + // Idle state + cycle_type <= 0; + cycle_direction <= 0; + io_address <= 0; + tpm_cycle <= 0; + firmware_cycle <= 0; + data_ready <= 0; + address_ready <= 0; + + vuart1_cycle <= 0; + vuart2_cycle <= 0; + ipmi_bt_cycle <= 0; + range_select_cycle <= 0; + + rx_transfer_state <= LPC_RX_TRANSFER_STATE_IDLE; + + abort_tx_cycle <= 1; + lpc_data_direction <= 0; + end + LPC_RX_TRANSFER_STATE_TR01: begin + // Receive cycle type and direction + cycle_type <= lpc_data_in[3:2]; + cycle_direction <= lpc_data_in[1]; + + rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR02; + end + LPC_RX_TRANSFER_STATE_TR02: begin + if (cycle_type == LPC_CYCLE_TYPE_IO) begin + // Receive I/O address -- nibble 1 + io_address[15:12] <= lpc_data_in; + rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR03; + end else begin + // Cycle type not handled by this peripheral, return to idle + rx_transfer_state <= LPC_RX_TRANSFER_STATE_IDLE; + end + + abort_tx_cycle <= 0; + end + LPC_RX_TRANSFER_STATE_TR03: begin + // Receive I/O address -- nibble 2 + io_address[11:8] <= lpc_data_in; + rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR04; + end + LPC_RX_TRANSFER_STATE_TR04: begin + // Receive I/O address -- nibble 3 + io_address[7:4] <= lpc_data_in; + rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR05; + end + LPC_RX_TRANSFER_STATE_TR05: begin + // Receive I/O address -- nibble 4 + io_address[3:0] <= 
lpc_data_in; + + // Preliminary target peripheral routing + if (enable_vuart1 && ({io_address[15:4], lpc_data_in[3], 3'b000} == VUART1_BASE_ADDRESS)) begin + vuart1_cycle <= 1; + + if (cycle_direction == 0) begin + // Start driving LAD lines + lpc_data_direction <= 1; + end + end + if (enable_vuart2 && ({io_address[15:4], lpc_data_in[3], 3'b000} == VUART2_BASE_ADDRESS)) begin + vuart2_cycle <= 1; + + if (cycle_direction == 0) begin + // Start driving LAD lines + lpc_data_direction <= 1; + end + end + if (enable_ipmi_bt && ({io_address[15:4], lpc_data_in[3:2], 2'b00} == ipmi_bt_port_base_address)) begin + ipmi_bt_cycle <= 1; + + if (cycle_direction == 0) begin + // Start driving LAD lines + lpc_data_direction <= 1; + end + end + if ((({io_address[15:4], lpc_data_in[3:0]} >= range1_start) && ({io_address[15:4], lpc_data_in[3:0]} <= range1_end)) + || (({io_address[15:4], lpc_data_in[3:0]} >= range2_start) && ({io_address[15:4], lpc_data_in[3:0]} <= range2_end)) + || (({io_address[15:4], lpc_data_in[3:0]} >= range3_start) && ({io_address[15:4], lpc_data_in[3:0]} <= range3_end)) + || (({io_address[15:4], lpc_data_in[3:0]} >= range4_start) && ({io_address[15:4], lpc_data_in[3:0]} <= range4_end)) + || (({io_address[15:4], lpc_data_in[3:0]} >= range5_start) && ({io_address[15:4], lpc_data_in[3:0]} <= range5_end)) + || (({io_address[15:4], lpc_data_in[3:0]} >= range6_start) && ({io_address[15:4], lpc_data_in[3:0]} <= range6_end)) + ) begin + range_select_cycle <= 1; + + if (cycle_direction == 0) begin + // Start driving LAD lines + lpc_data_direction <= 1; + end + end + + rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR06; + end + LPC_RX_TRANSFER_STATE_TR06: begin + if (vuart1_cycle || vuart2_cycle + || ipmi_bt_cycle || range_select_cycle + || tpm_cycle) begin // TPM cycles are always decoded + // Address handled by this peripheral + if (cycle_direction == 1) begin + // Receive I/O data -- nibble 1 + rx_data[3:0] <= lpc_data_in; + if (!vuart1_cycle && !vuart2_cycle && 
!ipmi_bt_cycle) begin + address_ready <= 1; + end + rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR07; + end else begin + if (vuart1_cycle) begin + case (io_address[2:0]) + 0: begin + if (vuart1_lcr[7]) begin + special_tx_data <= vuart1_dll; + end else begin + if (!vuart1_rx_fifo_empty) begin + special_tx_data <= vuart1_rx_fifo_data; + vuart1_rx_fifo_rpop_reg <= 1; + end else begin + special_tx_data <= 8'hff; + end + end + end + 1: begin + if (vuart1_lcr[7]) begin + special_tx_data <= vuart1_dlm; + end else begin + special_tx_data <= {4'b0000, vuart1_ier}; + end + end + 2: begin + if (vuart1_interrupt_pending && (vuart1_interrupt_id == 3'b001)) begin + vuart1_iir_read_tx_empty_assert <= 1; + end + special_tx_data <= vuart1_iir; + end + 3: special_tx_data <= vuart1_lcr; + 4: special_tx_data <= {3'b111, vuart1_mcr}; + 5: begin + if (vuart1_control_register[0]) begin + vuart1_assert_b2h_break_clear_reg <= 1; + special_tx_data <= vuart1_lsr | 8'b00010000; + end else begin + special_tx_data <= vuart1_lsr; + end + vuart1_lsr_read_assert <= 1; + end + 6: special_tx_data <= vuart1_msr; + 7: special_tx_data <= vuart1_scr; + endcase + + rx_special_continue <= 1; + end else if (vuart2_cycle) begin + case (io_address[2:0]) + 0: begin + if (vuart2_lcr[7]) begin + special_tx_data <= vuart2_dll; + end else begin + if (!vuart2_rx_fifo_empty) begin + special_tx_data <= vuart2_rx_fifo_data; + vuart2_rx_fifo_rpop_reg <= 1; + end else begin + special_tx_data <= 8'hff; + end + end + end + 1: begin + if (vuart2_lcr[7]) begin + special_tx_data <= vuart2_dlm; + end else begin + special_tx_data <= {4'b0000, vuart2_ier}; + end + end + 2: begin + if (vuart2_interrupt_pending && (vuart2_interrupt_id == 3'b001)) begin + vuart2_iir_read_tx_empty_assert <= 1; + end + special_tx_data <= vuart2_iir; + end + 3: special_tx_data <= vuart2_lcr; + 4: special_tx_data <= {3'b111, vuart2_mcr}; + 5: begin + if (vuart2_control_register[0]) begin + vuart2_assert_b2h_break_clear_reg <= 1; + special_tx_data <= 
vuart2_lsr | 8'b00010000; + end else begin + special_tx_data <= vuart2_lsr; + end + vuart2_lsr_read_assert <= 1; + end + 6: special_tx_data <= vuart2_msr; + 7: special_tx_data <= vuart2_scr; + endcase + + rx_special_continue <= 1; + end else if (ipmi_bt_cycle) begin + case (io_address[1:0]) + 0: begin + special_tx_data[7] <= ipmi_bt_bmc_to_host_ctl_b_busy; + special_tx_data[6] <= ipmi_bt_host_to_bmc_ctl_h_busy_reg; + special_tx_data[5] <= ipmi_bt_host_to_bmc_ctl_oem0_req_reg; + special_tx_data[4] <= ipmi_bt_bmc_to_host_ctl_sms_req; + special_tx_data[3] <= ipmi_bt_bmc_to_host_ctl_attn_req; + special_tx_data[2] <= ipmi_bt_host_to_bmc_ctl_attn_req_reg; + special_tx_data[1] <= 1'b0; + special_tx_data[0] <= 1'b0; + end + 1: begin + special_tx_data <= ipmi_bt_input_xfer_read_data; + ipmi_bt_input_xfer_read_addr <= ipmi_bt_input_xfer_read_addr + 1; + end + 2: begin + special_tx_data[7:2] = 6'b000000; + special_tx_data[1] = ipmi_bt_irq_req; + special_tx_data[0] = ipmi_bt_irq_enable; + end + endcase + + rx_special_continue <= 1; + end else begin + // Signal CPU that address is ready + address_ready <= 1; + end + + // Assert TX cycle start flag for > 1 clock + start_tx_cycle <= 1; + rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR09; + end + end else begin + // Address not handled by this peripheral, return to idle + rx_transfer_state <= LPC_RX_TRANSFER_STATE_IDLE; + + abort_tx_cycle <= 1; + lpc_data_direction <= 0; + end + + end + LPC_RX_TRANSFER_STATE_TR07: begin + // Receive I/O data -- nibble 2 + rx_data[7:4] <= lpc_data_in; + rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR08; + + // Start driving LAD lines + lpc_data_direction <= 1; + + // Assert TX cycle start flag for > 1 clock + start_tx_cycle <= 1; + end + LPC_RX_TRANSFER_STATE_TR08: begin + if (vuart1_cycle) begin + case (io_address[2:0]) + 0: begin + if (vuart1_lcr[7]) begin + vuart1_dll <= rx_data; + end else begin + if (!vuart1_tx_fifo_full) begin + vuart1_tx_fifo_data_reg <= rx_data; + vuart1_tx_fifo_wren_reg <= 1; 
+ end + end + end + 1: begin + if (vuart1_lcr[7]) begin + vuart1_dlm <= rx_data; + end else begin + vuart1_ier <= rx_data[3:0]; + end + end + 2: begin + // FIFO control + vuart1_fifos_enabled <= rx_data[0]; + if (rx_data[1]) begin + vuart1_rx_fifo_reset_reg <= 1; + end + if (rx_data[2]) begin + vuart1_tx_fifo_reset_reg <= 1; + end + vuart1_rcvr_trigger <= rx_data[7:6]; + end + 3: vuart1_lcr <= rx_data; + 4: vuart1_mcr <= rx_data[4:0]; + 6: vuart1_msr <= rx_data; + 7: vuart1_scr <= rx_data; + endcase + rx_special_data_ack <= 1; + + rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR09; + end else if (vuart2_cycle) begin + case (io_address[2:0]) + 0: begin + if (vuart2_lcr[7]) begin + vuart2_dll <= rx_data; + end else begin + if (!vuart2_tx_fifo_full) begin + vuart2_tx_fifo_data_reg <= rx_data; + vuart2_tx_fifo_wren_reg <= 1; + end + end + end + 1: begin + if (vuart2_lcr[7]) begin + vuart2_dlm <= rx_data; + end else begin + vuart2_ier <= rx_data[3:0]; + end + end + 2: begin + // FIFO control + vuart2_fifos_enabled <= rx_data[0]; + if (rx_data[1]) begin + vuart2_rx_fifo_reset_reg <= 1; + end + if (rx_data[2]) begin + vuart2_tx_fifo_reset_reg <= 1; + end + vuart2_rcvr_trigger <= rx_data[7:6]; + end + 3: vuart2_lcr <= rx_data; + 4: vuart2_mcr <= rx_data[4:0]; + 6: vuart2_msr <= rx_data; + 7: vuart2_scr <= rx_data; + endcase + rx_special_data_ack <= 1; + + rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR09; + end else if (ipmi_bt_cycle) begin + case (io_address[1:0]) + 0: begin + if (rx_data[6]) begin + if (ipmi_bt_host_to_bmc_ctl_h_busy_reg) begin + ipmi_bt_host_to_bmc_ctl_h_busy_reg <= 1'b0; + end else begin + ipmi_bt_host_to_bmc_ctl_h_busy_reg <= 1'b1; + end + end + if (rx_data[5]) begin + ipmi_bt_host_to_bmc_ctl_oem0_req_reg <= 1'b1; + end + if (rx_data[4]) begin + ipmi_bt_bmc_to_host_ctl_sms_ack_reg <= 1'b1; + end + if (rx_data[3]) begin + ipmi_bt_bmc_to_host_ctl_attn_ack_reg <= 1'b1; + end + if (rx_data[2]) begin + ipmi_bt_host_to_bmc_ctl_attn_req_reg <= 1'b1; + end + if 
(rx_data[1]) begin + ipmi_bt_input_xfer_read_addr <= 0; + end + if (rx_data[0]) begin + ipmi_bt_output_xfer_write_addr <= 0; + ipmi_bt_output_xfer_write_wren <= 0; + end + end + 1: begin + ipmi_bt_output_xfer_write_data <= rx_data; + ipmi_bt_output_xfer_write_wren <= 1; + end + 2: begin + if (rx_data[7]) begin + ipmi_bt_irq_bmc_reset_reg <= 1'b1; + end + if (rx_data[1]) begin + ipmi_bt_irq_ack_reg <= 1'b1; + end + ipmi_bt_irq_enable <= rx_data[0]; + end + endcase + + lpc_slave_write_complete <= 0; + rx_transfer_state <= LPC_RX_TRANSFER_STATE_IW01; + end else begin + // Signal CPU that address / data are ready + address_ready <= 1; + if (cycle_direction == 1) begin + data_ready <= 1; + end + + rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR09; + end + end + LPC_RX_TRANSFER_STATE_IW01: begin + if (!lpc_slave_write_complete) begin + if (ipmi_bt_cycle) begin + case (io_address[1:0]) + 1: begin + ipmi_bt_output_xfer_write_addr <= ipmi_bt_output_xfer_write_addr + 1; + ipmi_bt_output_xfer_write_wren <= 0; + lpc_slave_write_complete <= 1; + end + 2: begin + // Handle synchronous IPMI BT IRQ reset handshake signals + if (ipmi_bt_irq_bmc_reset_cont) begin + ipmi_bt_irq_bmc_reset_reg <= 0; + end + + // Do not continue write until slave has completed its reset cycle + if (!ipmi_bt_irq_bmc_reset_reg) begin + lpc_slave_write_complete <= 1; + end + end + default: begin + lpc_slave_write_complete <= 1; + end + endcase + end else begin + lpc_slave_write_complete <= 1; + end + end else begin + rx_special_data_ack <= 1; + rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR09; + end + end + LPC_RX_TRANSFER_STATE_TR09: begin + // Clear special cycle flags if set + if (data_ready_cont) begin + rx_special_data_ack <= 0; + end + if (continue_cont) begin + rx_special_continue <= 0; + end + + // Reset VUART FIFO control signals + vuart1_tx_fifo_wren_reg <= 0; + vuart1_tx_fifo_reset_reg <= 0; + vuart1_rx_fifo_rpop_reg <= 0; + vuart1_rx_fifo_reset_reg <= 0; + vuart2_tx_fifo_wren_reg <= 0; + 
vuart2_tx_fifo_reset_reg <= 0; + vuart2_rx_fifo_rpop_reg <= 0; + vuart2_rx_fifo_reset_reg <= 0; + + // Wait for TX cycle to complete + start_tx_cycle <= 0; + if (tx_cycle_done_reg_rx) begin + lpc_data_direction <= 0; + rx_transfer_state <= LPC_RX_TRANSFER_STATE_IDLE; + end + end + LPC_RX_TRANSFER_STATE_FR01: begin + // Receive IDSEL field + fw_cycle_idsel <= lpc_data_in; + rx_transfer_state <= LPC_RX_TRANSFER_STATE_FR02; + end + LPC_RX_TRANSFER_STATE_FR02: begin + // Receive firmware cycle address -- nibble 1 + io_address[27:24] <= lpc_data_in; + rx_transfer_state <= LPC_RX_TRANSFER_STATE_FR03; + + abort_tx_cycle <= 0; + end + LPC_RX_TRANSFER_STATE_FR03: begin + // Receive firmware cycle address -- nibble 2 + io_address[23:20] <= lpc_data_in; + rx_transfer_state <= LPC_RX_TRANSFER_STATE_FR04; + end + LPC_RX_TRANSFER_STATE_FR04: begin + // Receive firmware cycle address -- nibble 3 + io_address[19:16] <= lpc_data_in; + rx_transfer_state <= LPC_RX_TRANSFER_STATE_FR05; + end + LPC_RX_TRANSFER_STATE_FR05: begin + // Receive firmware cycle address -- nibble 4 + io_address[15:12] <= lpc_data_in; + rx_transfer_state <= LPC_RX_TRANSFER_STATE_FR06; + end + LPC_RX_TRANSFER_STATE_FR06: begin + // Receive firmware cycle address -- nibble 5 + io_address[11:8] <= lpc_data_in; + rx_transfer_state <= LPC_RX_TRANSFER_STATE_FR07; + end + LPC_RX_TRANSFER_STATE_FR07: begin + // Receive firmware cycle address -- nibble 6 + io_address[7:4] <= lpc_data_in; + rx_transfer_state <= LPC_RX_TRANSFER_STATE_FR08; + end + LPC_RX_TRANSFER_STATE_FR08: begin + // Receive firmware cycle address -- nibble 7 + io_address[3:0] <= lpc_data_in; + rx_transfer_state <= LPC_RX_TRANSFER_STATE_FR09; + end + LPC_RX_TRANSFER_STATE_FR09: begin + // Receive MSIZE field + fw_cycle_msize <= lpc_data_in; + + // Handle data transfer + if (cycle_direction == 1) begin + rx_transfer_state <= LPC_RX_TRANSFER_STATE_FR10; + + fw_cycle_rx_nibble_counter <= 0; + end else begin + // Start driving LAD lines + 
lpc_data_direction <= 1; + + // Assert TX cycle start flag for > 1 clock + start_tx_cycle <= 1; + + rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR08; + end + end + LPC_RX_TRANSFER_STATE_FR10: begin + // Signal CPU that address is ready + address_ready <= 1; + + // Receive data, LSN first + if (!fw_cycle_rx_nibble_counter[0]) begin + lpc_fw_output_xfer_write_addr <= fw_cycle_rx_nibble_counter[8:1]; + lpc_fw_output_xfer_write_data[3:0] <= lpc_data_in; + lpc_fw_output_xfer_write_wren <= 0; + end else begin + lpc_fw_output_xfer_write_data[7:4] <= lpc_data_in; + lpc_fw_output_xfer_write_wren <= 1; + end + + case (fw_cycle_msize) + 4'b0000: begin + if (fw_cycle_rx_nibble_counter == 0) begin + // Start driving LAD lines + // One cycle of delay is introduced by the register on the tristate control line, + // so to avoid missed LWAIT at the LPC master output direction has to be set one + // cycle "early"... + lpc_data_direction <= 1; + end else if (fw_cycle_rx_nibble_counter >= 1) begin + // Assert TX cycle start flag for > 1 clock + start_tx_cycle <= 1; + + rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR08; + end + end + 4'b0001: begin + if (fw_cycle_rx_nibble_counter == 1) begin + // Start driving LAD lines + // One cycle of delay is introduced by the register on the tristate control line, + // so to avoid missed LWAIT at the LPC master output direction has to be set one + // cycle "early"... + lpc_data_direction <= 1; + end else if (fw_cycle_rx_nibble_counter >= 2) begin + // Assert TX cycle start flag for > 1 clock + start_tx_cycle <= 1; + + rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR08; + end + end + 4'b0010: begin + if (fw_cycle_rx_nibble_counter == 7) begin + // Start driving LAD lines + // One cycle of delay is introduced by the register on the tristate control line, + // so to avoid missed LWAIT at the LPC master output direction has to be set one + // cycle "early"... 
+ lpc_data_direction <= 1; + end else if (fw_cycle_rx_nibble_counter >= 8) begin + // Assert TX cycle start flag for > 1 clock + start_tx_cycle <= 1; + + rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR08; + end + end + 4'b0100: begin + if (fw_cycle_rx_nibble_counter == 31) begin + // Start driving LAD lines + // One cycle of delay is introduced by the register on the tristate control line, + // so to avoid missed LWAIT at the LPC master output direction has to be set one + // cycle "early"... + lpc_data_direction <= 1; + end else if (fw_cycle_rx_nibble_counter >= 32) begin + // Assert TX cycle start flag for > 1 clock + start_tx_cycle <= 1; + + rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR08; + end + end + 4'b0111: begin + if (fw_cycle_rx_nibble_counter == 255) begin + // Start driving LAD lines + // One cycle of delay is introduced by the register on the tristate control line, + // so to avoid missed LWAIT at the LPC master output direction has to be set one + // cycle "early"... + lpc_data_direction <= 1; + end else if (fw_cycle_rx_nibble_counter >= 256) begin + // Assert TX cycle start flag for > 1 clock + start_tx_cycle <= 1; + + rx_transfer_state <= LPC_RX_TRANSFER_STATE_TR08; + end + end + default: begin + // Disallowed size codeword + // Abort cycle and signal exception + rx_transfer_state <= LPC_RX_TRANSFER_STATE_IDLE; + + // Signal exception to CPU + if (!exception_ack) begin + exception[2] <= 1; + end + end + endcase + + fw_cycle_rx_nibble_counter <= fw_cycle_rx_nibble_counter + 1; + end + default: begin + // Not reachable under normal operation! 
+ rx_transfer_state <= LPC_RX_TRANSFER_STATE_IDLE; + end + endcase + + if (rx_transfer_state != LPC_RX_TRANSFER_STATE_IW01) begin + // Handle asynchronous IPMI BT interface handshake signals + if (ipmi_bt_bmc_to_host_ctl_sms_ack_cont) begin + ipmi_bt_bmc_to_host_ctl_sms_ack_reg <= 0; + end + if (ipmi_bt_bmc_to_host_ctl_attn_ack_cont) begin + ipmi_bt_bmc_to_host_ctl_attn_ack_reg <= 0; + end + if (ipmi_bt_host_to_bmc_ctl_attn_req_cont) begin + ipmi_bt_host_to_bmc_ctl_attn_req_reg <= 0; + end + if (ipmi_bt_host_to_bmc_ctl_oem0_req_cont) begin + ipmi_bt_host_to_bmc_ctl_oem0_req_reg <= 0; + end + if (ipmi_bt_irq_ack_cont) begin + ipmi_bt_irq_ack_reg <= 0; + end + end + + if (exception_ack) begin + exception <= 0; + end + end + end + + // VUART IRQ signalling handlers + if (vuart1_rx_fifo_rpop_reg) begin + vuart1_rx_fifo_read_timeout_counter <= 0; + vuart1_rx_data_queue_contents_read_timeout <= 0; + end else begin + if (vuart1_rx_fifo_empty) begin + vuart1_rx_fifo_read_timeout_counter <= 0; + vuart1_rx_data_queue_contents_read_timeout <= 0; + end else begin + // NOTE + // This deviates intentionally from the 16550 UART timeouts to keep overall logic simple + // In a VUART situation we don't care that much about exact character timing, since we'll continue + // to eat up bytes until the FIFOs are full. 
+ // Use 10us as a reasonable value for the timeout here (slightly longer than 1 character time at 115200 baud) + if (vuart1_rx_fifo_read_timeout_counter > 333) begin + vuart1_rx_data_queue_contents_read_timeout <= 1; + end else begin + vuart1_rx_fifo_read_timeout_counter <= vuart1_rx_fifo_read_timeout_counter + 1; + end + end + end + if (vuart2_rx_fifo_rpop_reg) begin + vuart2_rx_fifo_read_timeout_counter <= 0; + vuart2_rx_data_queue_contents_read_timeout <= 0; + end else begin + if (vuart2_rx_fifo_empty) begin + vuart2_rx_fifo_read_timeout_counter <= 0; + vuart2_rx_data_queue_contents_read_timeout <= 0; + end else begin + // NOTE + // This deviates intentionally from the 16550 UART timeouts to keep overall logic simple + // In a VUART situation we don't care that much about exact character timing, since we'll continue + // to eat up bytes until the FIFOs are full. + // Use 10us as a reasonable value for the timeout here (slightly longer than 1 character time at 115200 baud) + if (vuart2_rx_fifo_read_timeout_counter > 333) begin + vuart2_rx_data_queue_contents_read_timeout <= 1; + end else begin + vuart2_rx_fifo_read_timeout_counter <= vuart2_rx_fifo_read_timeout_counter + 1; + end + end + end + case (vuart1_rcvr_trigger) + 2'b00: begin + if ((vuart1_rx_data_available_count >= 1) || vuart1_rx_fifo_full) begin + vuart1_rx_data_queue_contents_past_trigger <= 1; + end else begin + vuart1_rx_data_queue_contents_past_trigger <= 0; + end + end + 2'b01: begin + if ((vuart1_rx_data_available_count >= 4) || vuart1_rx_fifo_full) begin + vuart1_rx_data_queue_contents_past_trigger <= 1; + end else begin + vuart1_rx_data_queue_contents_past_trigger <= 0; + end + end + 2'b10: begin + if ((vuart1_rx_data_available_count >= 8) || vuart1_rx_fifo_full) begin + vuart1_rx_data_queue_contents_past_trigger <= 1; + end else begin + vuart1_rx_data_queue_contents_past_trigger <= 0; + end + end + 2'b11: begin + if ((vuart1_rx_data_available_count >= 14) || vuart1_rx_fifo_full) begin + 
vuart1_rx_data_queue_contents_past_trigger <= 1; + end else begin + vuart1_rx_data_queue_contents_past_trigger <= 0; + end + end + endcase + case (vuart2_rcvr_trigger) + 2'b00: begin + if ((vuart2_rx_data_available_count >= 1) || vuart2_rx_fifo_full) begin + vuart2_rx_data_queue_contents_past_trigger <= 1; + end else begin + vuart2_rx_data_queue_contents_past_trigger <= 0; + end + end + 2'b01: begin + if ((vuart2_rx_data_available_count >= 4) || vuart2_rx_fifo_full) begin + vuart2_rx_data_queue_contents_past_trigger <= 1; + end else begin + vuart2_rx_data_queue_contents_past_trigger <= 0; + end + end + 2'b10: begin + if ((vuart2_rx_data_available_count >= 8) || vuart2_rx_fifo_full) begin + vuart2_rx_data_queue_contents_past_trigger <= 1; + end else begin + vuart2_rx_data_queue_contents_past_trigger <= 0; + end + end + 2'b11: begin + if ((vuart2_rx_data_available_count >= 14) || vuart2_rx_fifo_full) begin + vuart2_rx_data_queue_contents_past_trigger <= 1; + end else begin + vuart2_rx_data_queue_contents_past_trigger <= 0; + end + end + endcase + + if (vuart1_ier[2] && vuart1_rx_break_irq_pending) begin + vuart1_interrupt_pending <= 1; + vuart1_interrupt_id <= 3'b010; + end else if (vuart1_ier[0] && vuart1_rx_data_queue_contents_past_trigger) begin + vuart1_interrupt_pending <= 1; + vuart1_interrupt_id <= 3'b010; + end else if (vuart1_ier[0] && vuart1_rx_data_queue_contents_read_timeout) begin + vuart1_interrupt_pending <= 1; + vuart1_interrupt_id <= 3'b110; + end else if (vuart1_ier[1] && vuart1_tx_fifo_empty_irq_pending) begin + vuart1_interrupt_pending <= 1; + vuart1_interrupt_id <= 3'b001; + end else begin + vuart1_interrupt_pending <= 0; + vuart1_interrupt_id <= 3'b000; + end + + if (vuart1_tx_fifo_wren_reg || vuart1_iir_read_tx_empty_assert) begin + vuart1_tx_fifo_empty_irq_pending <= 0; + end else begin + if (vuart1_tx_fifo_empty && !vuart1_tx_fifo_empty_prev) begin + vuart1_tx_fifo_empty_irq_pending <= 1; + end + end + if (vuart1_lsr_read_assert || 
!vuart1_control_register[0]) begin + vuart1_rx_break_irq_pending <= 0; + end else begin + if (vuart1_control_register[0] && !vuart1_rx_break_request_prev) begin + vuart1_rx_break_irq_pending <= 1; + end + end + + if (vuart2_ier[2] && vuart2_rx_break_irq_pending) begin + vuart2_interrupt_pending <= 1; + vuart2_interrupt_id <= 3'b010; + end else if (vuart2_ier[0] && vuart2_rx_data_queue_contents_past_trigger) begin + vuart2_interrupt_pending <= 1; + vuart2_interrupt_id <= 3'b010; + end else if (vuart2_ier[0] && vuart2_rx_data_queue_contents_read_timeout) begin + vuart2_interrupt_pending <= 1; + vuart2_interrupt_id <= 3'b110; + end else if (vuart2_ier[1] && vuart2_tx_fifo_empty_irq_pending) begin + vuart2_interrupt_pending <= 1; + vuart2_interrupt_id <= 3'b001; + end else begin + vuart2_interrupt_pending <= 0; + vuart2_interrupt_id <= 3'b000; + end + + if (vuart2_tx_fifo_wren_reg || vuart2_iir_read_tx_empty_assert) begin + vuart2_tx_fifo_empty_irq_pending <= 0; + end else begin + if (vuart2_tx_fifo_empty && !vuart2_tx_fifo_empty_prev) begin + vuart2_tx_fifo_empty_irq_pending <= 1; + end + end + if (vuart2_lsr_read_assert || !vuart2_control_register[0]) begin + vuart2_rx_break_irq_pending <= 0; + end else begin + if (vuart2_control_register[0] && !vuart2_rx_break_request_prev) begin + vuart2_rx_break_irq_pending <= 1; + end + end + + if (vuart1_iir_read_tx_empty_assert) begin + vuart1_iir_read_tx_empty_assert <= 0; + end + if (vuart2_iir_read_tx_empty_assert) begin + vuart2_iir_read_tx_empty_assert <= 0; + end + + if (!vuart1_control_register[0]) begin + vuart1_assert_b2h_break_clear_reg <= 0; + end + if (!vuart2_control_register[0]) begin + vuart2_assert_b2h_break_clear_reg <= 0; + end + if (vuart1_lsr_read_assert) begin + vuart1_lsr_read_assert <= 0; + end + if (vuart2_lsr_read_assert) begin + vuart2_lsr_read_assert <= 0; + end + + vuart1_tx_fifo_empty_prev <= vuart1_tx_fifo_empty; + vuart2_tx_fifo_empty_prev <= vuart2_tx_fifo_empty; + vuart1_rx_break_request_prev <= 
vuart1_control_register[0];
        vuart2_rx_break_request_prev <= vuart2_control_register[0];
        lpc_frame_n_prev <= lpc_frame_n;
    end

    // Handshake synchronizers and scratch state for the LPC TX engine.
    // These are re-registered into the lpc_clock domain below; presumably the
    // source signals originate in the Wishbone clock domain -- TODO confirm.
    reg start_tx_cycle_reg_tx = 0;
    reg abort_tx_cycle_reg_tx = 0;
    reg data_ack_reg_tx = 0;
    reg continue_reg_tx = 0;
    reg special_data_ack_reg_tx = 0;
    reg special_continue_reg_tx = 0;
    reg read_is_special_tx = 0;
    reg [7:0] lpc_tx_data_buffer = 0;
    reg [7:0] cycle_completion_codeword = 0;

    // LPC TX state machine.  Drives SYNC, data and turnaround nibbles onto
    // lpc_data_out for target (TR01..TR11) and firmware (FR01..FR05) cycles.
    always @(posedge lpc_clock) begin
        // Re-register cross-domain handshake signals to avoid logic glitches.
        start_tx_cycle_reg_tx   <= start_tx_cycle;
        abort_tx_cycle_reg_tx   <= abort_tx_cycle;
        data_ack_reg_tx         <= data_ack;
        continue_reg_tx         <= continue;
        special_data_ack_reg_tx <= rx_special_data_ack;
        special_continue_reg_tx <= rx_special_continue;

        if (abort_tx_cycle_reg_tx) begin
            // Cycle aborted -- return straight to idle.
            tx_transfer_state <= LPC_TX_TRANSFER_STATE_IDLE;
        end else begin
            case (tx_transfer_state)
                LPC_TX_TRANSFER_STATE_IDLE: begin
                    if (start_tx_cycle_reg_tx) begin
                        // Writes take the TR01 ack path; reads either stream
                        // from BRAM (firmware cycles, FR01) or return a single
                        // byte (TR04).
                        if (cycle_direction == 1)
                            tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR01;
                        else if (firmware_cycle)
                            tx_transfer_state <= LPC_TX_TRANSFER_STATE_FR01;
                        else
                            tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR04;
                    end

                    data_ready_cont <= 0;
                    continue_cont <= 0;
                    tx_cycle_done <= 0;

                    // Drive LWAIT by default.
                    lpc_data_out <= LPC_CODEWORD_SYNC_LWAIT;
                end
                LPC_TX_TRANSFER_STATE_TR01: begin
                    // Wait for a data acknowledge from the servicing engine.
                    if (data_ack_reg_tx || special_data_ack_reg_tx) begin
                        data_ready_cont <= 1;
                        // Special acks always complete READY; normal acks
                        // report ERROR when the transfer failed.
                        cycle_completion_codeword <=
                            (transfer_error && !special_data_ack_reg_tx)
                                ? LPC_CODEWORD_SYNC_ERROR
                                : LPC_CODEWORD_SYNC_READY;
                        tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR02;
                    end

                    lpc_data_out <= LPC_CODEWORD_SYNC_LWAIT;
                end
                LPC_TX_TRANSFER_STATE_TR02: begin
                    // Handshake teardown: wait for the ack to deassert.
                    if (!data_ack_reg_tx && !special_data_ack_reg_tx) begin
                        data_ready_cont <= 0;
                        tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR03;
                    end

                    lpc_data_out <= LPC_CODEWORD_SYNC_LWAIT;
                end
                LPC_TX_TRANSFER_STATE_TR03: begin
                    // Drive the completion SYNC, then go to turnaround.
                    lpc_data_out <= cycle_completion_codeword;
                    tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR09;
                end
                LPC_TX_TRANSFER_STATE_TR04: begin
                    // Read path: wait for the continue handshake.
                    if (continue_reg_tx || special_continue_reg_tx) begin
                        continue_cont <= 1;
                        cycle_completion_codeword <=
                            (transfer_error && !special_continue_reg_tx)
                                ? LPC_CODEWORD_SYNC_ERROR
                                : LPC_CODEWORD_SYNC_READY;
                        tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR05;
                    end

                    // Remember whether the response data comes from the
                    // special-data path (sampled every cycle in this state).
                    read_is_special_tx <= special_continue_reg_tx;

                    lpc_data_out <= LPC_CODEWORD_SYNC_LWAIT;
                end
                LPC_TX_TRANSFER_STATE_TR05: begin
                    if (!continue_reg_tx && !special_continue_reg_tx) begin
                        continue_cont <= 0;
                        tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR06;
                    end

                    lpc_data_out <= LPC_CODEWORD_SYNC_LWAIT;
                end
                LPC_TX_TRANSFER_STATE_TR06: begin
                    // Drive the completion SYNC codeword.
                    lpc_data_out <= cycle_completion_codeword;
                    tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR07;
                end
                LPC_TX_TRANSFER_STATE_TR07: begin
                    // Transmit first (low) nibble of the I/O data.
                    lpc_data_out <= read_is_special_tx ? special_tx_data[3:0]
                                                       : tx_data[3:0];
                    tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR08;
                end
                LPC_TX_TRANSFER_STATE_TR08: begin
                    // Transmit second (high) nibble of the I/O data.
                    lpc_data_out <= read_is_special_tx ? special_tx_data[7:4]
                                                       : tx_data[7:4];
                    tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR09;
                end
                LPC_TX_TRANSFER_STATE_TR09: begin
                    // Turnaround, part 1.
                    lpc_data_out <= LPC_CODEWORD_TURNAROUND;
                    tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR10;
                end
                LPC_TX_TRANSFER_STATE_TR10: begin
                    // Turnaround, part 2; flag cycle completion.
                    lpc_data_out <= LPC_CODEWORD_TURNAROUND;
                    tx_cycle_done <= 1;
                    tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR11;
                end
                LPC_TX_TRANSFER_STATE_TR11: begin
                    // Hold the done flag for more than one clock and keep
                    // driving turnaround during the I/O direction switch,
                    // then return to idle.
                    lpc_data_out <= LPC_CODEWORD_TURNAROUND;
                    tx_transfer_state <= LPC_TX_TRANSFER_STATE_IDLE;
                end
                LPC_TX_TRANSFER_STATE_FR01: begin
                    // Firmware read: wait for the continue handshake.
                    if (continue_reg_tx) begin
                        continue_cont <= 1;
                        tx_transfer_state <= LPC_TX_TRANSFER_STATE_FR02;
                    end

                    lpc_data_out <= LPC_CODEWORD_SYNC_LWAIT;
                end
                LPC_TX_TRANSFER_STATE_FR02: begin
                    if (!continue_reg_tx) begin
                        continue_cont <= 0;
                        tx_transfer_state <= LPC_TX_TRANSFER_STATE_FR03;
                    end

                    // Set up the transfer: rewind the staging BRAM read port.
                    lpc_fw_input_xfer_read_addr <= 0;
                    fw_cycle_tx_byte_counter <= 0;

                    lpc_data_out <= LPC_CODEWORD_SYNC_LWAIT;
                end
                LPC_TX_TRANSFER_STATE_FR03: begin
                    // Drive SYNC READY for the firmware cycle.
                    lpc_data_out <= LPC_CODEWORD_SYNC_READY;
                    tx_transfer_state <= LPC_TX_TRANSFER_STATE_FR04;
                end
                LPC_TX_TRANSFER_STATE_FR04: begin
                    // Latch the current byte, transmit its low nibble, and
                    // queue up the next byte read from the staging BRAM.
                    lpc_tx_data_buffer <= lpc_fw_input_xfer_read_data;
                    lpc_data_out <= lpc_fw_input_xfer_read_data[3:0];
                    fw_cycle_tx_byte_counter <= fw_cycle_tx_byte_counter + 1;
                    lpc_fw_input_xfer_read_addr <= fw_cycle_tx_byte_counter + 1;
                    tx_transfer_state <= LPC_TX_TRANSFER_STATE_FR05;
                end
                LPC_TX_TRANSFER_STATE_FR05: begin
                    // Transmit the high nibble, then either loop for another
                    // byte or finish when the MSIZE-encoded count is reached.
                    lpc_data_out <= lpc_tx_data_buffer[7:4];

                    case (fw_cycle_msize)
                        4'b0000: tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR09;
                        4'b0001: tx_transfer_state <=
                            (fw_cycle_tx_byte_counter >= 1)
                                ? LPC_TX_TRANSFER_STATE_TR09
                                : LPC_TX_TRANSFER_STATE_FR04;
                        4'b0010: tx_transfer_state <=
                            (fw_cycle_tx_byte_counter >= 4)
                                ? LPC_TX_TRANSFER_STATE_TR09
                                : LPC_TX_TRANSFER_STATE_FR04;
                        4'b0100: tx_transfer_state <=
                            (fw_cycle_tx_byte_counter >= 16)
                                ? LPC_TX_TRANSFER_STATE_TR09
                                : LPC_TX_TRANSFER_STATE_FR04;
                        4'b0111: tx_transfer_state <=
                            (fw_cycle_tx_byte_counter >= 128)
                                ? LPC_TX_TRANSFER_STATE_TR09
                                : LPC_TX_TRANSFER_STATE_FR04;
                        // Disallowed size codeword: abort the cycle via
                        // turnaround.
                        default: tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR09;
                    endcase
                end
                default: begin
                    // Should never be reached.  If a glitch lands here, head
                    // for turnaround to unlock the bus.
                    lpc_data_out <= LPC_CODEWORD_SYNC_LWAIT;
                    tx_transfer_state <= LPC_TX_TRANSFER_STATE_TR09;
                end
            endcase
        end
    end

    // Staging BRAM for firmware-cycle read data.
    PDPW16KD #(
        // NOTE(review): the explicit all-zero INITVAL_00-INITVAL_11 parameters
        // were dropped here; PDPW16KD INITVALxx default to all-zero, so the
        // initial RAM contents are unchanged.
.INITVAL_12(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_13(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_14(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_15(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_16(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_17(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_18(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_19(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_1A(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_1B(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_1C(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_1D(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_1E(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_1F(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_20(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_21(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_22(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_23(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_24(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + 
.INITVAL_25(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_26(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_27(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_28(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_29(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_2A(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_2B(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_2C(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_2D(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_2E(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_2F(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_30(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_31(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_32(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_33(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_34(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_35(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_36(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_37(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + 
.INITVAL_38(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_39(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_3A(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_3B(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_3C(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_3D(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_3E(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_3F(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + // Ordinarily this would be 2048 elements deep, 9 bits wide, but Yosys doesn't have support + // for bit widths other than 36 (https://github.com/YosysHQ/yosys/issues/2143) + // Since we don't need 2048 (or even 512) elements here, run it in 36 bit data mode with 512 elements + .DATA_WIDTH_W(36), + .DATA_WIDTH_R(36) + ) lpc_fw_cycle_input_xfer_bram( + .BE3(1'b1), + .BE2(1'b1), + .BE1(1'b1), + .BE0(1'b1), + .DI7(lpc_fw_input_xfer_write_data[7]), + .DI6(lpc_fw_input_xfer_write_data[6]), + .DI5(lpc_fw_input_xfer_write_data[5]), + .DI4(lpc_fw_input_xfer_write_data[4]), + .DI3(lpc_fw_input_xfer_write_data[3]), + .DI2(lpc_fw_input_xfer_write_data[2]), + .DI1(lpc_fw_input_xfer_write_data[1]), + .DI0(lpc_fw_input_xfer_write_data[0]), + .CSW2(1'b0), + .CSW1(1'b0), + .CSW0(1'b0), + .ADW8(lpc_fw_input_xfer_write_addr[8]), + .ADW7(lpc_fw_input_xfer_write_addr[7]), + .ADW6(lpc_fw_input_xfer_write_addr[6]), + .ADW5(lpc_fw_input_xfer_write_addr[5]), + .ADW4(lpc_fw_input_xfer_write_addr[4]), + .ADW3(lpc_fw_input_xfer_write_addr[3]), + .ADW2(lpc_fw_input_xfer_write_addr[2]), + .ADW1(lpc_fw_input_xfer_write_addr[1]), + .ADW0(lpc_fw_input_xfer_write_addr[0]), + // NOTE + // The ECP5 
block RAMs operate in a non-intuitive manner. + // To connect both ports to the same set of memory cells, one port has to be attached + // 1:1 in address and data lines, while the other is offset (data + 18, address + 5). + .DO25(lpc_fw_input_xfer_read_data[7]), + .DO24(lpc_fw_input_xfer_read_data[6]), + .DO23(lpc_fw_input_xfer_read_data[5]), + .DO22(lpc_fw_input_xfer_read_data[4]), + .DO21(lpc_fw_input_xfer_read_data[3]), + .DO20(lpc_fw_input_xfer_read_data[2]), + .DO19(lpc_fw_input_xfer_read_data[1]), + .DO18(lpc_fw_input_xfer_read_data[0]), + .CSR2(1'b0), + .CSR1(1'b0), + .CSR0(1'b0), + .ADR13(lpc_fw_input_xfer_read_addr[8]), + .ADR12(lpc_fw_input_xfer_read_addr[7]), + .ADR11(lpc_fw_input_xfer_read_addr[6]), + .ADR10(lpc_fw_input_xfer_read_addr[5]), + .ADR9(lpc_fw_input_xfer_read_addr[4]), + .ADR8(lpc_fw_input_xfer_read_addr[3]), + .ADR7(lpc_fw_input_xfer_read_addr[2]), + .ADR6(lpc_fw_input_xfer_read_addr[1]), + .ADR5(lpc_fw_input_xfer_read_addr[0]), + .ADR4(1'b0), + .ADR3(1'b0), + .ADR2(1'b0), + .ADR1(1'b0), + .ADR0(1'b0), + .CEW(lpc_fw_input_xfer_write_wren), + .CLKW(lpc_fw_input_xfer_write_clk), + .CER(1'b1), + .CLKR(lpc_clock), + .OCER(1'b1), + .RST(1'b0) + ); + + PDPW16KD #( + .INITVAL_00(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_01(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_02(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_03(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_04(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_05(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_06(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + 
.INITVAL_07(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_08(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_09(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_0A(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_0B(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_0C(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_0D(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_0E(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_0F(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_10(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_11(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_12(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_13(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_14(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_15(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_16(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_17(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_18(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_19(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + 
.INITVAL_1A(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_1B(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_1C(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_1D(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_1E(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_1F(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_20(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_21(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_22(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_23(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_24(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_25(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_26(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_27(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_28(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_29(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_2A(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_2B(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_2C(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + 
.INITVAL_2D(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_2E(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_2F(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_30(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_31(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_32(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_33(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_34(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_35(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_36(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_37(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_38(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_39(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_3A(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_3B(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_3C(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_3D(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_3E(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_3F(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + // Ordinarily this would be 2048 elements deep, 9 bits wide, 
but Yosys doesn't have support + // for bit widths other than 36 (https://github.com/YosysHQ/yosys/issues/2143) + // Since we don't need 2048 (or even 512) elements here, run it in 36 bit data mode with 512 elements + .DATA_WIDTH_W(36), + .DATA_WIDTH_R(36) + ) lpc_fw_cycle_output_xfer_bram( + .BE3(1'b1), + .BE2(1'b1), + .BE1(1'b1), + .BE0(1'b1), + .DI7(lpc_fw_output_xfer_write_data[7]), + .DI6(lpc_fw_output_xfer_write_data[6]), + .DI5(lpc_fw_output_xfer_write_data[5]), + .DI4(lpc_fw_output_xfer_write_data[4]), + .DI3(lpc_fw_output_xfer_write_data[3]), + .DI2(lpc_fw_output_xfer_write_data[2]), + .DI1(lpc_fw_output_xfer_write_data[1]), + .DI0(lpc_fw_output_xfer_write_data[0]), + .CSW2(1'b0), + .CSW1(1'b0), + .CSW0(1'b0), + .ADW8(lpc_fw_output_xfer_write_addr[8]), + .ADW7(lpc_fw_output_xfer_write_addr[7]), + .ADW6(lpc_fw_output_xfer_write_addr[6]), + .ADW5(lpc_fw_output_xfer_write_addr[5]), + .ADW4(lpc_fw_output_xfer_write_addr[4]), + .ADW3(lpc_fw_output_xfer_write_addr[3]), + .ADW2(lpc_fw_output_xfer_write_addr[2]), + .ADW1(lpc_fw_output_xfer_write_addr[1]), + .ADW0(lpc_fw_output_xfer_write_addr[0]), + // NOTE + // The ECP5 block RAMs operate in a non-intuitive manner. + // To connect both ports to the same set of memory cells, one port has to be attached + // 1:1 in address and data lines, while the other is offset (data + 18, address + 5). 
+ .DO25(lpc_fw_output_xfer_read_data[7]), + .DO24(lpc_fw_output_xfer_read_data[6]), + .DO23(lpc_fw_output_xfer_read_data[5]), + .DO22(lpc_fw_output_xfer_read_data[4]), + .DO21(lpc_fw_output_xfer_read_data[3]), + .DO20(lpc_fw_output_xfer_read_data[2]), + .DO19(lpc_fw_output_xfer_read_data[1]), + .DO18(lpc_fw_output_xfer_read_data[0]), + .CSR2(1'b0), + .CSR1(1'b0), + .CSR0(1'b0), + .ADR13(lpc_fw_output_xfer_read_addr[8]), + .ADR12(lpc_fw_output_xfer_read_addr[7]), + .ADR11(lpc_fw_output_xfer_read_addr[6]), + .ADR10(lpc_fw_output_xfer_read_addr[5]), + .ADR9(lpc_fw_output_xfer_read_addr[4]), + .ADR8(lpc_fw_output_xfer_read_addr[3]), + .ADR7(lpc_fw_output_xfer_read_addr[2]), + .ADR6(lpc_fw_output_xfer_read_addr[1]), + .ADR5(lpc_fw_output_xfer_read_addr[0]), + .ADR4(1'b0), + .ADR3(1'b0), + .ADR2(1'b0), + .ADR1(1'b0), + .ADR0(1'b0), + .CEW(lpc_fw_output_xfer_write_wren), + .CLKW(lpc_clock), + .CER(1'b1), + .CLKR(lpc_fw_output_xfer_read_clk), + .OCER(1'b1), + .RST(1'b0) + ); + + PDPW16KD #( + .INITVAL_00(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_01(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_02(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_03(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_04(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_05(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_06(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_07(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_08(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + 
.INITVAL_09(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_0A(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_0B(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_0C(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_0D(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_0E(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_0F(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_10(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_11(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_12(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_13(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_14(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_15(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_16(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_17(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_18(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_19(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_1A(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_1B(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + 
.INITVAL_1C(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_1D(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_1E(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_1F(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_20(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_21(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_22(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_23(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_24(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_25(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_26(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_27(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_28(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_29(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_2A(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_2B(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_2C(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_2D(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_2E(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + 
.INITVAL_2F(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_30(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_31(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_32(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_33(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_34(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_35(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_36(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_37(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_38(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_39(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_3A(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_3B(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_3C(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_3D(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_3E(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_3F(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + // Ordinarily this would be 2048 elements deep, 9 bits wide, but Yosys doesn't have support + // for bit widths other than 36 (https://github.com/YosysHQ/yosys/issues/2143) + // Since we don't need 2048 (or even 512) elements here, run it in 36 bit data mode with 
512 elements + .DATA_WIDTH_W(36), + .DATA_WIDTH_R(36) + ) ipmi_bt_cycle_input_xfer_bram( + .BE3(1'b1), + .BE2(1'b1), + .BE1(1'b1), + .BE0(1'b1), + .DI7(ipmi_bt_input_xfer_write_data[7]), + .DI6(ipmi_bt_input_xfer_write_data[6]), + .DI5(ipmi_bt_input_xfer_write_data[5]), + .DI4(ipmi_bt_input_xfer_write_data[4]), + .DI3(ipmi_bt_input_xfer_write_data[3]), + .DI2(ipmi_bt_input_xfer_write_data[2]), + .DI1(ipmi_bt_input_xfer_write_data[1]), + .DI0(ipmi_bt_input_xfer_write_data[0]), + .CSW2(1'b0), + .CSW1(1'b0), + .CSW0(1'b0), + .ADW8(ipmi_bt_input_xfer_write_addr[8]), + .ADW7(ipmi_bt_input_xfer_write_addr[7]), + .ADW6(ipmi_bt_input_xfer_write_addr[6]), + .ADW5(ipmi_bt_input_xfer_write_addr[5]), + .ADW4(ipmi_bt_input_xfer_write_addr[4]), + .ADW3(ipmi_bt_input_xfer_write_addr[3]), + .ADW2(ipmi_bt_input_xfer_write_addr[2]), + .ADW1(ipmi_bt_input_xfer_write_addr[1]), + .ADW0(ipmi_bt_input_xfer_write_addr[0]), + // NOTE + // The ECP5 block RAMs operate in a non-intuitive manner. + // To connect both ports to the same set of memory cells, one port has to be attached + // 1:1 in address and data lines, while the other is offset (data + 18, address + 5). 
+ .DO25(ipmi_bt_input_xfer_read_data[7]), + .DO24(ipmi_bt_input_xfer_read_data[6]), + .DO23(ipmi_bt_input_xfer_read_data[5]), + .DO22(ipmi_bt_input_xfer_read_data[4]), + .DO21(ipmi_bt_input_xfer_read_data[3]), + .DO20(ipmi_bt_input_xfer_read_data[2]), + .DO19(ipmi_bt_input_xfer_read_data[1]), + .DO18(ipmi_bt_input_xfer_read_data[0]), + .CSR2(1'b0), + .CSR1(1'b0), + .CSR0(1'b0), + .ADR13(ipmi_bt_input_xfer_read_addr[8]), + .ADR12(ipmi_bt_input_xfer_read_addr[7]), + .ADR11(ipmi_bt_input_xfer_read_addr[6]), + .ADR10(ipmi_bt_input_xfer_read_addr[5]), + .ADR9(ipmi_bt_input_xfer_read_addr[4]), + .ADR8(ipmi_bt_input_xfer_read_addr[3]), + .ADR7(ipmi_bt_input_xfer_read_addr[2]), + .ADR6(ipmi_bt_input_xfer_read_addr[1]), + .ADR5(ipmi_bt_input_xfer_read_addr[0]), + .ADR4(1'b0), + .ADR3(1'b0), + .ADR2(1'b0), + .ADR1(1'b0), + .ADR0(1'b0), + .CEW(ipmi_bt_input_xfer_write_wren), + .CLKW(ipmi_bt_input_xfer_write_clk), + .CER(1'b1), + .CLKR(lpc_clock), + .OCER(1'b1), + .RST(1'b0) + ); + + PDPW16KD #( + .INITVAL_00(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_01(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_02(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_03(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_04(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_05(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_06(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_07(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_08(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + 
.INITVAL_09(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_0A(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_0B(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_0C(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_0D(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_0E(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_0F(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_10(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_11(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_12(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_13(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_14(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_15(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_16(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_17(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_18(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_19(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_1A(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_1B(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + 
.INITVAL_1C(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_1D(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_1E(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_1F(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_20(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_21(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_22(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_23(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_24(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_25(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_26(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_27(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_28(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_29(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_2A(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_2B(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_2C(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_2D(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_2E(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + 
.INITVAL_2F(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_30(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_31(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_32(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_33(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_34(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_35(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_36(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_37(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_38(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_39(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_3A(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_3B(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_3C(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_3D(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_3E(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + .INITVAL_3F(320'h00000000000000000000000000000000000000000000000000000000000000000000000000000000), + // Ordinarily this would be 2048 elements deep, 9 bits wide, but Yosys doesn't have support + // for bit widths other than 36 (https://github.com/YosysHQ/yosys/issues/2143) + // Since we don't need 2048 (or even 512) elements here, run it in 36 bit data mode with 
512 elements + .DATA_WIDTH_W(36), + .DATA_WIDTH_R(36) + ) ipmi_bt_cycle_output_xfer_bram( + .BE3(1'b1), + .BE2(1'b1), + .BE1(1'b1), + .BE0(1'b1), + .DI7(ipmi_bt_output_xfer_write_data[7]), + .DI6(ipmi_bt_output_xfer_write_data[6]), + .DI5(ipmi_bt_output_xfer_write_data[5]), + .DI4(ipmi_bt_output_xfer_write_data[4]), + .DI3(ipmi_bt_output_xfer_write_data[3]), + .DI2(ipmi_bt_output_xfer_write_data[2]), + .DI1(ipmi_bt_output_xfer_write_data[1]), + .DI0(ipmi_bt_output_xfer_write_data[0]), + .CSW2(1'b0), + .CSW1(1'b0), + .CSW0(1'b0), + .ADW8(ipmi_bt_output_xfer_write_addr[8]), + .ADW7(ipmi_bt_output_xfer_write_addr[7]), + .ADW6(ipmi_bt_output_xfer_write_addr[6]), + .ADW5(ipmi_bt_output_xfer_write_addr[5]), + .ADW4(ipmi_bt_output_xfer_write_addr[4]), + .ADW3(ipmi_bt_output_xfer_write_addr[3]), + .ADW2(ipmi_bt_output_xfer_write_addr[2]), + .ADW1(ipmi_bt_output_xfer_write_addr[1]), + .ADW0(ipmi_bt_output_xfer_write_addr[0]), + // NOTE + // The ECP5 block RAMs operate in a non-intuitive manner. + // To connect both ports to the same set of memory cells, one port has to be attached + // 1:1 in address and data lines, while the other is offset (data + 18, address + 5). 
+ .DO25(ipmi_bt_output_xfer_read_data[7]), + .DO24(ipmi_bt_output_xfer_read_data[6]), + .DO23(ipmi_bt_output_xfer_read_data[5]), + .DO22(ipmi_bt_output_xfer_read_data[4]), + .DO21(ipmi_bt_output_xfer_read_data[3]), + .DO20(ipmi_bt_output_xfer_read_data[2]), + .DO19(ipmi_bt_output_xfer_read_data[1]), + .DO18(ipmi_bt_output_xfer_read_data[0]), + .CSR2(1'b0), + .CSR1(1'b0), + .CSR0(1'b0), + .ADR13(ipmi_bt_output_xfer_read_addr[8]), + .ADR12(ipmi_bt_output_xfer_read_addr[7]), + .ADR11(ipmi_bt_output_xfer_read_addr[6]), + .ADR10(ipmi_bt_output_xfer_read_addr[5]), + .ADR9(ipmi_bt_output_xfer_read_addr[4]), + .ADR8(ipmi_bt_output_xfer_read_addr[3]), + .ADR7(ipmi_bt_output_xfer_read_addr[2]), + .ADR6(ipmi_bt_output_xfer_read_addr[1]), + .ADR5(ipmi_bt_output_xfer_read_addr[0]), + .ADR4(1'b0), + .ADR3(1'b0), + .ADR2(1'b0), + .ADR1(1'b0), + .ADR0(1'b0), + .CEW(ipmi_bt_output_xfer_write_wren), + .CLKW(lpc_clock), + .CER(1'b1), + .CLKR(ipmi_bt_output_xfer_read_clk), + .OCER(1'b1), + .RST(1'b0) + ); +endmodule diff --git a/aquila/third_party/async_fifo/async_bidir_fifo.v b/aquila/third_party/async_fifo/async_bidir_fifo.v new file mode 100644 index 0000000..e3e9d3c --- /dev/null +++ b/aquila/third_party/async_fifo/async_bidir_fifo.v @@ -0,0 +1,194 @@ +//----------------------------------------------------------------------------- +// Copyright 2017 Damien Pretet ThotIP +// Copyright 2018 Julius Baxter +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
//-----------------------------------------------------------------------------

`timescale 1 ns / 1 ps
`default_nettype none

//
// Direction-switchable asynchronous (dual-clock) FIFO.
//
// Each side (a / b) lives in its own clock domain and can act as either the
// producer or the consumer, selected by that side's dir input
// (dir = 1: that side is writing, dir = 0: that side is reading).  Both
// sides maintain a full write/read pointer pair; gray-coded pointers are
// synchronised across the clock-domain boundary by the sync_ptr instances,
// so the transfer direction can be reversed without losing pointer state.
// Storage is a true dual-port RAM (fifomem_dp), one port per side.
//
module async_bidir_fifo

    #(
    parameter DSIZE = 8,           // data word width
    parameter ASIZE = 4,           // address width (FIFO depth = 2**ASIZE)
    parameter FALLTHROUGH = "TRUE" // First word fall-through
    ) (
    input  wire             a_clk,
    input  wire             a_rst_n,
    input  wire             a_winc,
    input  wire [DSIZE-1:0] a_wdata,
    input  wire             a_rinc,
    output wire [DSIZE-1:0] a_rdata,
    output wire             a_full,
    output wire             a_afull,
    output wire             a_empty,
    output wire             a_aempty,
    input  wire             a_dir, // dir = 1: this side is writing, dir = 0: this side is reading


    input  wire             b_clk,
    input  wire             b_rst_n,
    input  wire             b_winc,
    input  wire [DSIZE-1:0] b_wdata,
    input  wire             b_rinc,
    output wire [DSIZE-1:0] b_rdata,
    output wire             b_full,
    output wire             b_afull,
    output wire             b_empty,
    output wire             b_aempty,
    input  wire             b_dir  // dir = 1: this side is writing, dir = 0: this side is reading
    );

    wire [ASIZE-1:0] a_addr, b_addr;
    wire [ASIZE-1:0] a_waddr, a_raddr, b_waddr, b_raddr;
    wire [ ASIZE:0]  a_wptr, b_rptr, a2b_wptr, b2a_rptr;
    wire [ ASIZE:0]  a_rptr, b_wptr, a2b_rptr, b2a_wptr;

    // Each side owns one RAM port; drive its address with the write or the
    // read address depending on the currently selected direction.
    assign a_addr = a_dir ? a_waddr : a_raddr;
    assign b_addr = b_dir ? b_waddr : b_raddr;

    //////////////////////////////////////////////////////////////////////////////
    // A-side logic
    //////////////////////////////////////////////////////////////////////////////

    // Sync b write pointer into the a domain
    sync_ptr #(ASIZE)
    sync_b2a_wptr
    (
        .dest_clk   (a_clk),
        .dest_rst_n (a_rst_n),
        .src_ptr    (b_wptr),
        .dest_ptr   (b2a_wptr)
    );

    // Sync b read pointer into the a domain
    sync_ptr #(ASIZE)
    sync_b2a_rptr
    (
        .dest_clk   (a_clk),
        .dest_rst_n (a_rst_n),
        .src_ptr    (b_rptr),
        .dest_ptr   (b2a_rptr)
    );

    // Write-request handling for side a.
    // Outputs are meaningful when a_dir == 1 (a is the writing side) -- see
    // the "a_winc & a_dir" gating on the RAM interface below.  (The upstream
    // comment said dir == 0 here, contradicting the dir port definition and
    // the gating; corrected.)
    wptr_full #(ASIZE)
    a_wptr_inst
    (
        .wclk     (a_clk),
        .wrst_n   (a_rst_n),
        .winc     (a_winc),
        .wq2_rptr (b2a_rptr),
        .awfull   (a_afull),
        .wfull    (a_full),
        .waddr    (a_waddr),
        .wptr     (a_wptr)
    );

    // Read-pointer / empty-flag handling for side a.
    // Outputs are meaningful when a_dir == 0 (a is the reading side).
    rptr_empty #(ASIZE)
    a_rptr_inst
    (
        .rclk     (a_clk),
        .rrst_n   (a_rst_n),
        .rinc     (a_rinc),
        .rq2_wptr (b2a_wptr),
        .arempty  (a_aempty),
        .rempty   (a_empty),
        .raddr    (a_raddr),
        .rptr     (a_rptr)
    );

    //////////////////////////////////////////////////////////////////////////////
    // B-side logic
    //////////////////////////////////////////////////////////////////////////////

    // Sync a write pointer into the b domain
    sync_ptr #(ASIZE)
    sync_a2b_wptr
    (
        .dest_clk   (b_clk),
        .dest_rst_n (b_rst_n),
        .src_ptr    (a_wptr),
        .dest_ptr   (a2b_wptr)
    );

    // Sync a read pointer into the b domain
    sync_ptr #(ASIZE)
    sync_a2b_rptr
    (
        .dest_clk   (b_clk),
        .dest_rst_n (b_rst_n),
        .src_ptr    (a_rptr),
        .dest_ptr   (a2b_rptr)
    );

    // Write-request handling for side b.
    // Outputs are meaningful when b_dir == 1 (b is the writing side).
    wptr_full #(ASIZE)
    b_wptr_inst
    (
        .wclk     (b_clk),
        .wrst_n   (b_rst_n),
        .winc     (b_winc),
        .wq2_rptr (a2b_rptr),
        .awfull   (b_afull),
        .wfull    (b_full),
        .waddr    (b_waddr),
        .wptr     (b_wptr)
    );

    // Read-pointer / empty-flag handling for side b.
    // Outputs are meaningful when b_dir == 0 (b is the reading side).
    rptr_empty #(ASIZE)
    b_rptr_inst
    (
        .rclk     (b_clk),
        .rrst_n   (b_rst_n),
        .rinc     (b_rinc),
        .rq2_wptr (a2b_wptr),
        .arempty  (b_aempty),
        .rempty   (b_empty),
        .raddr    (b_raddr),
        .rptr     (b_rptr)
    );

    //////////////////////////////////////////////////////////////////////////////
    // FIFO RAM
    //////////////////////////////////////////////////////////////////////////////

    // Read/write strobes are gated by direction so only the side currently
    // acting as the writer drives its RAM port's write enable.
    fifomem_dp #(DSIZE, ASIZE, FALLTHROUGH)
    fifomem_dp
    (
        .a_clk   (a_clk),
        .a_wdata (a_wdata),
        .a_rdata (a_rdata),
        .a_addr  (a_addr),
        .a_rinc  (a_rinc & !a_dir),
        .a_winc  (a_winc & a_dir),

        .b_clk   (b_clk),
        .b_wdata (b_wdata),
        .b_rdata (b_rdata),
        .b_addr  (b_addr),
        .b_rinc  (b_rinc & !b_dir),
        .b_winc  (b_winc & b_dir)
    );

endmodule

`resetall
diff --git a/aquila/third_party/async_fifo/async_bidir_ramif_fifo.v b/aquila/third_party/async_fifo/async_bidir_ramif_fifo.v
new file mode 100644
index 0000000..6c44f48
--- /dev/null
+++ b/aquila/third_party/async_fifo/async_bidir_ramif_fifo.v
@@ -0,0 +1,201 @@
+//-----------------------------------------------------------------------------
+// Copyright 2017 Damien Pretet ThotIP
+// Copyright 2018 Julius Baxter
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
//-----------------------------------------------------------------------------

`timescale 1 ns / 1 ps
`default_nettype none

//
// Direction-switchable asynchronous (dual-clock) FIFO with an external RAM
// interface.  Identical control logic to async_bidir_fifo, but instead of
// instantiating fifomem_dp internally it exports the two RAM ports
// (o_ram_a_* / o_ram_b_*) so the caller can attach its own dual-port memory
// (e.g. a technology-specific block RAM).
//
// Each side (a / b) can act as producer or consumer, selected by its dir
// input (dir = 1: that side is writing, dir = 0: that side is reading).
//
module async_bidir_ramif_fifo

    #(
    parameter DSIZE = 8,            // data word width
    parameter ASIZE = 4,            // address width (FIFO depth = 2**ASIZE)
    parameter FALLTHROUGH = "FALSE" // First word fall-through, not sure it can be disabled for this
    ) (
    input  wire             a_clk,
    input  wire             a_rst_n,
    input  wire             a_winc,
    input  wire [DSIZE-1:0] a_wdata,
    input  wire             a_rinc,
    output wire [DSIZE-1:0] a_rdata,
    output wire             a_full,
    output wire             a_afull,
    output wire             a_empty,
    output wire             a_aempty,
    input  wire             a_dir, // dir = 1: this side is writing, dir = 0: this side is reading


    input  wire             b_clk,
    input  wire             b_rst_n,
    input  wire             b_winc,
    input  wire [DSIZE-1:0] b_wdata,
    input  wire             b_rinc,
    output wire [DSIZE-1:0] b_rdata,
    output wire             b_full,
    output wire             b_afull,
    output wire             b_empty,
    output wire             b_aempty,
    input  wire             b_dir, // dir = 1: this side is writing, dir = 0: this side is reading

    // Dual-port RAM interface
    output wire             o_ram_a_clk,
    output wire [DSIZE-1:0] o_ram_a_wdata,
    input  wire [DSIZE-1:0] i_ram_a_rdata,
    output wire [ASIZE-1:0] o_ram_a_addr,
    output wire             o_ram_a_rinc,
    output wire             o_ram_a_winc,
    output wire             o_ram_b_clk,
    output wire [DSIZE-1:0] o_ram_b_wdata,
    input  wire [DSIZE-1:0] i_ram_b_rdata,
    output wire [ASIZE-1:0] o_ram_b_addr,
    output wire             o_ram_b_rinc,
    output wire             o_ram_b_winc
    );

    wire [ASIZE-1:0] a_addr, b_addr;
    wire [ASIZE-1:0] a_waddr, a_raddr, b_waddr, b_raddr;
    wire [ ASIZE:0]  a_wptr, b_rptr, a2b_wptr, b2a_rptr;
    wire [ ASIZE:0]  a_rptr, b_wptr, a2b_rptr, b2a_wptr;

    // Each side owns one RAM port; drive its address with the write or the
    // read address depending on the currently selected direction.
    assign a_addr = a_dir ? a_waddr : a_raddr;
    assign b_addr = b_dir ? b_waddr : b_raddr;

    //////////////////////////////////////////////////////////////////////////////
    // A-side logic
    //////////////////////////////////////////////////////////////////////////////

    // Sync b write pointer into the a domain
    sync_ptr #(ASIZE)
    sync_b2a_wptr
    (
        .dest_clk   (a_clk),
        .dest_rst_n (a_rst_n),
        .src_ptr    (b_wptr),
        .dest_ptr   (b2a_wptr)
    );

    // Sync b read pointer into the a domain
    sync_ptr #(ASIZE)
    sync_b2a_rptr
    (
        .dest_clk   (a_clk),
        .dest_rst_n (a_rst_n),
        .src_ptr    (b_rptr),
        .dest_ptr   (b2a_rptr)
    );

    // Write-request handling for side a.
    // Outputs are meaningful when a_dir == 1 (a is the writing side) -- see
    // the "a_winc & a_dir" gating on the RAM interface below.  (The upstream
    // comment said dir == 0 here, contradicting the dir port definition and
    // the gating; corrected.)
    wptr_full #(ASIZE)
    a_wptr_inst
    (
        .wclk     (a_clk),
        .wrst_n   (a_rst_n),
        .winc     (a_winc),
        .wq2_rptr (b2a_rptr),
        .awfull   (a_afull),
        .wfull    (a_full),
        .waddr    (a_waddr),
        .wptr     (a_wptr)
    );

    // Read-pointer / empty-flag handling for side a.
    // Outputs are meaningful when a_dir == 0 (a is the reading side).
    rptr_empty #(ASIZE)
    a_rptr_inst
    (
        .rclk     (a_clk),
        .rrst_n   (a_rst_n),
        .rinc     (a_rinc),
        .rq2_wptr (b2a_wptr),
        .arempty  (a_aempty),
        .rempty   (a_empty),
        .raddr    (a_raddr),
        .rptr     (a_rptr)
    );

    //////////////////////////////////////////////////////////////////////////////
    // B-side logic
    //////////////////////////////////////////////////////////////////////////////

    // Sync a write pointer into the b domain
    sync_ptr #(ASIZE)
    sync_a2b_wptr
    (
        .dest_clk   (b_clk),
        .dest_rst_n (b_rst_n),
        .src_ptr    (a_wptr),
        .dest_ptr   (a2b_wptr)
    );

    // Sync a read pointer into the b domain
    sync_ptr #(ASIZE)
    sync_a2b_rptr
    (
        .dest_clk   (b_clk),
        .dest_rst_n (b_rst_n),
        .src_ptr    (a_rptr),
        .dest_ptr   (a2b_rptr)
    );

    // Write-request handling for side b.
    // Outputs are meaningful when b_dir == 1 (b is the writing side).
    wptr_full #(ASIZE)
    b_wptr_inst
    (
        .wclk     (b_clk),
        .wrst_n   (b_rst_n),
        .winc     (b_winc),
        .wq2_rptr (a2b_rptr),
        .awfull   (b_afull),
        .wfull    (b_full),
        .waddr    (b_waddr),
        .wptr     (b_wptr)
    );

    // Read-pointer / empty-flag handling for side b.
    // Outputs are meaningful when b_dir == 0 (b is the reading side).
    rptr_empty #(ASIZE)
    b_rptr_inst
    (
        .rclk     (b_clk),
        .rrst_n   (b_rst_n),
        .rinc     (b_rinc),
        .rq2_wptr (a2b_wptr),
        .arempty  (b_aempty),
        .rempty   (b_empty),
        .raddr    (b_raddr),
        .rptr     (b_rptr)
    );

    //////////////////////////////////////////////////////////////////////////////
    // FIFO RAM interface
    //////////////////////////////////////////////////////////////////////////////

    // Read/write strobes are gated by direction so only the side currently
    // acting as the writer asserts its external RAM write enable.
    assign o_ram_a_clk   = a_clk;
    assign o_ram_a_wdata = a_wdata;
    assign a_rdata       = i_ram_a_rdata;
    assign o_ram_a_addr  = a_addr;
    assign o_ram_a_rinc  = a_rinc & !a_dir;
    assign o_ram_a_winc  = a_winc & a_dir;
    assign o_ram_b_clk   = b_clk;
    assign o_ram_b_wdata = b_wdata;
    assign b_rdata       = i_ram_b_rdata;
    assign o_ram_b_addr  = b_addr;
    assign o_ram_b_rinc  = b_rinc & !b_dir;
    assign o_ram_b_winc  = b_winc & b_dir;

endmodule

`resetall
diff --git a/aquila/third_party/async_fifo/async_fifo.v b/aquila/third_party/async_fifo/async_fifo.v
new file mode 100644
index 0000000..3d272c0
--- /dev/null
+++ b/aquila/third_party/async_fifo/async_fifo.v
@@ -0,0 +1,120 @@
+//-----------------------------------------------------------------------------
+// Copyright 2017 Damien Pretet ThotIP
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
//-----------------------------------------------------------------------------

`timescale 1 ns / 1 ps
`default_nettype none

//
// Asynchronous (dual-clock) FIFO built from gray-coded pointer logic.
// In addition to the usual status flags, this variant exports the read and
// write pointers into both clock domains (rclk_* / wclk_* outputs) for use
// by the surrounding logic (e.g. the fill-level estimates in the LPC core).
//
module async_fifo

    #(
    parameter DSIZE = 8,           // data word width
    parameter ASIZE = 4,           // address width (FIFO depth = 2**ASIZE)
    parameter FALLTHROUGH = "TRUE" // First word fall-through
    )(
    input  wire             wclk,
    input  wire             wrst_n,
    input  wire             winc,
    input  wire [DSIZE-1:0] wdata,
    output wire             wfull,
    output wire             awfull,
    input  wire             rclk,
    input  wire             rrst_n,
    input  wire             rinc,
    output wire [DSIZE-1:0] rdata,
    output wire             rempty,
    output wire             arempty,
    output wire [ASIZE :0]  rclk_rptr,
    output wire [ASIZE :0]  rclk_wptr,
    output wire [ASIZE :0]  wclk_rptr,
    output wire [ASIZE :0]  wclk_wptr
    );

    // Gray-coded pointers and their cross-domain synchronised copies.
    wire [ ASIZE:0]  wptr_gray, rptr_gray;
    wire [ ASIZE:0]  rptr_gray_wsync, wptr_gray_rsync;
    // Binary RAM addresses, each kept in its native clock domain.
    wire [ASIZE-1:0] waddr_bin, raddr_bin;

    // NOTE(review): the native-domain pointer outputs below are the
    // ASIZE-bit binary RAM address zero-extended -- the wrap (MSB) bit is
    // not carried.  Confirm that consumers of wclk_wptr / rclk_rptr
    // account for this when computing fill levels.
    assign wclk_wptr = {1'b0, waddr_bin};
    assign rclk_rptr = {1'b0, raddr_bin};

    // Bring the read pointer across into the write clock domain
    sync_r2w
    #(ASIZE)
    sync_r2w (
        .wclk     (wclk),
        .wrst_n   (wrst_n),
        .rptr     (rptr_gray),
        .wq2_rptr (rptr_gray_wsync)
    );

    // Bring the write pointer across into the read clock domain
    sync_w2r
    #(ASIZE)
    sync_w2r (
        .rclk     (rclk),
        .rrst_n   (rrst_n),
        .wptr     (wptr_gray),
        .rq2_wptr (wptr_gray_rsync)
    );

    // Write-side pointer and full/almost-full flag generation; also exports
    // the synchronised read pointer (the *_binary port) to the write domain.
    wptr_full
    #(ASIZE)
    wptr_full (
        .wclk            (wclk),
        .wrst_n          (wrst_n),
        .winc            (winc),
        .wq2_rptr        (rptr_gray_wsync),
        .wq2_rptr_binary (wclk_rptr),
        .awfull          (awfull),
        .wfull           (wfull),
        .waddr           (waddr_bin),
        .wptr            (wptr_gray)
    );

    // Dual-clock storage RAM
    fifomem
    #(DSIZE, ASIZE, FALLTHROUGH)
    fifomem (
        .wclk   (wclk),
        .wclken (winc),
        .wfull  (wfull),
        .waddr  (waddr_bin),
        .wdata  (wdata),
        .rclk   (rclk),
        .rclken (rinc),
        .raddr  (raddr_bin),
        .rdata  (rdata)
    );

    // Read-side pointer and empty/almost-empty flag generation; also exports
    // the synchronised write pointer (the *_binary port) to the read domain.
    rptr_empty
    #(ASIZE)
    rptr_empty (
        .rclk            (rclk),
        .rrst_n          (rrst_n),
        .rinc            (rinc),
        .rq2_wptr        (wptr_gray_rsync),
        .rq2_wptr_binary (rclk_wptr),
        .arempty         (arempty),
        .rempty          (rempty),
        .raddr           (raddr_bin),
        .rptr            (rptr_gray)
    );

endmodule

`resetall
diff --git a/aquila/third_party/async_fifo/fifo_2mem.v b/aquila/third_party/async_fifo/fifo_2mem.v
new file mode 100644
index 0000000..7540293
--- /dev/null
+++ b/aquila/third_party/async_fifo/fifo_2mem.v
@@ -0,0 +1,64 @@
+//-----------------------------------------------------------------------------
+// Copyright 2017 Damien Pretet ThotIP
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//----------------------------------------------------------------------------- + +`timescale 1 ns / 1 ps +`default_nettype none + +module fifomem + + #( + parameter DATASIZE = 8, // Memory data word width + parameter ADDRSIZE = 4, // Number of mem address bits + parameter FALLTHROUGH = "TRUE" // First word fall-through + ) ( + input wire wclk, + input wire wclken, + input wire [ADDRSIZE-1:0] waddr, + input wire [DATASIZE-1:0] wdata, + input wire wfull, + input wire rclk, + input wire rclken, + input wire [ADDRSIZE-1:0] raddr, + output reg [DATASIZE-1:0] rdata + ); + + localparam DEPTH = 1<> 1) ^ rbinnext; + assign rgraynextm1 = ((rbinnext + 1'b1) >> 1) ^ (rbinnext + 1'b1); + + //--------------------------------------------------------------- + // FIFO empty when the next rptr == synchronized wptr or on reset + //--------------------------------------------------------------- + assign rempty_val = (rgraynext == rq2_wptr); + assign arempty_val = (rgraynextm1 == rq2_wptr); + + generate genvar i; + for (i=0; i> 1) ^ wbinnext; + assign wgraynextp1 = ((wbinnext + 1'b1) >> 1) ^ (wbinnext + 1'b1); + + //------------------------------------------------------------------ + // Simplified version of the three necessary full-tests: + // assign wfull_val=((wgnext[ADDRSIZE] !=wq2_rptr[ADDRSIZE] ) && + // (wgnext[ADDRSIZE-1] !=wq2_rptr[ADDRSIZE-1]) && + // (wgnext[ADDRSIZE-2:0]==wq2_rptr[ADDRSIZE-2:0])); + //------------------------------------------------------------------ + + assign wfull_val = (wgraynext == {~wq2_rptr[ADDRSIZE:ADDRSIZE-1],wq2_rptr[ADDRSIZE-2:0]}); + assign awfull_val = (wgraynextp1 == {~wq2_rptr[ADDRSIZE:ADDRSIZE-1],wq2_rptr[ADDRSIZE-2:0]}); + + generate genvar i; + for (i=0; i vuart1_h2b_fifo_wptr)?(vuart1_h2b_fifo_wptr-vuart1_h2b_fifo_rptr):((vuart1_h2b_fifo_wptr+16)-vuart1_h2b_fifo_rptr); + + async_fifo #( + .DSIZE(8), + .ASIZE(4), + .FALLTHROUGH("TRUE") + ) vuart1_h2b_fifo ( + .wclk(lpc_clock), + .wrst_n(!peripheral_reset && lpc_slave_lpc_reset_n && 
!vuart1_h2b_fifo_reset), + .winc(vuart1_h2b_fifo_wwren), + .wdata(vuart1_h2b_fifo_wdata), + .wfull(vuart1_h2b_fifo_wfull), + .awfull(vuart1_h2b_fifo_walmost_full), + .rclk(peripheral_clock), + .rrst_n(!peripheral_reset && lpc_reset_n_sync[2] && !vuart1_h2b_fifo_reset_sync[2]), + .rinc(vuart1_h2b_fifo_rpop), + .rdata(vuart1_h2b_fifo_rdata), + .rempty(vuart1_h2b_fifo_rempty), + .arempty(vuart1_h2b_fifo_ralmost_empty), + .rclk_rptr(vuart1_h2b_fifo_rptr), + .rclk_wptr(vuart1_h2b_fifo_wptr) + ); + + wire vuart1_b2h_fifo_reset; + + reg vuart1_b2h_fifo_wwren = 0; + reg [7:0] vuart1_b2h_fifo_wdata = 0; + wire vuart1_b2h_fifo_wfull; + wire vuart1_b2h_fifo_walmost_full; + + wire vuart1_b2h_fifo_rpop; + wire [7:0] vuart1_b2h_fifo_rdata; + wire vuart1_b2h_fifo_rempty; + wire vuart1_b2h_fifo_ralmost_empty; + wire [4:0] vuart1_b2h_fifo_rptr; + wire [4:0] vuart1_b2h_fifo_wptr; + + reg [2:0] vuart1_b2h_fifo_wfull_sync = 0; + reg [2:0] vuart1_b2h_fifo_reset_sync = 0; + + // This is an "advisory" signal -- it is NOT to be used in lieu of the guaranteed reliable empty / full signals! + // The main purpose of this signal is simply to allow the VUART IRQ to be asserted while the FIFO is approximately + // at or above the configured data queue threshold. 
+ wire [4:0] vuart1_b2h_fifo_data_available_count; + assign vuart1_b2h_fifo_data_available_count = (vuart1_b2h_fifo_rptr > vuart1_b2h_fifo_wptr)?(vuart1_b2h_fifo_wptr-vuart1_b2h_fifo_rptr):((vuart1_b2h_fifo_wptr+16)-vuart1_b2h_fifo_rptr); + + async_fifo #( + .DSIZE(8), + .ASIZE(4), + .FALLTHROUGH("TRUE") + ) vuart1_b2h_fifo ( + .wclk(peripheral_clock), + .wrst_n(!peripheral_reset && lpc_reset_n_sync[2] && !vuart1_b2h_fifo_reset_sync[2]), + .winc(vuart1_b2h_fifo_wwren), + .wdata(vuart1_b2h_fifo_wdata), + .wfull(vuart1_b2h_fifo_wfull), + .awfull(vuart1_b2h_fifo_walmost_full), + .rclk(lpc_clock), + .rrst_n(!peripheral_reset_sync[1] && lpc_slave_lpc_reset_n && !vuart1_b2h_fifo_reset), + .rinc(vuart1_b2h_fifo_rpop), + .rdata(vuart1_b2h_fifo_rdata), + .rempty(vuart1_b2h_fifo_rempty), + .arempty(vuart1_b2h_fifo_ralmost_empty), + .rclk_rptr(vuart1_b2h_fifo_rptr), + .rclk_wptr(vuart1_b2h_fifo_wptr) + ); + + wire vuart2_h2b_fifo_reset; + + wire vuart2_h2b_fifo_wwren; + wire [7:0] vuart2_h2b_fifo_wdata; + wire vuart2_h2b_fifo_wfull; + wire vuart2_h2b_fifo_walmost_full; + + reg vuart2_h2b_fifo_rpop = 0; + wire [7:0] vuart2_h2b_fifo_rdata; + wire vuart2_h2b_fifo_rempty; + wire vuart2_h2b_fifo_ralmost_empty; + wire [4:0] vuart2_h2b_fifo_rptr; + wire [4:0] vuart2_h2b_fifo_wptr; + + reg [1:0] vuart2_h2b_fifo_rempty_sync = 0; + reg [2:0] vuart2_h2b_fifo_reset_sync = 0; + + wire vuart2_irqs_enabled; + reg vuart2_h2b_fifo_queue_past_trigger = 0; + reg vuart2_h2b_fifo_read_timeout = 0; + reg vuart2_h2b_fifo_irq = 0; + wire vuart2_h2b_fifo_irq_enabled; + reg [15:0] vuart2_h2b_fifo_read_timeout_counter = 0; + + // This is an "advisory" signal -- it is NOT to be used in lieu of the guaranteed reliable empty / full signals! + // The main purpose of this signal is simply to allow the VUART IRQ to be asserted while the FIFO is approximately + // at or above the configured data queue threshold. 
+ assign vuart2_h2b_fifo_data_available_count = (vuart2_h2b_fifo_rptr > vuart2_h2b_fifo_wptr)?(vuart2_h2b_fifo_wptr-vuart2_h2b_fifo_rptr):((vuart2_h2b_fifo_wptr+16)-vuart2_h2b_fifo_rptr); + + async_fifo #( + .DSIZE(8), + .ASIZE(4), + .FALLTHROUGH("TRUE") + ) vuart2_h2b_fifo ( + .wclk(lpc_clock), + .wrst_n(!peripheral_reset && lpc_slave_lpc_reset_n && !vuart2_h2b_fifo_reset), + .winc(vuart2_h2b_fifo_wwren), + .wdata(vuart2_h2b_fifo_wdata), + .wfull(vuart2_h2b_fifo_wfull), + .awfull(vuart2_h2b_fifo_walmost_full), + .rclk(peripheral_clock), + .rrst_n(!peripheral_reset && lpc_reset_n_sync[2] && !vuart2_h2b_fifo_reset_sync[2]), + .rinc(vuart2_h2b_fifo_rpop), + .rdata(vuart2_h2b_fifo_rdata), + .rempty(vuart2_h2b_fifo_rempty), + .arempty(vuart2_h2b_fifo_ralmost_empty), + .rclk_rptr(vuart2_h2b_fifo_rptr), + .rclk_wptr(vuart2_h2b_fifo_wptr) + ); + + wire vuart2_b2h_fifo_reset; + + reg vuart2_b2h_fifo_wwren = 0; + reg [7:0] vuart2_b2h_fifo_wdata = 0; + wire vuart2_b2h_fifo_wfull; + wire vuart2_b2h_fifo_walmost_full; + + wire vuart2_b2h_fifo_rpop; + wire [7:0] vuart2_b2h_fifo_rdata; + wire vuart2_b2h_fifo_rempty; + wire vuart2_b2h_fifo_ralmost_empty; + wire [4:0] vuart2_b2h_fifo_rptr; + wire [4:0] vuart2_b2h_fifo_wptr; + + reg [2:0] vuart2_b2h_fifo_wfull_sync = 0; + reg [2:0] vuart2_b2h_fifo_reset_sync = 0; + + // This is an "advisory" signal -- it is NOT to be used in lieu of the guaranteed reliable empty / full signals! + // The main purpose of this signal is simply to allow the VUART IRQ to be asserted while the FIFO is approximately + // at or above the configured data queue threshold. 
+ wire [4:0] vuart2_b2h_fifo_data_available_count; + assign vuart2_b2h_fifo_data_available_count = (vuart2_b2h_fifo_rptr > vuart2_b2h_fifo_wptr)?(vuart2_b2h_fifo_wptr-vuart2_b2h_fifo_rptr):((vuart2_b2h_fifo_wptr+16)-vuart2_b2h_fifo_rptr); + + async_fifo #( + .DSIZE(8), + .ASIZE(4), + .FALLTHROUGH("TRUE") + ) vuart2_b2h_fifo ( + .wclk(peripheral_clock), + .wrst_n(!peripheral_reset && lpc_reset_n_sync[2] && !vuart2_b2h_fifo_reset_sync[2]), + .winc(vuart2_b2h_fifo_wwren), + .wdata(vuart2_b2h_fifo_wdata), + .wfull(vuart2_b2h_fifo_wfull), + .awfull(vuart2_b2h_fifo_walmost_full), + .rclk(lpc_clock), + .rrst_n(!peripheral_reset_sync[1] && lpc_slave_lpc_reset_n && !vuart2_b2h_fifo_reset), + .rinc(vuart2_b2h_fifo_rpop), + .rdata(vuart2_b2h_fifo_rdata), + .rempty(vuart2_b2h_fifo_rempty), + .arempty(vuart2_b2h_fifo_ralmost_empty), + .rclk_rptr(vuart2_b2h_fifo_rptr), + .rclk_wptr(vuart2_b2h_fifo_wptr) + ); + + // IPMI BT signals + wire ipmi_bt_bmc_to_host_ctl_sms_ack; + wire ipmi_bt_bmc_to_host_ctl_attn_ack; + wire ipmi_bt_host_to_bmc_ctl_attn_req; + wire ipmi_bt_host_to_bmc_ctl_oem0_req; + wire ipmi_bt_irq_ack; + wire ipmi_bt_irq_bmc_reset; + wire ipmi_bt_host_to_bmc_ctl_h_busy; + wire ipmi_bt_irq_enable; + + reg ipmi_bt_bmc_to_host_ctl_sms_req = 0; + reg ipmi_bt_bmc_to_host_ctl_attn_req = 0; + reg ipmi_bt_bmc_to_host_ctl_sms_ack_cont = 0; + reg ipmi_bt_bmc_to_host_ctl_attn_ack_cont = 0; + reg ipmi_bt_host_to_bmc_ctl_attn_req_cont = 0; + reg ipmi_bt_host_to_bmc_ctl_oem0_req_cont = 0; + reg ipmi_bt_irq_ack_cont = 0; + reg ipmi_bt_irq_bmc_reset_cont = 0; + reg ipmi_bt_bmc_to_host_ctl_b_busy = 0; + reg ipmi_bt_irq_req = 0; + + reg ipmi_bt_bmc_irq = 0; + + reg [1:0] ipmi_bt_bmc_to_host_ctl_sms_req_sync = 0; + reg [1:0] ipmi_bt_bmc_to_host_ctl_attn_req_sync = 0; + reg [1:0] ipmi_bt_bmc_to_host_ctl_sms_ack_cont_sync = 0; + reg [1:0] ipmi_bt_bmc_to_host_ctl_attn_ack_cont_sync = 0; + reg [1:0] ipmi_bt_host_to_bmc_ctl_attn_req_cont_sync = 0; + reg [1:0] 
ipmi_bt_host_to_bmc_ctl_oem0_req_cont_sync = 0; + reg [1:0] ipmi_bt_irq_ack_cont_sync = 0; + reg [1:0] ipmi_bt_irq_bmc_reset_cont_sync = 0; + reg [1:0] ipmi_bt_bmc_to_host_ctl_b_busy_sync = 0; + reg [1:0] ipmi_bt_irq_req_sync = 0; + + reg [2:0] ipmi_bt_bmc_to_host_ctl_sms_ack_sync = 0; + reg [2:0] ipmi_bt_bmc_to_host_ctl_attn_ack_sync = 0; + reg [2:0] ipmi_bt_host_to_bmc_ctl_attn_req_sync = 0; + reg [2:0] ipmi_bt_host_to_bmc_ctl_oem0_req_sync = 0; + reg [2:0] ipmi_bt_irq_ack_sync = 0; + reg [2:0] ipmi_bt_irq_bmc_reset_sync = 0; + reg [2:0] ipmi_bt_host_to_bmc_ctl_h_busy_sync = 0; + reg [2:0] ipmi_bt_irq_enable_sync = 0; + + reg ipmi_bt_bmc_to_host_ctl_attn_req_prev = 0; + reg ipmi_bt_bmc_to_host_ctl_sms_req_prev = 0; + reg ipmi_bt_h2b_oem0_req_prev = 0; + + assign ipmi_bt_h2b_oem0_req = ipmi_bt_host_to_bmc_ctl_oem0_req_sync[2]; + assign ipmi_bt_sms_attn_req = ipmi_bt_bmc_to_host_ctl_sms_req; + assign ipmi_bt_b2h_attn_req = ipmi_bt_bmc_to_host_ctl_attn_req; + assign ipmi_bt_h2b_attn_req = ipmi_bt_host_to_bmc_ctl_attn_req_sync[2]; + assign ipmi_bt_h2b_ctl_h_busy = ipmi_bt_host_to_bmc_ctl_h_busy_sync[2]; + assign ipmi_bt_b2h_ctl_b_busy = ipmi_bt_bmc_to_host_ctl_b_busy; + + // IRQ control + assign vuart1_irqs_enabled = vuart1_control_register[31]; + assign vuart1_h2b_fifo_irq_enabled = vuart1_control_register[30]; + assign vuart2_irqs_enabled = vuart2_control_register[31]; + assign vuart2_h2b_fifo_irq_enabled = vuart2_control_register[30]; + + // IRQ connections + assign vuart1_irq_asserted = vuart1_h2b_fifo_irq; + assign vuart2_irq_asserted = vuart2_h2b_fifo_irq; + assign ipmi_bt_bmc_irq_asserted = ipmi_bt_bmc_irq; + assign lpc_io_cycle_irq_asserted = lpc_io_cycle_irq; + assign lpc_tpm_cycle_irq_asserted = lpc_tpm_cycle_irq; + assign lpc_firmware_cycle_irq_asserted = lpc_firmware_cycle_irq; + assign slave_irq_o = lpc_io_cycle_irq_asserted | lpc_tpm_cycle_irq_asserted | lpc_firmware_cycle_irq_asserted | vuart1_irq_asserted | vuart2_irq_asserted | 
ipmi_bt_bmc_irq_asserted;

// Instantiate slave
// LPC slave core proper (runs in the lpc_clock domain).  Control inputs
// originating in the peripheral clock domain are connected through their
// synchronizer taps (*_sync[1]); dual-port transfer buffers give the
// peripheral side its own clocked port (peripheral_clock).
// NOTE(review): the port name "continue" is an escaped-keyword hazard under
// SystemVerilog tooling -- legal in Verilog-2001; confirm toolchain mode.
lpc_slave_interface lpc_slave_interface(
	// LPC cycle address/data handshake with the MMIO/transfer state machines
	.address(lpc_slave_address),
	.tx_data(lpc_slave_tx_data),
	.rx_data(lpc_slave_rx_data),
	.tpm_cycle(lpc_slave_tpm_cycle),
	.firmware_cycle(lpc_slave_firmware_cycle),
	.continue(lpc_slave_continue_sync[1]),
	.data_ack(lpc_slave_data_ack_sync[1]),
	.transfer_error(lpc_slave_signal_error_sync[1]),
	.exception_ack(lpc_slave_exception_ack_sync[1]),
	.address_ready(lpc_slave_address_ready),
	.data_ready(lpc_slave_data_ready),
	.data_ready_cont(lpc_slave_data_ready_cont),
	.continue_cont(lpc_slave_continue_cont),
	.exception(lpc_slave_exception),
	.data_direction(lpc_slave_cycle_direction),

	// Serial IRQ interface
	.irq_request(irq_request_sync_1),
	.irq_tx_ready(irq_tx_ready),
	.irq_tx_queued(irq_tx_queued),

	// LPC firmware-cycle transfer buffers (peripheral-clock port side)
	.lpc_fw_input_xfer_write_addr(lpc_fw_input_xfer_write_addr),
	.lpc_fw_input_xfer_write_data(lpc_fw_input_xfer_write_data),
	.lpc_fw_input_xfer_write_clk(peripheral_clock),
	.lpc_fw_input_xfer_write_wren(lpc_fw_input_xfer_write_wren),
	.lpc_fw_output_xfer_read_addr(lpc_fw_output_xfer_read_addr),
	.lpc_fw_output_xfer_read_data(lpc_fw_output_xfer_read_data),
	.lpc_fw_output_xfer_read_clk(peripheral_clock),

	// IPMI BT message buffers (peripheral-clock port side)
	.ipmi_bt_input_xfer_write_addr(ipmi_bt_input_xfer_write_addr),
	.ipmi_bt_input_xfer_write_data(ipmi_bt_input_xfer_write_data),
	.ipmi_bt_input_xfer_write_clk(peripheral_clock),
	.ipmi_bt_input_xfer_write_wren(ipmi_bt_input_xfer_write_wren),
	.ipmi_bt_output_xfer_read_addr(ipmi_bt_output_xfer_read_addr),
	.ipmi_bt_output_xfer_read_data(ipmi_bt_output_xfer_read_data),
	.ipmi_bt_output_xfer_read_clk(peripheral_clock),

	// I/O-cycle intercept ranges (low 16 bits of each configured address)
	.range1_start(range_1_start_address[15:0]),
	.range1_end(range_1_end_address[15:0]),
	.range2_start(range_2_start_address[15:0]),
	.range2_end(range_2_end_address[15:0]),
	.range3_start(range_3_start_address[15:0]),
	.range3_end(range_3_end_address[15:0]),
	.range4_start(range_4_start_address[15:0]),
	.range4_end(range_4_end_address[15:0]),
	.range5_start(range_5_start_address[15:0]),
	.range5_end(range_5_end_address[15:0]),
	.range6_start(range_6_start_address[15:0]),
	.range6_end(range_6_end_address[15:0]),

	// VUART1
	.enable_vuart1(enable_vuart1),
	.vuart1_status_register(vuart1_status_register),
	.vuart1_control_register(vuart1_control_register_sync_1),
	.vuart1_assert_b2h_break_clear(vuart1_assert_b2h_break_clear),

	// VUART1 host-to-BMC ("tx" from the host's point of view) FIFO
	.vuart1_tx_fifo_reset(vuart1_h2b_fifo_reset),
	.vuart1_tx_fifo_wren(vuart1_h2b_fifo_wwren),
	.vuart1_tx_fifo_data(vuart1_h2b_fifo_wdata),
	.vuart1_tx_fifo_full(vuart1_h2b_fifo_wfull),
	.vuart1_tx_fifo_almost_full(vuart1_h2b_fifo_walmost_full),
	.vuart1_tx_fifo_empty(vuart1_h2b_fifo_rempty_sync[1]),

	// VUART1 BMC-to-host ("rx") FIFO
	.vuart1_rx_fifo_reset(vuart1_b2h_fifo_reset),
	.vuart1_rx_fifo_rpop(vuart1_b2h_fifo_rpop),
	.vuart1_rx_fifo_data(vuart1_b2h_fifo_rdata),
	.vuart1_rx_fifo_empty(vuart1_b2h_fifo_rempty),
	.vuart1_rx_fifo_almost_empty(vuart1_b2h_fifo_ralmost_empty),
	.vuart1_rx_fifo_full(vuart1_b2h_fifo_wfull_sync[2]),
	.vuart1_rx_data_available_count(vuart1_b2h_fifo_data_available_count[3:0]),

	// VUART2 (mirror of VUART1)
	.enable_vuart2(enable_vuart2),
	.vuart2_status_register(vuart2_status_register),
	.vuart2_control_register(vuart2_control_register_sync_1),
	.vuart2_assert_b2h_break_clear(vuart2_assert_b2h_break_clear),

	.vuart2_tx_fifo_reset(vuart2_h2b_fifo_reset),
	.vuart2_tx_fifo_wren(vuart2_h2b_fifo_wwren),
	.vuart2_tx_fifo_data(vuart2_h2b_fifo_wdata),
	.vuart2_tx_fifo_full(vuart2_h2b_fifo_wfull),
	.vuart2_tx_fifo_almost_full(vuart2_h2b_fifo_walmost_full),
	.vuart2_tx_fifo_empty(vuart2_h2b_fifo_rempty_sync[1]),

	.vuart2_rx_fifo_reset(vuart2_b2h_fifo_reset),
	.vuart2_rx_fifo_rpop(vuart2_b2h_fifo_rpop),
	.vuart2_rx_fifo_data(vuart2_b2h_fifo_rdata),
	.vuart2_rx_fifo_empty(vuart2_b2h_fifo_rempty),
	.vuart2_rx_fifo_almost_empty(vuart2_b2h_fifo_ralmost_empty),
	.vuart2_rx_fifo_full(vuart2_b2h_fifo_wfull_sync[2]),
	.vuart2_rx_data_available_count(vuart2_b2h_fifo_data_available_count[3:0]),

	// IPMI BT interface; base address zero-extended to 16 bits
	.enable_ipmi_bt(enable_ipmi_bt),
	.ipmi_bt_alt_irq(ipmi_bt_alt_irq),
	.ipmi_bt_port_base_address({8'h00, ipmi_bt_port_address}),

	// IPMI BT handshake outputs from the slave core (LPC domain)
	.ipmi_bt_bmc_to_host_ctl_sms_ack(ipmi_bt_bmc_to_host_ctl_sms_ack),
	.ipmi_bt_bmc_to_host_ctl_attn_ack(ipmi_bt_bmc_to_host_ctl_attn_ack),
	.ipmi_bt_host_to_bmc_ctl_attn_req(ipmi_bt_host_to_bmc_ctl_attn_req),
	.ipmi_bt_host_to_bmc_ctl_oem0_req(ipmi_bt_host_to_bmc_ctl_oem0_req),
	.ipmi_bt_irq_ack(ipmi_bt_irq_ack),
	.ipmi_bt_irq_bmc_reset(ipmi_bt_irq_bmc_reset),
	.ipmi_bt_host_to_bmc_ctl_h_busy(ipmi_bt_host_to_bmc_ctl_h_busy),
	.ipmi_bt_irq_enable(ipmi_bt_irq_enable),

	// IPMI BT handshake inputs, synchronized from the peripheral domain
	.ipmi_bt_bmc_to_host_ctl_sms_req(ipmi_bt_bmc_to_host_ctl_sms_req_sync[1]),
	.ipmi_bt_bmc_to_host_ctl_attn_req(ipmi_bt_bmc_to_host_ctl_attn_req_sync[1]),
	.ipmi_bt_bmc_to_host_ctl_sms_ack_cont(ipmi_bt_bmc_to_host_ctl_sms_ack_cont_sync[1]),
	.ipmi_bt_bmc_to_host_ctl_attn_ack_cont(ipmi_bt_bmc_to_host_ctl_attn_ack_cont_sync[1]),
	.ipmi_bt_host_to_bmc_ctl_attn_req_cont(ipmi_bt_host_to_bmc_ctl_attn_req_cont_sync[1]),
	.ipmi_bt_host_to_bmc_ctl_oem0_req_cont(ipmi_bt_host_to_bmc_ctl_oem0_req_cont_sync[1]),
	.ipmi_bt_irq_ack_cont(ipmi_bt_irq_ack_cont_sync[1]),
	.ipmi_bt_irq_bmc_reset_cont(ipmi_bt_irq_bmc_reset_cont_sync[1]),
	.ipmi_bt_bmc_to_host_ctl_b_busy(ipmi_bt_bmc_to_host_ctl_b_busy_sync[1]),
	.ipmi_bt_irq_req(ipmi_bt_irq_req_sync[1]),

	// Firmware-cycle IDSEL / memory size configuration
	.fw_idsel(lpc_slave_fw_idsel),
	.fw_msize(lpc_slave_fw_msize),

`ifdef LPC_SLAVE_DEBUG
	.debug_port(lpc_slave_debug_port),
`endif

	// Raw LPC pad connections (registered externally, below)
	.lpc_data_out(lpc_slave_lpc_data_out),
	.lpc_data_in(lpc_slave_lpc_data_in),
	.lpc_data_direction(lpc_slave_lpc_data_direction),

	.lpc_irq_out(lpc_slave_lpc_irq_out),
	.lpc_irq_in(lpc_slave_lpc_irq_in),
	.lpc_irq_direction(lpc_slave_lpc_irq_direction),

	.lpc_frame_n(lpc_slave_lpc_frame_n),
	.lpc_reset_n(lpc_slave_lpc_reset_n),
	.lpc_clock(lpc_clock)
);

// Create registered I/O signals on
// external LPC bus
// Register the raw LPC frame/reset pins into the LPC clock domain before the
// slave core consumes them.
always @(posedge lpc_clock) begin
	lpc_slave_lpc_frame_n <= lpc_frame_n;
	lpc_slave_lpc_reset_n <= lpc_reset_n;
end
// LAD bus: data output/direction pass straight through; input is unregistered.
assign lpc_data_out = lpc_slave_lpc_data_out;
assign lpc_slave_lpc_data_in = lpc_data_in;
assign lpc_data_direction = lpc_slave_lpc_data_direction;

// Register the serial IRQ output and its direction control at the pad.
always @(posedge lpc_clock) begin
	lpc_slave_lpc_irq_out_reg <= lpc_slave_lpc_irq_out;
	lpc_slave_lpc_irq_direction_reg <= lpc_slave_lpc_irq_direction;
end
assign lpc_irq_out = lpc_slave_lpc_irq_out_reg;
assign lpc_irq_in = lpc_slave_lpc_irq_in;
assign lpc_irq_direction = lpc_slave_lpc_irq_direction_reg;

// NOTE(review): the original code had "assign lpc_clock = lpc_clock;" here --
// a zero-delay combinational self-loop (and a conflicting driver if lpc_clock
// is a port).  It has been removed; lpc_clock is already driven externally.
// The "--ignore-loops" nextpnr flag added alongside this file may have been
// masking the loop -- verify it is still needed after this change.

// Wishbone slave (MMIO) interface output registers.
reg [3:0] slave_wishbone_sel_reg = 0;
reg slave_wishbone_ack_reg = 0;
reg [31:0] slave_wishbone_dat_r_reg = 0;

assign slave_wishbone_ack = slave_wishbone_ack_reg;
assign slave_wishbone_dat_r = slave_wishbone_dat_r_reg;

// Wishbone master (DMA) interface output registers; widths come from the
// module's DMA bus-width parameters (64-bit on Microwatt per the commit).
reg master_wishbone_cyc_reg = 0;
reg master_wishbone_stb_reg = 0;
reg master_wishbone_we_reg = 0;
reg [(WISHBONE_DMA_ADDR_BUS_WIDTH-1):0] master_wishbone_adr_reg = 0;
reg [(WISHBONE_DMA_DATA_BUS_WIDTH-1):0] master_wishbone_dat_w_reg = 0;
reg [((WISHBONE_DMA_DATA_BUS_WIDTH/8)-1):0] master_wishbone_sel_reg = 0;

assign master_wishbone_cyc = master_wishbone_cyc_reg;
assign master_wishbone_stb = master_wishbone_stb_reg;
assign master_wishbone_we = master_wishbone_we_reg;
assign master_wishbone_adr = master_wishbone_adr_reg;
assign master_wishbone_dat_w = master_wishbone_dat_w_reg;
assign master_wishbone_sel = master_wishbone_sel_reg;

// LPC cycle type identifiers.  ("AQUIEL" spelling kept as-is: the names may
// be referenced elsewhere in the file.)
parameter AQUIEL_LPC_CYCLE_TYPE_IO = 0;
parameter AQUIEL_LPC_CYCLE_TYPE_TPM = 1;
parameter AQUIEL_LPC_CYCLE_TYPE_FIRMWARE = 2;

// LPC transfer state machine encodings: I* = I/O cycles, F* = firmware
// cycles, E* = error, D* = DMA, R/W = read/write legs.
parameter LPC_SLAVE_TRANSFER_STATE_IDLE = 0;
parameter LPC_SLAVE_TRANSFER_STATE_IR01 = 1;
parameter LPC_SLAVE_TRANSFER_STATE_IR02 = 2;
parameter LPC_SLAVE_TRANSFER_STATE_IR03 = 3;
parameter LPC_SLAVE_TRANSFER_STATE_IW01 = 5;
parameter LPC_SLAVE_TRANSFER_STATE_IW02 = 6;
parameter
LPC_SLAVE_TRANSFER_STATE_IW03 = 7;
parameter LPC_SLAVE_TRANSFER_STATE_IW04 = 8;
// Firmware-cycle read/write states
parameter LPC_SLAVE_TRANSFER_STATE_FR01 = 9;
parameter LPC_SLAVE_TRANSFER_STATE_FR02 = 10;
parameter LPC_SLAVE_TRANSFER_STATE_FR03 = 11;
parameter LPC_SLAVE_TRANSFER_STATE_FW01 = 12;
// Error recovery state
parameter LPC_SLAVE_TRANSFER_STATE_ER01 = 16;
// DMA read sequence
parameter LPC_SLAVE_TRANSFER_STATE_DR01 = 17;
parameter LPC_SLAVE_TRANSFER_STATE_DR02 = 18;
parameter LPC_SLAVE_TRANSFER_STATE_DR03 = 19;
parameter LPC_SLAVE_TRANSFER_STATE_DR04 = 20;
parameter LPC_SLAVE_TRANSFER_STATE_DR05 = 21;
parameter LPC_SLAVE_TRANSFER_STATE_DR06 = 22;
parameter LPC_SLAVE_TRANSFER_STATE_DR07 = 23;
parameter LPC_SLAVE_TRANSFER_STATE_DR08 = 24;
parameter LPC_SLAVE_TRANSFER_STATE_DR09 = 25;
parameter LPC_SLAVE_TRANSFER_STATE_DR10 = 26;
// DMA write sequence
parameter LPC_SLAVE_TRANSFER_STATE_DW01 = 27;
parameter LPC_SLAVE_TRANSFER_STATE_DW02 = 28;
parameter LPC_SLAVE_TRANSFER_STATE_DW03 = 29;
parameter LPC_SLAVE_TRANSFER_STATE_DW04 = 30;
parameter LPC_SLAVE_TRANSFER_STATE_DW05 = 31;
parameter LPC_SLAVE_TRANSFER_STATE_DW06 = 32;
parameter LPC_SLAVE_TRANSFER_STATE_DW07 = 33;
parameter LPC_SLAVE_TRANSFER_STATE_DW08 = 34;
parameter LPC_SLAVE_TRANSFER_STATE_DW09 = 35;
parameter LPC_SLAVE_TRANSFER_STATE_DW10 = 36;
parameter LPC_SLAVE_TRANSFER_STATE_DW11 = 37;
// DMA finalize state
parameter LPC_SLAVE_TRANSFER_STATE_DF01 = 38;

// Wishbone MMIO state machine encodings: TR* = transfer handshake,
// RD*/WR* = byte-serialized buffer read/write sequences.  Encodings are
// sparse (8/16/32 bases) -- presumably to keep group membership visible in
// the upper state bits; verify before re-encoding.
parameter MMIO_TRANSFER_STATE_IDLE = 0;
parameter MMIO_TRANSFER_STATE_TR01 = 8;
parameter MMIO_TRANSFER_STATE_TR02 = 9;
parameter MMIO_TRANSFER_STATE_RD01 = 16;
parameter MMIO_TRANSFER_STATE_RD02 = 17;
parameter MMIO_TRANSFER_STATE_RD03 = 18;
parameter MMIO_TRANSFER_STATE_RD04 = 19;
parameter MMIO_TRANSFER_STATE_RD05 = 20;
parameter MMIO_TRANSFER_STATE_WR01 = 32;
parameter MMIO_TRANSFER_STATE_WR02 = 33;
parameter MMIO_TRANSFER_STATE_WR03 = 34;
parameter MMIO_TRANSFER_STATE_WR04 = 35;

// Latched LPC cycle address (28-bit) and cycle-type flags.
reg [27:0] lpc_slave_address_reg = 0;
reg lpc_slave_firmware_cycle_reg = 0;
reg
is_firmware_cycle_type = 0; + reg is_tpm_cycle_type = 0; + reg is_io_cycle_type = 0; + reg cycle_range_intercept_allowed = 0; + reg wishbone_mmio_access_is_32_bits = 0; + reg [31:0] mmio_lpc_buffer_address_reg = 0; + reg [7:0] mmio_transfer_state = 0; + reg [7:0] lpc_slave_transfer_state = 0; + reg mmio_guard_condition_required = 0; + reg [31:0] mmio_peripheral_tx_buffer = 0; + reg [31:0] mmio_peripheral_rx_buffer = 0; + reg [31:0] mmio_cfg_space_tx_buffer = 0; + reg [31:0] mmio_cfg_space_rx_buffer = 0; + + assign debug_port[11:8] = lpc_slave_transfer_state[3:0]; + assign debug_port[12] = master_wishbone_cyc; + assign debug_port[13] = master_wishbone_stb; + assign debug_port[14] = master_wishbone_ack; + assign debug_port[15] = master_wishbone_err; + + assign lpc_clock_mirror = lpc_clock; + + // Wishbone connector -- MMIO + always @(posedge peripheral_clock) begin + if (peripheral_reset) begin + // Reset Wishbone interface / control state machine + slave_wishbone_ack_reg <= 0; + wishbone_mmio_access_is_32_bits = 0; + continue_transfer <= 0; + + vuart1_control_register <= 0; + vuart2_control_register <= 0; + vuart1_lcr_break_request <= 0; + vuart2_lcr_break_request <= 0; + vuart1_lcr_break_ack <= 0; + vuart2_lcr_break_ack <= 0; + vuart1_irq_source <= 0; + vuart2_irq_source <= 0; + vuart1_h2b_fifo_irq <= 0; + vuart2_h2b_fifo_irq <= 0; + vuart1_h2b_fifo_read_timeout <= 0; + vuart2_h2b_fifo_read_timeout <= 0; + vuart1_h2b_fifo_queue_past_trigger <= 0; + vuart2_h2b_fifo_queue_past_trigger <= 0; + vuart1_h2b_fifo_read_timeout_counter <= 0; + vuart2_h2b_fifo_read_timeout_counter <= 0; + + ipmi_bt_bmc_to_host_ctl_sms_req <= 0; + ipmi_bt_bmc_to_host_ctl_attn_req <= 0; + ipmi_bt_bmc_to_host_ctl_sms_ack_cont <= 0; + ipmi_bt_bmc_to_host_ctl_attn_ack_cont <= 0; + ipmi_bt_host_to_bmc_ctl_attn_req_cont <= 0; + ipmi_bt_host_to_bmc_ctl_oem0_req_cont <= 0; + ipmi_bt_irq_ack_cont <= 0; + ipmi_bt_irq_bmc_reset_cont <= 0; + ipmi_bt_bmc_to_host_ctl_b_busy <= 1; // BMC should always 
indicate busy until BMC software is online and clears the busy flag + ipmi_bt_irq_req <= 0; + ipmi_bt_bmc_irq <= 0; + + ipmi_bt_bmc_to_host_ctl_attn_req_prev <= 0; + ipmi_bt_bmc_to_host_ctl_sms_req_prev <= 0; + ipmi_bt_h2b_oem0_req_prev <= 0; + + mmio_transfer_state <= MMIO_TRANSFER_STATE_IDLE; + end else begin + case (mmio_transfer_state) + MMIO_TRANSFER_STATE_IDLE: begin + // Compute effective address + mmio_lpc_buffer_address_reg[31:2] = slave_wishbone_adr; + case (slave_wishbone_sel) + 4'b0001: mmio_lpc_buffer_address_reg[1:0] = 0; + 4'b0010: mmio_lpc_buffer_address_reg[1:0] = 1; + 4'b0100: mmio_lpc_buffer_address_reg[1:0] = 2; + 4'b1000: mmio_lpc_buffer_address_reg[1:0] = 3; + 4'b1111: mmio_lpc_buffer_address_reg[1:0] = 0; + default: mmio_lpc_buffer_address_reg[1:0] = 0; + endcase + + if (slave_wishbone_cyc && slave_wishbone_stb) begin + mmio_guard_condition_required = 0; + if (mmio_lpc_buffer_address_reg[31:20] == 12'h00e) begin + // VUART register space access + if (!continue_transfer) begin + // Single clock pulse signals in deasserted state...process incoming request! + if (!slave_wishbone_we) begin + // Read requested + case ({mmio_lpc_buffer_address_reg[19:2], 2'b00}) + // Bus is little endian! 
+ 0: begin + mmio_peripheral_tx_buffer = {(vuart1_h2b_fifo_rempty)?8'h00:vuart1_h2b_fifo_rdata, + 7'b0, vuart1_h2b_fifo_rempty || !lpc_slave_lpc_reset_n || vuart1_h2b_fifo_reset, + (vuart2_h2b_fifo_rempty)?8'h00:vuart2_h2b_fifo_rdata, + 7'b0, vuart2_h2b_fifo_rempty || !lpc_slave_lpc_reset_n || vuart2_h2b_fifo_reset + }; + + if (slave_wishbone_sel[0]) begin + // Wishbone bits 31:24 + if (!vuart1_h2b_fifo_rempty) begin + vuart1_h2b_fifo_rpop <= 1; + end + end + if (slave_wishbone_sel[2]) begin + // Wishbone bits 15:8 + if (!vuart2_h2b_fifo_rempty) begin + vuart2_h2b_fifo_rpop <= 1; + end + end + end + 4: begin + mmio_peripheral_tx_buffer[31:24] <= vuart1_status_register_sync_2[7:0]; + mmio_peripheral_tx_buffer[23:16] <= vuart1_status_register_sync_2[15:8]; + mmio_peripheral_tx_buffer[15:8] <= vuart1_status_register_sync_2[23:16]; + mmio_peripheral_tx_buffer[7:0] <= {6'b0, vuart1_lcr_break_request, vuart1_b2h_fifo_wfull}; + vuart1_lcr_break_ack <= 1; + end + 8: begin + mmio_peripheral_tx_buffer[31:24] <= vuart1_control_register[7:0]; + mmio_peripheral_tx_buffer[23:16] <= vuart1_control_register[15:8]; + mmio_peripheral_tx_buffer[15:8] <= vuart1_control_register[23:16]; + mmio_peripheral_tx_buffer[7:0] <= vuart1_control_register[31:24]; + end + 12: begin + mmio_peripheral_tx_buffer[31:24] <= vuart2_status_register_sync_2[7:0]; + mmio_peripheral_tx_buffer[23:16] <= vuart2_status_register_sync_2[15:8]; + mmio_peripheral_tx_buffer[15:8] <= vuart2_status_register_sync_2[23:16]; + mmio_peripheral_tx_buffer[7:0] <= {6'b0, vuart2_lcr_break_request, vuart2_b2h_fifo_wfull}; + vuart2_lcr_break_ack <= 1; + end + 16: begin + mmio_peripheral_tx_buffer[31:24] <= vuart2_control_register[7:0]; + mmio_peripheral_tx_buffer[23:16] <= vuart2_control_register[15:8]; + mmio_peripheral_tx_buffer[15:8] <= vuart2_control_register[23:16]; + mmio_peripheral_tx_buffer[7:0] <= vuart2_control_register[31:24]; + end + default: mmio_peripheral_tx_buffer = 32'hffffffff; + endcase + + // Place data on 
Wishbone bus + slave_wishbone_dat_r_reg <= mmio_peripheral_tx_buffer; + + // Signal transfer complete + slave_wishbone_ack_reg <= 1; + + mmio_transfer_state <= MMIO_TRANSFER_STATE_TR02; + end else begin + // Write requested + case ({mmio_lpc_buffer_address_reg[19:2], 2'b00}) + 8: mmio_cfg_space_rx_buffer = vuart1_control_register; + 16: mmio_cfg_space_rx_buffer = vuart2_control_register; + default: mmio_cfg_space_rx_buffer = 0; + endcase + + if (slave_wishbone_sel[0]) begin + mmio_cfg_space_rx_buffer[7:0] = slave_wishbone_dat_w[31:24]; + end + if (slave_wishbone_sel[1]) begin + mmio_cfg_space_rx_buffer[15:8] = slave_wishbone_dat_w[23:16]; + end + if (slave_wishbone_sel[2]) begin + mmio_cfg_space_rx_buffer[23:16] = slave_wishbone_dat_w[15:8]; + end + if (slave_wishbone_sel[3]) begin + mmio_cfg_space_rx_buffer[31:24] = slave_wishbone_dat_w[7:0]; + end + + // Specialty bit handlers + case ({mmio_lpc_buffer_address_reg[19:2], 2'b00}) + 0: begin + if (slave_wishbone_sel[0]) begin + // Load VUART1 B2H FIFO + if (!vuart1_b2h_fifo_wfull) begin + vuart1_b2h_fifo_wdata <= mmio_cfg_space_rx_buffer[7:0]; + vuart1_b2h_fifo_wwren <= 1; + end + end + if (slave_wishbone_sel[2]) begin + // Load VUART2 B2H FIFO + if (!vuart2_b2h_fifo_wfull) begin + vuart2_b2h_fifo_wdata <= mmio_cfg_space_rx_buffer[23:16]; + vuart2_b2h_fifo_wwren <= 1; + end + end + end + 8: begin + if (mmio_cfg_space_rx_buffer[0]) begin + // B2H BREAK request + mmio_cfg_space_rx_buffer[0] = 0; + if (!vuart1_assert_b2h_break_clear_sync[2]) begin + vuart1_control_register[0] <= 1; + end + end + end + 16: begin + if (mmio_cfg_space_rx_buffer[0]) begin + // B2H BREAK request + mmio_cfg_space_rx_buffer[0] = 0; + if (!vuart2_assert_b2h_break_clear_sync[2]) begin + vuart2_control_register[0] <= 1; + end + end + end + endcase + + case ({mmio_lpc_buffer_address_reg[19:2], 2'b00}) + 8: vuart1_control_register <= mmio_cfg_space_rx_buffer; + 16: vuart2_control_register <= mmio_cfg_space_rx_buffer; + endcase + + // Signal 
transfer complete + slave_wishbone_ack_reg <= 1; + + mmio_transfer_state <= MMIO_TRANSFER_STATE_TR02; + end + end + end else if (mmio_lpc_buffer_address_reg[31:20] == 12'h00f) begin + // Configuration register space access + if (!continue_transfer) begin + // Single clock pulse signals in deasserted state...process incoming request! + if (!slave_wishbone_we) begin + // Read requested + case ({mmio_lpc_buffer_address_reg[19:2], 2'b00}) + 0: mmio_cfg_space_tx_buffer = device_id[63:32]; + 4: mmio_cfg_space_tx_buffer = device_id[31:0]; + 8: mmio_cfg_space_tx_buffer = device_version; + 12: mmio_cfg_space_tx_buffer = control_reg1; + 16: mmio_cfg_space_tx_buffer = control_reg2; + 20: mmio_cfg_space_tx_buffer = range_config1; + 24: mmio_cfg_space_tx_buffer = range_end1; + 28: mmio_cfg_space_tx_buffer = range_config2; + 32: mmio_cfg_space_tx_buffer = range_end2; + 36: mmio_cfg_space_tx_buffer = range_config3; + 40: mmio_cfg_space_tx_buffer = range_end3; + 44: mmio_cfg_space_tx_buffer = range_config4; + 48: mmio_cfg_space_tx_buffer = range_end4; + 52: mmio_cfg_space_tx_buffer = range_config5; + 56: mmio_cfg_space_tx_buffer = range_end5; + 60: mmio_cfg_space_tx_buffer = range_config6; + 64: mmio_cfg_space_tx_buffer = range_end6; + 68: mmio_cfg_space_tx_buffer = dma_config_reg1; + 72: mmio_cfg_space_tx_buffer = dma_config_reg2; + 76: mmio_cfg_space_tx_buffer = dma_config_reg3; + 80: mmio_cfg_space_tx_buffer = dma_config_reg4; + 84: mmio_cfg_space_tx_buffer = dma_config_reg5; + 88: mmio_cfg_space_tx_buffer = dma_config_reg6; + 92: mmio_cfg_space_tx_buffer = dma_config_reg7; + 96: mmio_cfg_space_tx_buffer = status_reg1; + 100: mmio_cfg_space_tx_buffer = status_reg2; + 104: mmio_cfg_space_tx_buffer = status_reg3; + 108: mmio_cfg_space_tx_buffer = status_reg4; + 112: mmio_cfg_space_tx_buffer = ipmi_bt_status_reg; + default: mmio_cfg_space_tx_buffer = 0; + endcase + + // Specialty bit handlers + case ({mmio_lpc_buffer_address_reg[19:2], 2'b00}) + 12: mmio_cfg_space_tx_buffer[0] = 
0; // continue_transfer + endcase + + // Endian swap + slave_wishbone_dat_r_reg[31:24] <= mmio_cfg_space_tx_buffer[7:0]; + slave_wishbone_dat_r_reg[23:16] <= mmio_cfg_space_tx_buffer[15:8]; + slave_wishbone_dat_r_reg[15:8] <= mmio_cfg_space_tx_buffer[23:16]; + slave_wishbone_dat_r_reg[7:0] <= mmio_cfg_space_tx_buffer[31:24]; + + // Signal transfer complete + slave_wishbone_ack_reg <= 1; + + mmio_transfer_state <= MMIO_TRANSFER_STATE_TR02; + end else begin + // Write requested + case ({mmio_lpc_buffer_address_reg[19:2], 2'b00}) + // Device ID / version registers cannot be written, don't even try... + 12: mmio_cfg_space_rx_buffer = control_reg1; + 16: mmio_cfg_space_rx_buffer = control_reg2; + 20: mmio_cfg_space_rx_buffer = range_config1; + 24: mmio_cfg_space_rx_buffer = range_end1; + 28: mmio_cfg_space_rx_buffer = range_config2; + 32: mmio_cfg_space_rx_buffer = range_end2; + 36: mmio_cfg_space_rx_buffer = range_config3; + 40: mmio_cfg_space_rx_buffer = range_end3; + 44: mmio_cfg_space_rx_buffer = range_config4; + 48: mmio_cfg_space_rx_buffer = range_end4; + 52: mmio_cfg_space_rx_buffer = range_config5; + 56: mmio_cfg_space_rx_buffer = range_end5; + 60: mmio_cfg_space_rx_buffer = range_config6; + 64: mmio_cfg_space_rx_buffer = range_end6; + 68: mmio_cfg_space_rx_buffer = dma_config_reg1; + 72: mmio_cfg_space_rx_buffer = dma_config_reg2; + 76: mmio_cfg_space_rx_buffer = dma_config_reg3; + 80: mmio_cfg_space_rx_buffer = dma_config_reg4; + 84: mmio_cfg_space_rx_buffer = dma_config_reg5; + 88: mmio_cfg_space_rx_buffer = dma_config_reg6; + 92: mmio_cfg_space_rx_buffer = dma_config_reg7; + // Status registers cannot be written, don't even try... 
+ default: mmio_cfg_space_rx_buffer = 0; + endcase + + if (slave_wishbone_sel[0]) begin + mmio_cfg_space_rx_buffer[7:0] = slave_wishbone_dat_w[31:24]; + end + if (slave_wishbone_sel[1]) begin + mmio_cfg_space_rx_buffer[15:8] = slave_wishbone_dat_w[23:16]; + end + if (slave_wishbone_sel[2]) begin + mmio_cfg_space_rx_buffer[23:16] = slave_wishbone_dat_w[15:8]; + end + if (slave_wishbone_sel[3]) begin + mmio_cfg_space_rx_buffer[31:24] = slave_wishbone_dat_w[7:0]; + end + + // Specialty bit handlers + case ({mmio_lpc_buffer_address_reg[19:2], 2'b00}) + 16: begin + // continue_transfer + if (mmio_cfg_space_rx_buffer[0]) begin + mmio_cfg_space_rx_buffer[0] = 0; + continue_transfer <= 1; + end + end + 108: begin + // IPMI BT control register + if (mmio_cfg_space_rx_buffer[0]) begin + // CLR_WR_PTR + ipmi_bt_input_xfer_write_addr <= 0; + end + if (mmio_cfg_space_rx_buffer[1]) begin + // CLR_RD_PTR + ipmi_bt_output_xfer_read_addr <= 0; + end + if (mmio_cfg_space_rx_buffer[2]) begin + // H2B_ATN clear + ipmi_bt_host_to_bmc_ctl_attn_req_cont <= 1; + mmio_guard_condition_required = 1; + end + if (mmio_cfg_space_rx_buffer[3]) begin + // B2H_ATN set + ipmi_bt_bmc_to_host_ctl_attn_req <= 1; + end + if (mmio_cfg_space_rx_buffer[4]) begin + // SMS_ATN set + ipmi_bt_bmc_to_host_ctl_sms_req <= 1; + end + if (mmio_cfg_space_rx_buffer[5]) begin + // OEM0 clear + ipmi_bt_host_to_bmc_ctl_oem0_req_cont <= 1; + mmio_guard_condition_required = 1; + end + if (mmio_cfg_space_rx_buffer[7]) begin + // B_BUSY + if (ipmi_bt_bmc_to_host_ctl_b_busy) begin + ipmi_bt_bmc_to_host_ctl_b_busy <= 0; + end else begin + ipmi_bt_bmc_to_host_ctl_b_busy <= 1; + end + end + end + endcase + + case ({mmio_lpc_buffer_address_reg[19:2], 2'b00}) + 12: control_reg1 <= mmio_cfg_space_rx_buffer; + 16: control_reg2 <= mmio_cfg_space_rx_buffer; + 20: range_config1 <= mmio_cfg_space_rx_buffer; + 24: range_end1 <= mmio_cfg_space_rx_buffer; + 28: range_config2 <= mmio_cfg_space_rx_buffer; + 32: range_end2 <= 
mmio_cfg_space_rx_buffer; + 36: range_config3 <= mmio_cfg_space_rx_buffer; + 40: range_end3 <= mmio_cfg_space_rx_buffer; + 44: range_config4 <= mmio_cfg_space_rx_buffer; + 48: range_end4 <= mmio_cfg_space_rx_buffer; + 52: range_config5 <= mmio_cfg_space_rx_buffer; + 56: range_end5 <= mmio_cfg_space_rx_buffer; + 60: range_config6 <= mmio_cfg_space_rx_buffer; + 64: range_end6 <= mmio_cfg_space_rx_buffer; + 68: dma_config_reg1 <= mmio_cfg_space_rx_buffer; + 72: dma_config_reg2 <= mmio_cfg_space_rx_buffer; + 76: dma_config_reg3 <= mmio_cfg_space_rx_buffer; + 80: dma_config_reg4 <= mmio_cfg_space_rx_buffer; + 84: dma_config_reg5 <= mmio_cfg_space_rx_buffer; + 88: dma_config_reg6 <= mmio_cfg_space_rx_buffer; + 92: dma_config_reg7 <= mmio_cfg_space_rx_buffer; + endcase + + if (mmio_guard_condition_required) begin + // Signal transfer complete + slave_wishbone_ack_reg <= 1; + + mmio_transfer_state <= MMIO_TRANSFER_STATE_TR02; + end else begin + mmio_transfer_state <= MMIO_TRANSFER_STATE_TR01; + end + end + end + end else begin + // Select 8-bit/32-bit transfer size via Wishbone access mode + if (slave_wishbone_sel == 4'b1111) begin + wishbone_mmio_access_is_32_bits = 1; + end else begin + wishbone_mmio_access_is_32_bits = 0; + end + slave_wishbone_sel_reg <= slave_wishbone_sel; + if (!slave_wishbone_we) begin + // Read requested + // Set up read + if (wishbone_mmio_access_is_32_bits) begin + case (mmio_lpc_buffer_address_reg[31:20]) + 12'h00c: lpc_fw_output_xfer_mmio_read_addr <= mmio_lpc_buffer_address_reg[8:0]; + 12'h00d: ipmi_bt_output_xfer_read_addr <= mmio_lpc_buffer_address_reg[8:0]; + endcase + end else begin + case (mmio_lpc_buffer_address_reg[31:20]) + 12'h00c: lpc_fw_output_xfer_mmio_read_addr <= mmio_lpc_buffer_address_reg[8:0]; + 12'h00d: ipmi_bt_output_xfer_read_addr <= mmio_lpc_buffer_address_reg[8:0]; + endcase + end + mmio_transfer_state <= MMIO_TRANSFER_STATE_RD01; + end else begin + // Write requested + // Take single cycle performance hit for simplicity 
here... + mmio_transfer_state <= MMIO_TRANSFER_STATE_WR01; + end + end + end + end + MMIO_TRANSFER_STATE_RD01: begin + if (wishbone_mmio_access_is_32_bits) begin + // Set up next read + case (mmio_lpc_buffer_address_reg[31:20]) + 12'h00c: lpc_fw_output_xfer_mmio_read_addr <= mmio_lpc_buffer_address_reg[8:0] + 1; + 12'h00d: ipmi_bt_output_xfer_read_addr <= mmio_lpc_buffer_address_reg[8:0] + 1; + endcase + mmio_transfer_state <= MMIO_TRANSFER_STATE_RD02; + end else begin + // Wait for read data to become available + mmio_transfer_state <= MMIO_TRANSFER_STATE_RD02; + end + end + MMIO_TRANSFER_STATE_RD02: begin + case (mmio_lpc_buffer_address_reg[31:20]) + 12'h00c: xfer_read_data_buffer = lpc_fw_output_xfer_read_data; + 12'h00d: xfer_read_data_buffer = ipmi_bt_output_xfer_read_data; + endcase + + if (wishbone_mmio_access_is_32_bits) begin + // Read first byte + slave_wishbone_dat_r_reg[31:24] <= xfer_read_data_buffer; + + // Set up next read + case (mmio_lpc_buffer_address_reg[31:20]) + 12'h00c: lpc_fw_output_xfer_mmio_read_addr <= mmio_lpc_buffer_address_reg[8:0] + 2; + 12'h00d: ipmi_bt_output_xfer_read_addr <= mmio_lpc_buffer_address_reg[8:0] + 2; + endcase + mmio_transfer_state <= MMIO_TRANSFER_STATE_RD03; + end else begin + // Replicate the data byte to all active lanes + if (slave_wishbone_sel_reg[0]) begin + slave_wishbone_dat_r_reg[31:24] <= xfer_read_data_buffer; + end + if (slave_wishbone_sel_reg[1]) begin + slave_wishbone_dat_r_reg[23:16] <= xfer_read_data_buffer; + end + if (slave_wishbone_sel_reg[2]) begin + slave_wishbone_dat_r_reg[15:8] <= xfer_read_data_buffer; + end + if (slave_wishbone_sel_reg[3]) begin + slave_wishbone_dat_r_reg[7:0] <= xfer_read_data_buffer; + end + + // Signal transfer complete + slave_wishbone_ack_reg <= 1; + + mmio_transfer_state <= MMIO_TRANSFER_STATE_TR02; + end + end + MMIO_TRANSFER_STATE_RD03: begin + // Read second byte + case (mmio_lpc_buffer_address_reg[31:20]) + 12'h00c: begin + slave_wishbone_dat_r_reg[23:16] <= 
lpc_fw_output_xfer_read_data; + lpc_fw_output_xfer_mmio_read_addr <= mmio_lpc_buffer_address_reg[8:0] + 3; + end + 12'h00d: begin + slave_wishbone_dat_r_reg[23:16] <= ipmi_bt_output_xfer_read_data; + ipmi_bt_output_xfer_read_addr <= mmio_lpc_buffer_address_reg[8:0] + 3; + end + endcase + + mmio_transfer_state <= MMIO_TRANSFER_STATE_RD04; + end + MMIO_TRANSFER_STATE_RD04: begin + // Read third byte + case (mmio_lpc_buffer_address_reg[31:20]) + 12'h00c: slave_wishbone_dat_r_reg[15:8] <= lpc_fw_output_xfer_read_data; + 12'h00d: slave_wishbone_dat_r_reg[15:8] <= ipmi_bt_output_xfer_read_data; + endcase + + mmio_transfer_state <= MMIO_TRANSFER_STATE_RD05; + end + MMIO_TRANSFER_STATE_RD05: begin + // Read fourth byte + case (mmio_lpc_buffer_address_reg[31:20]) + 12'h00c: slave_wishbone_dat_r_reg[7:0] <= lpc_fw_output_xfer_read_data; + 12'h00d: slave_wishbone_dat_r_reg[7:0] <= ipmi_bt_output_xfer_read_data; + endcase + + // Signal transfer complete + slave_wishbone_ack_reg <= 1; + + mmio_transfer_state <= MMIO_TRANSFER_STATE_TR02; + end + MMIO_TRANSFER_STATE_WR01: begin + if (lpc_fw_dma_cycle_inactive) begin + // No conflict present on LPC buffer write signals + if (wishbone_mmio_access_is_32_bits) begin + // Write first byte + case (mmio_lpc_buffer_address_reg[31:20]) + 12'h00c: begin + lpc_fw_input_xfer_mmio_write_addr <= mmio_lpc_buffer_address_reg[8:0] + 3; + lpc_fw_input_xfer_mmio_write_data <= slave_wishbone_dat_w[7:0]; + lpc_fw_input_xfer_mmio_write_wren <= 1; + end + 12'h00d: begin + ipmi_bt_input_xfer_write_addr <= mmio_lpc_buffer_address_reg[8:0] + 3; + ipmi_bt_input_xfer_write_data <= slave_wishbone_dat_w[7:0]; + ipmi_bt_input_xfer_write_wren <= 1; + end + endcase + mmio_transfer_state <= MMIO_TRANSFER_STATE_WR02; + end else begin + // Read the data byte to write from the active lane + if (slave_wishbone_sel_reg[0]) begin + xfer_write_data_buffer = slave_wishbone_dat_w[31:24]; + end else if (slave_wishbone_sel_reg[1]) begin + xfer_write_data_buffer = 
slave_wishbone_dat_w[23:16]; + end else if (slave_wishbone_sel_reg[2]) begin + xfer_write_data_buffer = slave_wishbone_dat_w[15:8]; + end else if (slave_wishbone_sel_reg[3]) begin + xfer_write_data_buffer = slave_wishbone_dat_w[7:0]; + end else begin + xfer_write_data_buffer = 8'hff; + end + case (mmio_lpc_buffer_address_reg[31:20]) + 12'h00c: begin + lpc_fw_input_xfer_mmio_write_data <= xfer_write_data_buffer; + lpc_fw_input_xfer_mmio_write_addr <= mmio_lpc_buffer_address_reg[8:0]; + lpc_fw_input_xfer_mmio_write_wren <= 1; + end + 12'h00d: begin + ipmi_bt_input_xfer_write_data <= xfer_write_data_buffer; + ipmi_bt_input_xfer_write_addr <= mmio_lpc_buffer_address_reg[8:0]; + ipmi_bt_input_xfer_write_wren <= 1; + end + endcase + + // Signal transfer complete + slave_wishbone_ack_reg <= 1; + + mmio_transfer_state <= MMIO_TRANSFER_STATE_TR02; + end + end + end + MMIO_TRANSFER_STATE_WR02: begin + if (lpc_fw_dma_cycle_inactive) begin + // No conflict present on LPC buffer write signals + // Write second byte + case (mmio_lpc_buffer_address_reg[31:20]) + 12'h00c: begin + lpc_fw_input_xfer_mmio_write_addr <= mmio_lpc_buffer_address_reg[8:0] + 2; + lpc_fw_input_xfer_mmio_write_data <= slave_wishbone_dat_w[15:8]; + lpc_fw_input_xfer_mmio_write_wren <= 1; + end + 12'h00d: begin + ipmi_bt_input_xfer_write_addr <= mmio_lpc_buffer_address_reg[8:0] + 2; + ipmi_bt_input_xfer_write_data <= slave_wishbone_dat_w[15:8]; + ipmi_bt_input_xfer_write_wren <= 1; + end + endcase + mmio_transfer_state <= MMIO_TRANSFER_STATE_WR03; + end + end + MMIO_TRANSFER_STATE_WR03: begin + if (lpc_fw_dma_cycle_inactive) begin + // No conflict present on LPC buffer write signals + // Write third byte + case (mmio_lpc_buffer_address_reg[31:20]) + 12'h00c: begin + lpc_fw_input_xfer_mmio_write_addr <= mmio_lpc_buffer_address_reg[8:0] + 1; + lpc_fw_input_xfer_mmio_write_data <= slave_wishbone_dat_w[23:16]; + lpc_fw_input_xfer_mmio_write_wren <= 1; + end + 12'h00d: begin + ipmi_bt_input_xfer_write_addr <= 
mmio_lpc_buffer_address_reg[8:0] + 1; + ipmi_bt_input_xfer_write_data <= slave_wishbone_dat_w[23:16]; + ipmi_bt_input_xfer_write_wren <= 1; + end + endcase + mmio_transfer_state <= MMIO_TRANSFER_STATE_WR04; + end + end + MMIO_TRANSFER_STATE_WR04: begin + if (lpc_fw_dma_cycle_inactive) begin + // No conflict present on LPC buffer write signals + // Write fourth byte + case (mmio_lpc_buffer_address_reg[31:20]) + 12'h00c: begin + lpc_fw_input_xfer_mmio_write_addr <= mmio_lpc_buffer_address_reg[8:0]; + lpc_fw_input_xfer_mmio_write_data <= slave_wishbone_dat_w[31:24]; + lpc_fw_input_xfer_mmio_write_wren <= 1; + end + 12'h00d: begin + ipmi_bt_input_xfer_write_addr <= mmio_lpc_buffer_address_reg[8:0]; + ipmi_bt_input_xfer_write_data <= slave_wishbone_dat_w[31:24]; + ipmi_bt_input_xfer_write_wren <= 1; + end + endcase + + // Signal transfer complete + slave_wishbone_ack_reg <= 1; + + mmio_transfer_state <= MMIO_TRANSFER_STATE_TR02; + end + end + MMIO_TRANSFER_STATE_TR01: begin + if (ipmi_bt_host_to_bmc_ctl_attn_req_cont) begin + if (!ipmi_bt_host_to_bmc_ctl_attn_req_sync[2]) begin + ipmi_bt_host_to_bmc_ctl_attn_req_cont <= 0; + end + end else if (ipmi_bt_host_to_bmc_ctl_oem0_req_cont) begin + if (!ipmi_bt_host_to_bmc_ctl_oem0_req_sync[2]) begin + ipmi_bt_host_to_bmc_ctl_oem0_req_cont <= 0; + end + end else begin + // Signal transfer complete + slave_wishbone_ack_reg <= 1; + + mmio_transfer_state <= MMIO_TRANSFER_STATE_TR02; + end + end + MMIO_TRANSFER_STATE_TR02: begin + // Cycle complete + slave_wishbone_ack_reg <= 0; + vuart1_h2b_fifo_rpop <= 0; + vuart2_h2b_fifo_rpop <= 0; + vuart1_b2h_fifo_wwren <= 0; + vuart2_b2h_fifo_wwren <= 0; + lpc_fw_input_xfer_mmio_write_wren <= 0; + ipmi_bt_input_xfer_write_wren <= 0; + mmio_transfer_state <= MMIO_TRANSFER_STATE_IDLE; + end + default: begin + // Should never reach this state + mmio_transfer_state <= MMIO_TRANSFER_STATE_IDLE; + end + endcase + + // This assumes the MMIO handler is driven off the same clock as the LPC transfer 
handler + // It will generate a single clock width pulse on the continue line + if (continue_transfer) begin + continue_transfer <= 0; + end + + if ((mmio_transfer_state == MMIO_TRANSFER_STATE_IDLE) && !(slave_wishbone_cyc && slave_wishbone_stb)) begin + // Bits are not being actively set / cleared by the BMC, therefore it is now safe + // to execute the handshake logic (potential race conditions avoided) + + // VUART handshake logic + if (vuart1_control_register[0] && vuart1_assert_b2h_break_clear_sync[2]) begin + vuart1_control_register[0] <= 0; + end + if (vuart2_control_register[0] && vuart2_assert_b2h_break_clear_sync[2]) begin + vuart2_control_register[0] <= 0; + end + + if (vuart1_status_register_sync_2[6]) begin + vuart1_lcr_break_request <= 1; + end else begin + if (vuart1_lcr_break_ack) begin + vuart1_lcr_break_request <= 0; + vuart1_lcr_break_ack <= 0; + end + end + if (vuart2_status_register_sync_2[6]) begin + vuart2_lcr_break_request <= 1; + end else begin + if (vuart2_lcr_break_ack) begin + vuart2_lcr_break_request <= 0; + vuart2_lcr_break_ack <= 0; + end + end + + // VUART1 IRQ handler logic + if (vuart1_h2b_fifo_rpop) begin + vuart1_h2b_fifo_read_timeout_counter <= 0; + vuart1_h2b_fifo_read_timeout <= 0; + end else begin + if (vuart1_h2b_fifo_rempty) begin + vuart1_h2b_fifo_read_timeout_counter <= 0; + vuart1_h2b_fifo_read_timeout <= 0; + end else begin + if (vuart1_h2b_fifo_read_timeout_counter > 1000) begin + vuart1_h2b_fifo_read_timeout <= 1; + end else begin + vuart1_h2b_fifo_read_timeout_counter <= vuart1_h2b_fifo_read_timeout_counter + 1; + end + end + end + + if ((vuart1_h2b_fifo_data_available_count[3:0] >= vuart1_h2b_fifo_irq_trigger_level) || vuart1_h2b_fifo_wfull) begin + vuart1_h2b_fifo_queue_past_trigger <= 1; + end else begin + vuart1_h2b_fifo_queue_past_trigger <= 0; + end + if (vuart1_irqs_enabled) begin + if (vuart1_h2b_fifo_irq_enabled && vuart1_h2b_fifo_queue_past_trigger) begin + vuart1_h2b_fifo_irq <= 1; + vuart1_irq_source <= 
VUART_IRQ_REASON_QUEUE_TRIGGER; + end else if (vuart1_h2b_fifo_irq_enabled && vuart1_h2b_fifo_read_timeout) begin + vuart1_h2b_fifo_irq <= 1; + vuart1_irq_source <= VUART_IRQ_REASON_QUEUE_TIMEOUT; + end else begin + vuart1_irq_source <= VUART_IRQ_REASON_NONE; + vuart1_h2b_fifo_irq <= 0; + end + end else begin + vuart1_irq_source <= VUART_IRQ_REASON_NONE; + vuart1_h2b_fifo_irq <= 0; + end + + // VUART2 IRQ handler logic + if (vuart2_h2b_fifo_rpop) begin + vuart2_h2b_fifo_read_timeout_counter <= 0; + vuart2_h2b_fifo_read_timeout <= 0; + end else begin + if (vuart2_h2b_fifo_rempty) begin + vuart2_h2b_fifo_read_timeout_counter <= 0; + vuart2_h2b_fifo_read_timeout <= 0; + end else begin + if (vuart2_h2b_fifo_read_timeout_counter > 1000) begin + vuart2_h2b_fifo_read_timeout <= 1; + end else begin + vuart2_h2b_fifo_read_timeout_counter <= vuart2_h2b_fifo_read_timeout_counter + 1; + end + end + end + + if ((vuart2_h2b_fifo_data_available_count[3:0] >= vuart2_h2b_fifo_irq_trigger_level) || vuart2_h2b_fifo_wfull) begin + vuart2_h2b_fifo_queue_past_trigger <= 1; + end else begin + vuart2_h2b_fifo_queue_past_trigger <= 0; + end + if (vuart2_irqs_enabled) begin + if (vuart2_h2b_fifo_irq_enabled && vuart2_h2b_fifo_queue_past_trigger) begin + vuart2_h2b_fifo_irq <= 1; + vuart2_irq_source <= VUART_IRQ_REASON_QUEUE_TRIGGER; + end else if (vuart2_h2b_fifo_irq_enabled && vuart2_h2b_fifo_read_timeout) begin + vuart2_h2b_fifo_irq <= 1; + vuart2_irq_source <= VUART_IRQ_REASON_QUEUE_TIMEOUT; + end else begin + vuart2_irq_source <= VUART_IRQ_REASON_NONE; + vuart2_h2b_fifo_irq <= 0; + end + end else begin + vuart2_irq_source <= VUART_IRQ_REASON_NONE; + vuart2_h2b_fifo_irq <= 0; + end + + // IPMI handshake handler logic + if (ipmi_bt_bmc_to_host_ctl_attn_ack_sync[2]) begin + ipmi_bt_bmc_to_host_ctl_attn_req <= 0; + ipmi_bt_bmc_to_host_ctl_attn_ack_cont <= 1; + end else begin + ipmi_bt_bmc_to_host_ctl_attn_ack_cont <= 0; + end + if (ipmi_bt_bmc_to_host_ctl_sms_ack_sync[2]) begin + 
ipmi_bt_bmc_to_host_ctl_sms_req <= 0; + ipmi_bt_bmc_to_host_ctl_sms_ack_cont <= 1; + end else begin + ipmi_bt_bmc_to_host_ctl_sms_ack_cont <= 0; + end + + // IPMI BMC IRQ handler logic + if (ipmi_bt_irq_ack_sync[2]) begin + ipmi_bt_irq_req <= 0; + ipmi_bt_irq_ack_cont <= 1; + end else begin + if (!ipmi_bt_irq_ack_cont) begin + if (ipmi_bt_irq_enable_sync[2] + && !ipmi_bt_irq_ack_cont + && ((!ipmi_bt_h2b_oem0_req_prev && ipmi_bt_h2b_oem0_req) + || (!ipmi_bt_bmc_to_host_ctl_attn_req_prev && ipmi_bt_bmc_to_host_ctl_attn_req) + || (!ipmi_bt_bmc_to_host_ctl_sms_req_prev && ipmi_bt_bmc_to_host_ctl_sms_req))) begin + ipmi_bt_irq_req <= 1; + end + end else begin + ipmi_bt_irq_ack_cont <= 0; + end + end + if (!ipmi_bt_irq_ack_cont) begin + // Wait for prior IRQ line handshake to complete before sampling the B2H_ATN line + // This ensures that the IRQ is still fired if the continue signal is asserted while + // B2H_ATN transitions from inactive to active. + ipmi_bt_bmc_to_host_ctl_attn_req_prev <= ipmi_bt_bmc_to_host_ctl_attn_req; + ipmi_bt_bmc_to_host_ctl_sms_req_prev <= ipmi_bt_bmc_to_host_ctl_sms_req; + ipmi_bt_h2b_oem0_req_prev <= ipmi_bt_h2b_oem0_req; + end + + if (ipmi_bt_bmc_irq_enable && ipmi_bt_host_to_bmc_ctl_attn_req_sync[2]) begin + ipmi_bt_bmc_irq <= 1; + end else begin + ipmi_bt_bmc_irq <= 0; + end + end + end + end + + // Wishbone connector -- CSRs + // + // WARNING: The LPC slave will run at ~33.33MHz from an external clock source. + // This module assumes the Wishbone clock will be clocked no lower than 1.5x the + // external LPC frequency, i.e. no lower than 50MHz. All synchronizer logic is + // built around this assumption; violating it *will* lead to data corruption and + // unpredictable / undefined behavior! 
+ always @(posedge peripheral_clock) begin + if (peripheral_reset || !lpc_reset_n_sync[2]) begin + // Reset Wishbone interface / control state machine + lpc_slave_address_reg <= 0; + lpc_slave_firmware_cycle_reg <= 0; + attn_req <= 0; + pending_address <= 0; + pending_data <= 0; + pending_fw_cycle_idsel <= 0; + pending_fw_cycle_msize <= 0; + lpc_fw_dma_cycle_active <= 0; + lpc_fw_dma_cycle_inactive <= 1; + lpc_io_cycle_irq <= 0; + lpc_tpm_cycle_irq <= 0; + lpc_firmware_cycle_irq <= 0; + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IDLE; + end else begin + case (lpc_slave_transfer_state) + LPC_SLAVE_TRANSFER_STATE_IDLE: begin + if (lpc_slave_address_ready_sync[2]) begin + // Determine cycle type + is_firmware_cycle_type = lpc_slave_firmware_cycle && !lpc_slave_tpm_cycle; + is_tpm_cycle_type = !lpc_slave_firmware_cycle && lpc_slave_tpm_cycle; + is_io_cycle_type = !lpc_slave_firmware_cycle && !lpc_slave_tpm_cycle; + + // Check if cycle is configured for intercept + cycle_range_intercept_allowed = 0; + if (range_1_enable && ((is_io_cycle_type && range_1_allow_io) || (is_tpm_cycle_type && range_1_allow_tpm))) begin + if ((lpc_slave_address >= range_1_start_address) && (lpc_slave_address <= range_1_end_address)) begin + cycle_range_intercept_allowed = 1; + end + end + if (range_2_enable && ((is_io_cycle_type && range_2_allow_io) || (is_tpm_cycle_type && range_2_allow_tpm))) begin + if ((lpc_slave_address >= range_2_start_address) && (lpc_slave_address <= range_2_end_address)) begin + cycle_range_intercept_allowed = 1; + end + end + if (range_3_enable && ((is_io_cycle_type && range_3_allow_io) || (is_tpm_cycle_type && range_3_allow_tpm))) begin + if ((lpc_slave_address >= range_3_start_address) && (lpc_slave_address <= range_3_end_address)) begin + cycle_range_intercept_allowed = 1; + end + end + if (range_4_enable && ((is_io_cycle_type && range_4_allow_io) || (is_tpm_cycle_type && range_4_allow_tpm))) begin + if ((lpc_slave_address >= range_4_start_address) && 
(lpc_slave_address <= range_4_end_address)) begin + cycle_range_intercept_allowed = 1; + end + end + if (range_5_enable && ((is_io_cycle_type && range_5_allow_io) || (is_tpm_cycle_type && range_5_allow_tpm))) begin + if ((lpc_slave_address >= range_5_start_address) && (lpc_slave_address <= range_5_end_address)) begin + cycle_range_intercept_allowed = 1; + end + end + if (range_6_enable && ((is_io_cycle_type && range_6_allow_io) || (is_tpm_cycle_type && range_6_allow_tpm))) begin + if ((lpc_slave_address >= range_6_start_address) && (lpc_slave_address <= range_6_end_address)) begin + cycle_range_intercept_allowed = 1; + end + end + if (is_firmware_cycle_type) begin + // Firmware cycles are not range-configurable + cycle_range_intercept_allowed = 1; + end + + if (enable_firmware_cycles && is_firmware_cycle_type && cycle_range_intercept_allowed) begin + // Handle firmware cycle here... + cycle_type <= AQUIEL_LPC_CYCLE_TYPE_FIRMWARE; + pending_address <= lpc_slave_address; + cycle_direction <= lpc_slave_cycle_direction; + pending_fw_cycle_idsel <= lpc_slave_fw_idsel; + pending_fw_cycle_msize <= lpc_slave_fw_msize; + if (lpc_slave_cycle_direction) begin + // Write + if (lpc_fw_cycle_dma_write_enable && (!lpc_fw_cycle_dma_idsel_filter_enable || (lpc_fw_cycle_dma_idsel_filter == lpc_slave_fw_idsel))) begin + // DMA enabled + lpc_fw_dma_current_buffer_address <= 0; + lpc_fw_dma_current_wb_address <= lpc_fw_dma_base_address + (lpc_slave_address & lpc_fw_dma_offset_address_mask); + lpc_fw_dma_cycle_active <= 1; + lpc_fw_dma_cycle_inactive <= 0; + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW01; + end else begin + // DMA disabled + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_FW01; + end + end else begin + // Read + if (lpc_fw_cycle_dma_read_enable && (!lpc_fw_cycle_dma_idsel_filter_enable || (lpc_fw_cycle_dma_idsel_filter == lpc_slave_fw_idsel))) begin + // DMA enabled + lpc_fw_dma_current_buffer_address <= 0; + lpc_fw_dma_current_wb_address <= 
lpc_fw_dma_base_address + (lpc_slave_address & lpc_fw_dma_offset_address_mask); + lpc_fw_dma_cycle_active <= 1; + lpc_fw_dma_cycle_inactive <= 0; + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR01; + end else begin + // DMA disabled + attn_req <= 1; + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_FR01; + end + end + end else if (enable_tpm_cycles && is_tpm_cycle_type && cycle_range_intercept_allowed) begin + // Handle TPM cycle here... + cycle_type <= AQUIEL_LPC_CYCLE_TYPE_TPM; + pending_address <= lpc_slave_address; + cycle_direction <= lpc_slave_cycle_direction; + pending_fw_cycle_idsel <= 0; + pending_fw_cycle_msize <= 0; + if (lpc_slave_cycle_direction) begin + // Write + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IW01; + end else begin + // Read + attn_req <= 1; + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IR01; + end + end else if (enable_io_cycles && is_io_cycle_type && cycle_range_intercept_allowed) begin + // Handle I/O cycle here... + cycle_type <= AQUIEL_LPC_CYCLE_TYPE_IO; + pending_address <= lpc_slave_address; + cycle_direction <= lpc_slave_cycle_direction; + pending_fw_cycle_idsel <= 0; + pending_fw_cycle_msize <= 0; + if (lpc_slave_cycle_direction) begin + // Write + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IW01; + end else begin + // Read + attn_req <= 1; + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IR01; + end + end else begin + // Ignore every other cycle type and any known cycle types that the CPU has chosen to ignore + if (lpc_slave_data_ready_sync[2] && !lpc_slave_continue) begin + lpc_slave_continue <= 1; + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IR02; + end + if (lpc_slave_data_ready_sync[2] && !lpc_slave_data_ack) begin + lpc_slave_data_ack <= 1; + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IW03; + end + if (lpc_slave_exception_sync_2 && !lpc_slave_exception_ack) begin + lpc_slave_exception_ack <= 1; + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_ER01; + 
end + end + + // Latch address and control registers for further use + lpc_slave_address_reg <= lpc_slave_address; + lpc_slave_firmware_cycle_reg <= lpc_slave_firmware_cycle; + end else begin + // Ensure LPC DMA transfer buffer control is released if no LPC cycle is active + lpc_fw_dma_cycle_active <= 0; + lpc_fw_dma_cycle_inactive <= 1; + end + end + LPC_SLAVE_TRANSFER_STATE_IW01: begin + if (lpc_slave_data_ready_sync[2]) begin + // Latch data register for CPU to read + pending_data <= lpc_slave_rx_data; + + // Signal CPU attention required + attn_req <= 1; + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IW02; + end + end + LPC_SLAVE_TRANSFER_STATE_IW02: begin + if (continue_transfer) begin + // CPU handler complete! + // Deassert attention request and start LPC ACK process + lpc_slave_data_ack <= 1; + attn_req <= 0; + lpc_slave_signal_error <= signal_error; + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IW03; + end + end + LPC_SLAVE_TRANSFER_STATE_IW03: begin + if (lpc_slave_data_ready_cont_sync[2]) begin + lpc_slave_data_ack <= 0; + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IW04; + end + end + LPC_SLAVE_TRANSFER_STATE_IW04: begin + if ((!lpc_slave_address_ready_sync[2]) && (!lpc_slave_data_ready_cont_sync[2])) begin + // Interlocked handshake complete! + // Return to idle + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IDLE; + end + end + LPC_SLAVE_TRANSFER_STATE_IR01: begin + if (continue_transfer) begin + // CPU handler complete! 
+ // Deassert attention request and start LPC response process + if (signal_error) begin + lpc_slave_tx_data <= 8'hff; + end else begin + lpc_slave_tx_data <= data_out; + end + lpc_slave_continue <= 1; + attn_req <= 0; + lpc_slave_signal_error <= signal_error; + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IR02; + end + end + LPC_SLAVE_TRANSFER_STATE_IR02: begin + if (lpc_slave_continue_cont_sync[2]) begin + lpc_slave_continue <= 0; + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IR03; + end + end + LPC_SLAVE_TRANSFER_STATE_IR03: begin + if (!lpc_slave_address_ready_sync[2]) begin + // Interlocked handshake complete! + // Return to idle + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IDLE; + end + end + LPC_SLAVE_TRANSFER_STATE_DW01: begin + if (lpc_slave_data_ready_sync[2]) begin + // Set up first byte read + lpc_fw_output_xfer_dma_read_addr <= lpc_fw_dma_current_buffer_address[8:0] + 0; + + // Data ready, fire off DMA engine + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW02; + end + end + LPC_SLAVE_TRANSFER_STATE_DW02: begin + if (((pending_address & lpc_fw_dma_offset_address_mask) >= lpc_fw_dma_valid_window_start_offset) && ((pending_address & lpc_fw_dma_offset_address_mask) < lpc_fw_dma_valid_window_end_offset)) begin + if ((pending_address & lpc_fw_dma_offset_address_mask) < lpc_fw_dma_length) begin + // DMA request is valid, start transfer + // Set up second byte read + lpc_fw_output_xfer_dma_read_addr <= lpc_fw_dma_current_buffer_address[8:0] + 1; + + // Continue processing + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW03; + end else begin + // Invalid DMA requested, fall back to CPU processing + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DF01; + + // Release transfer RAM control signals + lpc_fw_dma_cycle_active <= 0; + end + end else begin + // Invalid DMA requested, fall back to CPU processing + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DF01; + + // Release transfer RAM control signals + 
lpc_fw_dma_cycle_active <= 0; + end + + lpc_fw_input_xfer_dma_write_wren <= 0; + end + LPC_SLAVE_TRANSFER_STATE_DW03: begin + // Read first byte + master_wishbone_dat_w_reg[63:56] <= lpc_fw_output_xfer_read_data; + + // Set up third byte read + lpc_fw_output_xfer_dma_read_addr <= lpc_fw_dma_current_buffer_address[8:0] + 2; + + // Continue processing + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW04; + end + LPC_SLAVE_TRANSFER_STATE_DW04: begin + // Read second byte + master_wishbone_dat_w_reg[55:48] <= lpc_fw_output_xfer_read_data; + + // Set up fourth byte read + lpc_fw_output_xfer_dma_read_addr <= lpc_fw_dma_current_buffer_address[8:0] + 3; + + // Continue processing + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW05; + end + LPC_SLAVE_TRANSFER_STATE_DW05: begin + // Read third byte + master_wishbone_dat_w_reg[47:40] <= lpc_fw_output_xfer_read_data; + + // Set up fifth byte read + lpc_fw_output_xfer_dma_read_addr <= lpc_fw_dma_current_buffer_address[8:0] + 4; + + // Continue processing + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW06; + end + LPC_SLAVE_TRANSFER_STATE_DW06: begin + // Read fourth byte + master_wishbone_dat_w_reg[39:32] <= lpc_fw_output_xfer_read_data; + + // Set up sixth byte read + lpc_fw_output_xfer_dma_read_addr <= lpc_fw_dma_current_buffer_address[8:0] + 5; + + // Continue processing + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW07; + end + LPC_SLAVE_TRANSFER_STATE_DW07: begin + // Read fifth byte + master_wishbone_dat_w_reg[31:24] <= lpc_fw_output_xfer_read_data; + + // Set up seventh byte read + lpc_fw_output_xfer_dma_read_addr <= lpc_fw_dma_current_buffer_address[8:0] + 6; + + // Continue processing + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW08; + end + LPC_SLAVE_TRANSFER_STATE_DW08: begin + // Read sixth byte + master_wishbone_dat_w_reg[23:16] <= lpc_fw_output_xfer_read_data; + + // Set up eighth byte read + lpc_fw_output_xfer_dma_read_addr <= lpc_fw_dma_current_buffer_address[8:0] + 7; 
+ + // Continue processing + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW09; + end + LPC_SLAVE_TRANSFER_STATE_DW09: begin + // Read seventh byte + master_wishbone_dat_w_reg[15:8] <= lpc_fw_output_xfer_read_data; + + // Continue processing + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW10; + end + LPC_SLAVE_TRANSFER_STATE_DW10: begin + if (master_wishbone_ack) begin + // Internal fault, fall back to CPU processing + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DF01; + + // Release transfer RAM control signals + lpc_fw_dma_cycle_active <= 0; + end else begin + if (lpc_fw_cycle_dma_write_enable && (!lpc_fw_cycle_dma_idsel_filter_enable || (lpc_fw_cycle_dma_idsel_filter == pending_fw_cycle_idsel))) begin + // Read eighth byte + master_wishbone_dat_w_reg[7:0] <= lpc_fw_output_xfer_read_data; + + // Start Wishbone transfer + master_wishbone_adr_reg <= lpc_fw_dma_current_wb_address; + master_wishbone_we_reg <= 1; + master_wishbone_sel_reg <= 8'b11111111; + master_wishbone_cyc_reg <= 1; + master_wishbone_stb_reg <= 1; + + // Wait for Wishbone response + // BUGFIX: this transition must only be taken on the success path. It was + // previously assigned unconditionally after the if/else; since the last + // nonblocking assignment in a clock cycle wins, it silently overrode the + // DF01 fallback transitions above, making the CPU fallback unreachable. + // (Compare LPC_SLAVE_TRANSFER_STATE_DR01, which keeps the wait-state + // transition inside the success branch.) + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW11; + end else begin + // Internal fault, fall back to CPU processing + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DF01; + + // Release transfer RAM control signals + lpc_fw_dma_cycle_active <= 0; + end + end + end + LPC_SLAVE_TRANSFER_STATE_DW11: begin + if (master_wishbone_err) begin + // Release bus + master_wishbone_cyc_reg <= 0; + master_wishbone_stb_reg <= 0; + + // DMA failed, fall back to CPU processing + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DF01; + + // Release transfer RAM control signals + lpc_fw_dma_cycle_active <= 0; + end else if (master_wishbone_ack) begin + // Release bus + master_wishbone_cyc_reg <= 0; + master_wishbone_stb_reg <= 0; + + // NOTE(review): buffer address now advances by 8 bytes per 64-bit Wishbone transfer, but the burst bound below is still (16 - 4), presumably left over from the 32-bit version — confirm it should be (16 - 8), otherwise the 16-byte firmware burst runs one extra 64-bit beat. + if (((pending_fw_cycle_msize == 4'b0100) && (lpc_fw_dma_current_buffer_address < (16 - 4))) + || ((pending_fw_cycle_msize == 4'b0111) && 
(lpc_fw_dma_current_buffer_address < (128 - 4)))) begin + // Set up next transfer + lpc_fw_dma_current_buffer_address <= lpc_fw_dma_current_buffer_address + 8; + pending_address <= pending_address + 8; + lpc_fw_dma_current_wb_address <= lpc_fw_dma_base_address + ((pending_address + 8) & lpc_fw_dma_offset_address_mask); + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW02; + end else begin + // Release transfer RAM control signals + lpc_fw_dma_cycle_active <= 0; + + // Start LPC response process + lpc_slave_data_ack <= 1; + lpc_slave_signal_error <= 0; + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IW03; + end + end + end + LPC_SLAVE_TRANSFER_STATE_FW01: begin + if (lpc_slave_data_ready_sync[2]) begin + // Signal CPU attention required + attn_req <= 1; + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IW02; + end + end + LPC_SLAVE_TRANSFER_STATE_DR01: begin + if (((pending_address & lpc_fw_dma_offset_address_mask) >= lpc_fw_dma_valid_window_start_offset) && ((pending_address & lpc_fw_dma_offset_address_mask) < lpc_fw_dma_valid_window_end_offset)) begin + if ((pending_address & lpc_fw_dma_offset_address_mask) < lpc_fw_dma_length) begin + if (master_wishbone_ack) begin + // Internal fault, fall back to CPU processing + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DF01; + + // Release transfer RAM control signals + lpc_fw_dma_cycle_active <= 0; + end else begin + // DMA request is valid, start transfer + master_wishbone_adr_reg <= lpc_fw_dma_current_wb_address; + master_wishbone_we_reg <= 0; + master_wishbone_sel_reg <= 8'b11111111; + master_wishbone_cyc_reg <= 1; + master_wishbone_stb_reg <= 1; + + // Wait for Wishbone response + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR02; + end + end else begin + // Invalid DMA requested, fall back to CPU processing + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DF01; + + // Release transfer RAM control signals + lpc_fw_dma_cycle_active <= 0; + end + end else begin + // Invalid DMA 
requested, fall back to CPU processing + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DF01; + + // Release transfer RAM control signals + lpc_fw_dma_cycle_active <= 0; + end + + lpc_fw_input_xfer_dma_write_wren <= 0; + end + LPC_SLAVE_TRANSFER_STATE_DR02: begin + if (master_wishbone_err) begin + // Release bus + master_wishbone_cyc_reg <= 0; + master_wishbone_stb_reg <= 0; + + // DMA failed, fall back to CPU processing + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DF01; + + // Release transfer RAM control signals + lpc_fw_dma_cycle_active <= 0; + end else if (master_wishbone_ack) begin + // Release bus + master_wishbone_cyc_reg <= 0; + master_wishbone_stb_reg <= 0; + + // Cache read data + lpc_fw_dma_data_cache_reg <= master_wishbone_dat_r; + + // Write first byte + lpc_fw_input_xfer_dma_write_addr <= lpc_fw_dma_current_buffer_address[8:0] + 0; + lpc_fw_input_xfer_dma_write_data <= master_wishbone_dat_r[63:56]; + lpc_fw_input_xfer_dma_write_wren <= 1; + + // Continue processing + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR03; + end + end + LPC_SLAVE_TRANSFER_STATE_DR03: begin + // Write second byte + lpc_fw_input_xfer_dma_write_addr <= lpc_fw_dma_current_buffer_address[8:0] + 1; + lpc_fw_input_xfer_dma_write_data <= lpc_fw_dma_data_cache_reg[55:48]; + lpc_fw_input_xfer_dma_write_wren <= 1; + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR04; + end + LPC_SLAVE_TRANSFER_STATE_DR04: begin + // Write third byte + lpc_fw_input_xfer_dma_write_addr <= lpc_fw_dma_current_buffer_address[8:0] + 2; + lpc_fw_input_xfer_dma_write_data <= lpc_fw_dma_data_cache_reg[47:40]; + lpc_fw_input_xfer_dma_write_wren <= 1; + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR05; + end + LPC_SLAVE_TRANSFER_STATE_DR05: begin + // Write fourth byte + lpc_fw_input_xfer_dma_write_addr <= lpc_fw_dma_current_buffer_address[8:0] + 3; + lpc_fw_input_xfer_dma_write_data <= lpc_fw_dma_data_cache_reg[39:32]; + lpc_fw_input_xfer_dma_write_wren <= 1; + 
lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR06; + end + LPC_SLAVE_TRANSFER_STATE_DR06: begin + // Write fifth byte + lpc_fw_input_xfer_dma_write_addr <= lpc_fw_dma_current_buffer_address[8:0] + 4; + lpc_fw_input_xfer_dma_write_data <= lpc_fw_dma_data_cache_reg[31:24]; + lpc_fw_input_xfer_dma_write_wren <= 1; + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR07; + end + LPC_SLAVE_TRANSFER_STATE_DR07: begin + // Write sixth byte + lpc_fw_input_xfer_dma_write_addr <= lpc_fw_dma_current_buffer_address[8:0] + 5; + lpc_fw_input_xfer_dma_write_data <= lpc_fw_dma_data_cache_reg[23:16]; + lpc_fw_input_xfer_dma_write_wren <= 1; + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR08; + end + LPC_SLAVE_TRANSFER_STATE_DR08: begin + // Write seventh byte + lpc_fw_input_xfer_dma_write_addr <= lpc_fw_dma_current_buffer_address[8:0] + 6; + lpc_fw_input_xfer_dma_write_data <= lpc_fw_dma_data_cache_reg[15:8]; + lpc_fw_input_xfer_dma_write_wren <= 1; + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR09; + end + LPC_SLAVE_TRANSFER_STATE_DR09: begin + // Write eighth byte + lpc_fw_input_xfer_dma_write_addr <= lpc_fw_dma_current_buffer_address[8:0] + 7; + lpc_fw_input_xfer_dma_write_data <= lpc_fw_dma_data_cache_reg[7:0]; + lpc_fw_input_xfer_dma_write_wren <= 1; + + // NOTE(review): buffer address advances by 8 per 64-bit transfer but the burst bounds below are (16 - 4) / (128 - 4) — presumably left over from the 32-bit version; confirm they should be (16 - 8) / (128 - 8), otherwise the last burst overruns by one 64-bit word. + if (lpc_fw_cycle_dma_read_enable && (!lpc_fw_cycle_dma_idsel_filter_enable || (lpc_fw_cycle_dma_idsel_filter == pending_fw_cycle_idsel))) begin + if (((pending_fw_cycle_msize == 4'b0100) && (lpc_fw_dma_current_buffer_address < (16 - 4))) + || ((pending_fw_cycle_msize == 4'b0111) && (lpc_fw_dma_current_buffer_address < (128 - 4)))) begin + // Set up next transfer + lpc_fw_dma_current_buffer_address <= lpc_fw_dma_current_buffer_address + 8; + pending_address <= pending_address + 8; + lpc_fw_dma_current_wb_address <= lpc_fw_dma_base_address + ((pending_address + 8) & lpc_fw_dma_offset_address_mask); + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR01; + end else begin + 
lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR10; + end + end else begin + // DMA failed, fall back to CPU processing + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DF01; + + // Release transfer RAM control signals + lpc_fw_dma_cycle_active <= 0; + end + end + LPC_SLAVE_TRANSFER_STATE_DR10: begin + // Release transfer RAM control signals + lpc_fw_dma_cycle_active <= 0; + + // Start LPC response process + lpc_slave_continue <= 1; + lpc_slave_signal_error <= 0; + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_FR02; + end + LPC_SLAVE_TRANSFER_STATE_DF01: begin + // If DMA was active, allow one cycle for RAM control signals to reload from override status + lpc_fw_dma_cycle_inactive <= 1; + + // DMA transfer failed, fall back to CPU processing + attn_req <= 1; + if (cycle_direction) begin + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IW02; + end else begin + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_FR01; + end + end + LPC_SLAVE_TRANSFER_STATE_FR01: begin + if (continue_transfer) begin + // CPU handler complete! + // Deassert attention request and start LPC response process + lpc_slave_continue <= 1; + attn_req <= 0; + lpc_slave_signal_error <= signal_error; + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_FR02; + end + end + LPC_SLAVE_TRANSFER_STATE_FR02: begin + // Allow one cycle for RAM control signals to reload from override status + // This is safe to set here regardless of if the previous cycle was actually the DMA engine + lpc_fw_dma_cycle_inactive <= 1; + + if (lpc_slave_continue_cont_sync[2]) begin + lpc_slave_continue <= 0; + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_FR03; + end + end + LPC_SLAVE_TRANSFER_STATE_FR03: begin + if (!lpc_slave_address_ready_sync[2]) begin + // Interlocked handshake complete! 
+ // Return to idle + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IDLE; + end + end + LPC_SLAVE_TRANSFER_STATE_ER01: begin + if (!lpc_slave_exception_sync_2) begin + lpc_slave_exception_ack <= 0; + + // Interlocked handshake complete! + // Return to idle + lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IDLE; + end + end + endcase + + if (attn_req) begin + case (cycle_type) + AQUIEL_LPC_CYCLE_TYPE_IO: begin + if (lpc_io_cycle_irq_enable) begin + lpc_io_cycle_irq <= 1; + end else begin + lpc_io_cycle_irq <= 0; + end + lpc_tpm_cycle_irq <= 0; + lpc_firmware_cycle_irq <= 0; + end + AQUIEL_LPC_CYCLE_TYPE_TPM: begin + lpc_io_cycle_irq <= 0; + if (lpc_tpm_cycle_irq_enable) begin + lpc_tpm_cycle_irq <= 1; + end else begin + lpc_tpm_cycle_irq <= 0; + end + lpc_firmware_cycle_irq <= 0; + end + AQUIEL_LPC_CYCLE_TYPE_FIRMWARE: begin + lpc_io_cycle_irq <= 0; + lpc_tpm_cycle_irq <= 0; + if (lpc_firmware_cycle_irq_enable) begin + lpc_firmware_cycle_irq <= 1; + end else begin + lpc_firmware_cycle_irq <= 0; + end + end + default: begin + lpc_io_cycle_irq <= 0; + lpc_tpm_cycle_irq <= 0; + lpc_firmware_cycle_irq <= 0; + end + endcase + end else begin + lpc_io_cycle_irq <= 0; + lpc_tpm_cycle_irq <= 0; + lpc_firmware_cycle_irq <= 0; + end + end + + // Synchronizer logic for LPC core to Wishbone traffic + // Three flip flops used for maximum MTBF on control lines + // All data paths are synchronized from these signals using req/ack handshaking mechanisms + lpc_slave_address_ready_sync[2] <= lpc_slave_address_ready_sync[1]; + lpc_slave_address_ready_sync[1] <= lpc_slave_address_ready_sync[0]; + lpc_slave_address_ready_sync[0] <= lpc_slave_address_ready; + lpc_slave_data_ready_sync[2] <= lpc_slave_data_ready_sync[1]; + lpc_slave_data_ready_sync[1] <= lpc_slave_data_ready_sync[0]; + lpc_slave_data_ready_sync[0] <= lpc_slave_data_ready; + lpc_slave_exception_sync_2 <= lpc_slave_exception_sync_1; + lpc_slave_exception_sync_1 <= lpc_slave_exception_sync_0; + 
lpc_slave_exception_sync_0 <= lpc_slave_exception; + lpc_slave_data_ready_cont_sync[2] <= lpc_slave_data_ready_cont_sync[1]; + lpc_slave_data_ready_cont_sync[1] <= lpc_slave_data_ready_cont_sync[0]; + lpc_slave_data_ready_cont_sync[0] <= lpc_slave_data_ready_cont; + lpc_slave_continue_cont_sync[2] <= lpc_slave_continue_cont_sync[1]; + lpc_slave_continue_cont_sync[1] <= lpc_slave_continue_cont; + // NOTE(review): lpc_slave_continue_cont_sync has only two stages here ([1] samples the raw lpc_slave_continue_cont directly); every other control line in this block uses three flip flops as promised by the header comment — confirm stage [0] was not meant to be used as elsewhere. + lpc_reset_n_sync[2] <= lpc_reset_n_sync[1]; + lpc_reset_n_sync[1] <= lpc_reset_n_sync[0]; + lpc_reset_n_sync[0] <= lpc_reset_n; + + vuart1_h2b_fifo_reset_sync[2] <= vuart1_h2b_fifo_reset_sync[1]; + vuart1_h2b_fifo_reset_sync[1] <= vuart1_h2b_fifo_reset_sync[0]; + vuart1_h2b_fifo_reset_sync[0] <= vuart1_h2b_fifo_reset; + vuart2_h2b_fifo_reset_sync[2] <= vuart2_h2b_fifo_reset_sync[1]; + vuart2_h2b_fifo_reset_sync[1] <= vuart2_h2b_fifo_reset_sync[0]; + vuart2_h2b_fifo_reset_sync[0] <= vuart2_h2b_fifo_reset; + vuart1_b2h_fifo_wfull_sync[2] <= vuart1_b2h_fifo_wfull_sync[1]; + vuart1_b2h_fifo_wfull_sync[1] <= vuart1_b2h_fifo_wfull_sync[0]; + vuart1_b2h_fifo_wfull_sync[0] <= vuart1_b2h_fifo_wfull; + vuart1_b2h_fifo_reset_sync[2] <= vuart1_b2h_fifo_reset_sync[1]; + vuart1_b2h_fifo_reset_sync[1] <= vuart1_b2h_fifo_reset_sync[0]; + vuart1_b2h_fifo_reset_sync[0] <= vuart1_b2h_fifo_reset; + vuart2_b2h_fifo_wfull_sync[2] <= vuart2_b2h_fifo_wfull_sync[1]; + vuart2_b2h_fifo_wfull_sync[1] <= vuart2_b2h_fifo_wfull_sync[0]; + vuart2_b2h_fifo_wfull_sync[0] <= vuart2_b2h_fifo_wfull; + vuart2_b2h_fifo_reset_sync[2] <= vuart2_b2h_fifo_reset_sync[1]; + vuart2_b2h_fifo_reset_sync[1] <= vuart2_b2h_fifo_reset_sync[0]; + vuart2_b2h_fifo_reset_sync[0] <= vuart2_b2h_fifo_reset; + vuart1_status_register_sync_2 <= vuart1_status_register_sync_1; + vuart1_status_register_sync_1 <= vuart1_status_register_sync_0; + vuart1_status_register_sync_0 <= vuart1_status_register; + vuart2_status_register_sync_2 <= vuart2_status_register_sync_1; + vuart2_status_register_sync_1 <= vuart2_status_register_sync_0; + 
vuart2_status_register_sync_0 <= vuart2_status_register; + vuart1_assert_b2h_break_clear_sync[2] <= vuart1_assert_b2h_break_clear_sync[1]; + vuart1_assert_b2h_break_clear_sync[1] <= vuart1_assert_b2h_break_clear_sync[0]; + vuart1_assert_b2h_break_clear_sync[0] <= vuart1_assert_b2h_break_clear; + vuart2_assert_b2h_break_clear_sync[2] <= vuart2_assert_b2h_break_clear_sync[1]; + vuart2_assert_b2h_break_clear_sync[1] <= vuart2_assert_b2h_break_clear_sync[0]; + vuart2_assert_b2h_break_clear_sync[0] <= vuart2_assert_b2h_break_clear; + + ipmi_bt_bmc_to_host_ctl_sms_ack_sync[2] <= ipmi_bt_bmc_to_host_ctl_sms_ack_sync[1]; + ipmi_bt_bmc_to_host_ctl_sms_ack_sync[1] <= ipmi_bt_bmc_to_host_ctl_sms_ack_sync[0]; + ipmi_bt_bmc_to_host_ctl_sms_ack_sync[0] <= ipmi_bt_bmc_to_host_ctl_sms_ack; + ipmi_bt_bmc_to_host_ctl_attn_ack_sync[2] <= ipmi_bt_bmc_to_host_ctl_attn_ack_sync[1]; + ipmi_bt_bmc_to_host_ctl_attn_ack_sync[1] <= ipmi_bt_bmc_to_host_ctl_attn_ack_sync[0]; + ipmi_bt_bmc_to_host_ctl_attn_ack_sync[0] <= ipmi_bt_bmc_to_host_ctl_attn_ack; + ipmi_bt_host_to_bmc_ctl_attn_req_sync[2] <= ipmi_bt_host_to_bmc_ctl_attn_req_sync[1]; + ipmi_bt_host_to_bmc_ctl_attn_req_sync[1] <= ipmi_bt_host_to_bmc_ctl_attn_req_sync[0]; + ipmi_bt_host_to_bmc_ctl_attn_req_sync[0] <= ipmi_bt_host_to_bmc_ctl_attn_req; + ipmi_bt_host_to_bmc_ctl_oem0_req_sync[2] <= ipmi_bt_host_to_bmc_ctl_oem0_req_sync[1]; + ipmi_bt_host_to_bmc_ctl_oem0_req_sync[1] <= ipmi_bt_host_to_bmc_ctl_oem0_req_sync[0]; + ipmi_bt_host_to_bmc_ctl_oem0_req_sync[0] <= ipmi_bt_host_to_bmc_ctl_oem0_req; + ipmi_bt_irq_ack_sync[2] <= ipmi_bt_irq_ack_sync[1]; + ipmi_bt_irq_ack_sync[1] <= ipmi_bt_irq_ack_sync[0]; + ipmi_bt_irq_ack_sync[0] <= ipmi_bt_irq_ack; + ipmi_bt_irq_bmc_reset_sync[2] <= ipmi_bt_irq_bmc_reset_sync[1]; + ipmi_bt_irq_bmc_reset_sync[1] <= ipmi_bt_irq_bmc_reset_sync[0]; + ipmi_bt_irq_bmc_reset_sync[0] <= ipmi_bt_irq_bmc_reset; + ipmi_bt_host_to_bmc_ctl_h_busy_sync[2] <= ipmi_bt_host_to_bmc_ctl_h_busy_sync[1]; + 
ipmi_bt_host_to_bmc_ctl_h_busy_sync[1] <= ipmi_bt_host_to_bmc_ctl_h_busy_sync[0]; + ipmi_bt_host_to_bmc_ctl_h_busy_sync[0] <= ipmi_bt_host_to_bmc_ctl_h_busy; + ipmi_bt_irq_enable_sync[2] <= ipmi_bt_irq_enable_sync[1]; + ipmi_bt_irq_enable_sync[1] <= ipmi_bt_irq_enable_sync[0]; + ipmi_bt_irq_enable_sync[0] <= ipmi_bt_irq_enable; + end + + // Synchronizer logic for Wishbone to LPC core traffic + always @(posedge lpc_clock) begin + // Two flip flops used on the return path + lpc_slave_continue_sync[1] <= lpc_slave_continue_sync[0]; + lpc_slave_continue_sync[0] <= lpc_slave_continue; + lpc_slave_data_ack_sync[1] <= lpc_slave_data_ack_sync[0]; + lpc_slave_data_ack_sync[0] <= lpc_slave_data_ack; + lpc_slave_signal_error_sync[1] <= lpc_slave_signal_error_sync[0]; + lpc_slave_signal_error_sync[0] <= lpc_slave_signal_error; + lpc_slave_exception_ack_sync[1] <= lpc_slave_exception_ack_sync[0]; + lpc_slave_exception_ack_sync[0] <= lpc_slave_exception_ack; + irq_tx_ready_sync[1] <= irq_tx_ready_sync[0]; + irq_tx_ready_sync[0] <= irq_tx_ready; + irq_request_sync_1 <= irq_request_sync_0; + irq_request_sync_0 <= irq_request; + peripheral_reset_sync[1] <= peripheral_reset_sync[0]; + peripheral_reset_sync[0] <= peripheral_reset; + + vuart1_h2b_fifo_rempty_sync[1] <= vuart1_h2b_fifo_rempty_sync[0]; + vuart1_h2b_fifo_rempty_sync[0] <= vuart1_h2b_fifo_rempty; + vuart2_h2b_fifo_rempty_sync[1] <= vuart2_h2b_fifo_rempty_sync[0]; + vuart2_h2b_fifo_rempty_sync[0] <= vuart2_h2b_fifo_rempty; + vuart1_control_register_sync_1 <= vuart1_control_register_sync_0; + vuart1_control_register_sync_0 <= vuart1_control_register; + vuart2_control_register_sync_1 <= vuart2_control_register_sync_0; + vuart2_control_register_sync_0 <= vuart2_control_register; + + ipmi_bt_bmc_to_host_ctl_sms_req_sync[1] <= ipmi_bt_bmc_to_host_ctl_sms_req_sync[0]; + ipmi_bt_bmc_to_host_ctl_sms_req_sync[0] <= ipmi_bt_bmc_to_host_ctl_sms_req; + ipmi_bt_bmc_to_host_ctl_attn_req_sync[1] <= 
ipmi_bt_bmc_to_host_ctl_attn_req_sync[0]; + ipmi_bt_bmc_to_host_ctl_attn_req_sync[0] <= ipmi_bt_bmc_to_host_ctl_attn_req; + ipmi_bt_bmc_to_host_ctl_sms_ack_cont_sync[1] <= ipmi_bt_bmc_to_host_ctl_sms_ack_cont_sync[0]; + ipmi_bt_bmc_to_host_ctl_sms_ack_cont_sync[0] <= ipmi_bt_bmc_to_host_ctl_sms_ack_cont; + ipmi_bt_bmc_to_host_ctl_attn_ack_cont_sync[1] <= ipmi_bt_bmc_to_host_ctl_attn_ack_cont_sync[0]; + ipmi_bt_bmc_to_host_ctl_attn_ack_cont_sync[0] <= ipmi_bt_bmc_to_host_ctl_attn_ack_cont; + ipmi_bt_host_to_bmc_ctl_attn_req_cont_sync[1] <= ipmi_bt_host_to_bmc_ctl_attn_req_cont_sync[0]; + ipmi_bt_host_to_bmc_ctl_attn_req_cont_sync[0] <= ipmi_bt_host_to_bmc_ctl_attn_req_cont; + ipmi_bt_host_to_bmc_ctl_oem0_req_cont_sync[1] <= ipmi_bt_host_to_bmc_ctl_oem0_req_cont_sync[0]; + ipmi_bt_host_to_bmc_ctl_oem0_req_cont_sync[0] <= ipmi_bt_host_to_bmc_ctl_oem0_req_cont; + ipmi_bt_irq_ack_cont_sync[1] <= ipmi_bt_irq_ack_cont_sync[0]; + ipmi_bt_irq_ack_cont_sync[0] <= ipmi_bt_irq_ack_cont; + ipmi_bt_irq_bmc_reset_cont_sync[1] <= ipmi_bt_irq_bmc_reset_cont_sync[0]; + ipmi_bt_irq_bmc_reset_cont_sync[0] <= ipmi_bt_irq_bmc_reset_cont; + ipmi_bt_bmc_to_host_ctl_b_busy_sync[1] <= ipmi_bt_bmc_to_host_ctl_b_busy_sync[0]; + ipmi_bt_bmc_to_host_ctl_b_busy_sync[0] <= ipmi_bt_bmc_to_host_ctl_b_busy; + ipmi_bt_irq_req_sync[1] <= ipmi_bt_irq_req_sync[0]; + ipmi_bt_irq_req_sync[0] <= ipmi_bt_irq_req; + end +endmodule \ No newline at end of file diff --git a/constraints/arctic-tern.lpf b/constraints/arctic-tern.lpf index 958b762..29f2a25 100644 --- a/constraints/arctic-tern.lpf +++ b/constraints/arctic-tern.lpf @@ -207,6 +207,24 @@ IOBUF PORT "spi_flash_miso" IO_TYPE=LVCMOS33; IOBUF PORT "spi_flash_wp_n" IO_TYPE=LVCMOS33; IOBUF PORT "spi_flash_hold_n" IO_TYPE=LVCMOS33; +# LPC slave +LOCATE COMP "lpc_slave_data[0]" SITE "C4"; +LOCATE COMP "lpc_slave_data[1]" SITE "A3"; +LOCATE COMP "lpc_slave_data[2]" SITE "B4"; +LOCATE COMP "lpc_slave_data[3]" SITE "B3"; +LOCATE COMP "lpc_slave_serirq" SITE 
"F4"; +LOCATE COMP "lpc_slave_frame_n" SITE "D3"; +LOCATE COMP "lpc_slave_reset_n" SITE "C3"; +LOCATE COMP "lpc_slave_clock" SITE "H5"; +IOBUF PORT "lpc_slave_data[0]" IO_TYPE=LVCMOS33; +IOBUF PORT "lpc_slave_data[1]" IO_TYPE=LVCMOS33; +IOBUF PORT "lpc_slave_data[2]" IO_TYPE=LVCMOS33; +IOBUF PORT "lpc_slave_data[3]" IO_TYPE=LVCMOS33; +IOBUF PORT "lpc_slave_irq" IO_TYPE=LVCMOS33; +IOBUF PORT "lpc_slave_frame_n" IO_TYPE=LVCMOS33; +IOBUF PORT "lpc_slave_reset_n" IO_TYPE=LVCMOS33; +IOBUF PORT "lpc_slave_clock" IO_TYPE=LVCMOS33; + LOCATE COMP "dvo_r[0]" SITE "C14"; LOCATE COMP "dvo_r[1]" SITE "E14"; LOCATE COMP "dvo_r[2]" SITE "D14"; diff --git a/fpga/top-rcs-arctic-tern-bmc-card.vhdl b/fpga/top-rcs-arctic-tern-bmc-card.vhdl index 3b02aa3..e05809b 100644 --- a/fpga/top-rcs-arctic-tern-bmc-card.vhdl +++ b/fpga/top-rcs-arctic-tern-bmc-card.vhdl @@ -23,6 +23,7 @@ entity toplevel is LOG_LENGTH : natural := 0; USE_LITEETH : boolean := true; USE_TERCEL : boolean := true; + USE_LPC_SLAVE : boolean := true; UART_IS_16550 : boolean := true; HAS_UART1 : boolean := false; ICACHE_NUM_LINES : natural := 64 @@ -42,6 +43,13 @@ entity toplevel is spi_flash_wp_n : inout std_ulogic; spi_flash_hold_n : inout std_ulogic; + -- LPC slave + lpc_slave_data : inout std_ulogic_vector(3 downto 0); + lpc_slave_serirq : inout std_ulogic; + lpc_slave_frame_n : in std_ulogic; + lpc_slave_reset_n : in std_ulogic; + lpc_slave_clock : in std_ulogic; + -- Ethernet eth_clocks_tx : out std_ulogic; eth_clocks_rx : in std_ulogic; @@ -89,6 +97,7 @@ architecture behaviour of toplevel is signal wb_ext_is_dram_init : std_ulogic; signal wb_ext_is_eth : std_ulogic; signal wb_ext_is_tercel : std_ulogic; + signal wb_ext_is_lpc_slave : std_ulogic; -- DRAM main data wishbone connection signal wb_dram_in : wishbone_master_out; @@ -104,6 +113,9 @@ architecture behaviour of toplevel is -- Tercel connection signal wb_tercel_out : wb_io_slave_out := wb_io_slave_out_init; + -- Aquila connection + signal wb_lpc_slave_out : 
wb_io_slave_out := wb_io_slave_out_init; + -- Control/status signal core_alt_reset : std_ulogic; @@ -114,10 +126,24 @@ architecture behaviour of toplevel is signal spi_sdat_oe : std_ulogic_vector(3 downto 0); signal spi_sdat_i : std_ulogic_vector(3 downto 0); + -- LPC slave bus + signal lpc_sdat_o : std_ulogic_vector(3 downto 0); + signal lpc_slave_oe : std_ulogic; + signal lpc_sdat_i : std_ulogic_vector(3 downto 0); + signal lpc_sirq_o : std_ulogic; + signal lpc_sirq_oe : std_ulogic; + signal lpc_sirq_i : std_ulogic; + signal lpc_sframe_n : std_ulogic; + signal lpc_sreset_n : std_ulogic; + signal lpc_sclock : std_ulogic; + -- SPI main data wishbone connection signal wb_spiflash_in : wb_io_master_out; signal wb_spiflash_out : wb_io_slave_out; + signal wb_master_lpc_slave_in : wishbone_slave_out; + signal wb_master_lpc_slave_out : wishbone_master_out; + -- Fixup various memory sizes based on generics function get_bram_size return natural is begin @@ -146,9 +172,21 @@ architecture behaviour of toplevel is USRMCLKTS : IN STD_ULOGIC ); END COMPONENT; + attribute syn_noprune: boolean ; attribute syn_noprune of USRMCLK: component is true; + component aquila_lpc_sdr_tristate + port( + i : out std_ulogic; + oe : in std_ulogic; + o : in std_ulogic; + clk : in std_ulogic; + p : inout std_ulogic + ); + end component; + attribute syn_noprune of aquila_lpc_sdr_tristate: component is true; + begin -- Main SoC @@ -171,6 +209,7 @@ begin LOG_LENGTH => LOG_LENGTH, HAS_LITEETH => USE_LITEETH, HAS_TERCEL => USE_TERCEL, + HAS_LPC_SLAVE => USE_LPC_SLAVE, UART0_IS_16550 => UART_IS_16550, HAS_UART1 => HAS_UART1, ICACHE_NUM_LINES => ICACHE_NUM_LINES @@ -198,10 +237,14 @@ begin wb_ext_is_dram_init => wb_ext_is_dram_init, wb_ext_is_eth => wb_ext_is_eth, wb_ext_is_tercel => wb_ext_is_tercel, + wb_ext_is_lpc_slave => wb_ext_is_lpc_slave, wb_spiflash_in => wb_spiflash_in, wb_spiflash_out => wb_spiflash_out, + wb_lpc_slave_in => wb_master_lpc_slave_in, + wb_lpc_slave_out => 
wb_master_lpc_slave_out, + alt_reset => core_alt_reset ); @@ -495,9 +538,148 @@ begin end generate; + -- LPC slave + -- + lpc_sframe_n <= lpc_slave_frame_n; + lpc_sreset_n <= lpc_slave_reset_n; + lpc_sclock <= lpc_slave_clock; + + lpc_sdat_buf_0: aquila_lpc_sdr_tristate port map ( + o => lpc_sdat_o(0), + oe => lpc_slave_oe, + i => lpc_sdat_i(0), + clk => lpc_sclock, + p => lpc_slave_data(0) + ); + + lpc_sdat_buf_1: aquila_lpc_sdr_tristate port map ( + o => lpc_sdat_o(1), + oe => lpc_slave_oe, + i => lpc_sdat_i(1), + clk => lpc_sclock, + p => lpc_slave_data(1) + ); + + lpc_sdat_buf_2: aquila_lpc_sdr_tristate port map ( + o => lpc_sdat_o(2), + oe => lpc_slave_oe, + i => lpc_sdat_i(2), + clk => lpc_sclock, + p => lpc_slave_data(2) + ); + + lpc_sdat_buf_3: aquila_lpc_sdr_tristate port map ( + o => lpc_sdat_o(3), + oe => lpc_slave_oe, + i => lpc_sdat_i(3), + clk => lpc_sclock, + p => lpc_slave_data(3) + ); + + lpc_sirq_buf: aquila_lpc_sdr_tristate port map ( + o => lpc_sirq_o, + oe => lpc_sirq_oe, + i => lpc_sirq_i, + clk => lpc_sclock, + p => lpc_slave_serirq + ); + + has_lpc_slave : if USE_LPC_SLAVE generate + + component aquila_lpc_slave_wishbone port ( + peripheral_clock : in std_ulogic; + peripheral_reset : in std_ulogic; + + lpc_data_out : out std_ulogic_vector(3 downto 0); + lpc_data_in : in std_ulogic_vector(3 downto 0); + lpc_data_direction : out std_ulogic; + lpc_irq_out : out std_ulogic; + lpc_irq_in : in std_ulogic; + lpc_irq_direction : out std_ulogic; + + lpc_frame_n : in std_ulogic; + lpc_reset_n : in std_ulogic; + lpc_clock : in std_ulogic; + + slave_wishbone_adr : in std_ulogic_vector(29 downto 0); + slave_wishbone_dat_w : in std_ulogic_vector(31 downto 0); + slave_wishbone_dat_r : out std_ulogic_vector(31 downto 0); + slave_wishbone_sel : in std_ulogic_vector(3 downto 0); + slave_wishbone_cyc : in std_ulogic; + slave_wishbone_stb : in std_ulogic; + slave_wishbone_ack : out std_ulogic; + slave_wishbone_we : in std_ulogic; + slave_wishbone_err : out 
std_ulogic; + + master_wishbone_adr : out std_ulogic_vector(wishbone_addr_bits-1 downto 0); + master_wishbone_dat_w : out std_ulogic_vector(wishbone_data_bits-1 downto 0); + master_wishbone_dat_r : in std_ulogic_vector(wishbone_data_bits-1 downto 0); + master_wishbone_sel : out std_ulogic_vector(wishbone_sel_bits-1 downto 0); + master_wishbone_cyc : out std_ulogic; + master_wishbone_stb : out std_ulogic; + master_wishbone_ack : in std_ulogic; + master_wishbone_we : out std_ulogic; + master_wishbone_err : in std_ulogic; + + debug_port : out std_ulogic_vector(15 downto 0); + lpc_clock_mirror : out std_ulogic + ); + end component; + + signal wb_lpc_slave_cyc : std_ulogic; + + begin + lpc_slave : aquila_lpc_slave_wishbone + port map( + peripheral_clock => system_clk, + peripheral_reset => soc_rst, + + lpc_data_out => lpc_sdat_o, + lpc_data_in => lpc_sdat_i, + lpc_data_direction => lpc_slave_oe, + lpc_irq_out => lpc_sirq_o, + lpc_irq_in => lpc_sirq_i, + lpc_irq_direction => lpc_sirq_oe, + lpc_frame_n => lpc_sframe_n, + lpc_reset_n => lpc_sreset_n, + lpc_clock => lpc_sclock, + + master_wishbone_adr => wb_master_lpc_slave_out.adr, + master_wishbone_dat_w => wb_master_lpc_slave_out.dat, + master_wishbone_dat_r => wb_master_lpc_slave_in.dat, + master_wishbone_sel => wb_master_lpc_slave_out.sel, + master_wishbone_cyc => wb_master_lpc_slave_out.cyc, + master_wishbone_stb => wb_master_lpc_slave_out.stb, + master_wishbone_ack => wb_master_lpc_slave_in.ack, + master_wishbone_we => wb_master_lpc_slave_out.we, + master_wishbone_err => '0', + + slave_wishbone_adr => wb_ext_io_in.adr, + slave_wishbone_dat_w => wb_ext_io_in.dat, + slave_wishbone_dat_r => wb_lpc_slave_out.dat, + slave_wishbone_sel => wb_ext_io_in.sel, + slave_wishbone_cyc => wb_lpc_slave_cyc, + slave_wishbone_stb => wb_ext_io_in.stb, + slave_wishbone_ack => wb_lpc_slave_out.ack, + slave_wishbone_we => wb_ext_io_in.we, + slave_wishbone_err => open, + + debug_port => open, + lpc_clock_mirror => open + ); + + -- Gate cyc 
with "chip select" from soc + wb_lpc_slave_cyc <= wb_ext_io_in.cyc and wb_ext_is_lpc_slave; + + -- Aquila isn't pipelined + wb_lpc_slave_out.stall <= not wb_lpc_slave_out.ack; + + end generate; + -- Mux WB response on the IO bus wb_ext_io_out <= wb_eth_out when wb_ext_is_eth = '1' else wb_tercel_out when wb_ext_is_tercel = '1' else + wb_lpc_slave_out when wb_ext_is_lpc_slave = '1' else wb_dram_ctrl_out; end architecture behaviour; diff --git a/soc.vhdl b/soc.vhdl index 507a146..0295848 100644 --- a/soc.vhdl +++ b/soc.vhdl @@ -67,6 +67,7 @@ entity soc is LOG_LENGTH : natural := 512; HAS_LITEETH : boolean := false; HAS_TERCEL : boolean := false; + HAS_LPC_SLAVE : boolean := false; UART0_IS_16550 : boolean := true; HAS_UART1 : boolean := false; ICACHE_NUM_LINES : natural := 64; @@ -92,6 +93,7 @@ entity soc is wb_ext_is_dram_init : out std_ulogic; wb_ext_is_eth : out std_ulogic; wb_ext_is_tercel : out std_ulogic; + wb_ext_is_lpc_slave : out std_ulogic; -- External interrupts ext_irq_eth : in std_ulogic := '0'; @@ -108,6 +110,10 @@ entity soc is wb_spiflash_in : out wb_io_master_out; wb_spiflash_out : in wb_io_slave_out := wb_io_slave_out_init; + -- LPC slave DMA signals + wb_lpc_slave_in : out wishbone_slave_out; + wb_lpc_slave_out : in wishbone_master_out; + -- DRAM controller signals alt_reset : in std_ulogic := '0' ); @@ -122,10 +128,12 @@ architecture behaviour of soc is signal wishbone_icore_out : wishbone_master_out; signal wishbone_debug_in : wishbone_slave_out; signal wishbone_debug_out : wishbone_master_out; + signal wishbone_lpc_in : wishbone_slave_out; + signal wishbone_lpc_out : wishbone_master_out; -- Arbiter array (ghdl doesnt' support assigning the array -- elements in the entity instantiation) - constant NUM_WB_MASTERS : positive := 3; + constant NUM_WB_MASTERS : positive := 4; signal wb_masters_out : wishbone_master_out_vector(0 to NUM_WB_MASTERS-1); signal wb_masters_in : wishbone_slave_out_vector(0 to NUM_WB_MASTERS-1); @@ -288,10 +296,12 @@ begin -- 
Wishbone bus master arbiter & mux wb_masters_out <= (0 => wishbone_dcore_out, 1 => wishbone_icore_out, - 2 => wishbone_debug_out); + 2 => wishbone_debug_out, + 3 => wishbone_lpc_out); wishbone_dcore_in <= wb_masters_in(0); wishbone_icore_in <= wb_masters_in(1); wishbone_debug_in <= wb_masters_in(2); + wishbone_lpc_in <= wb_masters_in(3); wishbone_arbiter_0: entity work.wishbone_arbiter generic map( NUM_MASTERS => NUM_WB_MASTERS @@ -590,6 +600,9 @@ begin elsif wb_sio_out.adr(23 downto 16) = x"03" and HAS_LITEETH then wb_ext_is_eth <= '1'; ext_valid := true; + elsif wb_sio_out.adr(21 downto 14) = x"04" and HAS_LPC_SLAVE then + wb_ext_is_lpc_slave <= '1'; + ext_valid := true; elsif wb_sio_out.adr(21 downto 14) = x"05" and HAS_TERCEL then wb_ext_is_tercel <= '1'; ext_valid := true; @@ -900,9 +913,11 @@ begin assert not(is_x(wb_masters_out(0).cyc)) and not(is_x(wb_masters_out(0).stb)) severity failure; assert not(is_x(wb_masters_out(1).cyc)) and not(is_x(wb_masters_out(1).stb)) severity failure; assert not(is_x(wb_masters_out(2).cyc)) and not(is_x(wb_masters_out(2).stb)) severity failure; + assert not(is_x(wb_masters_out(3).cyc)) and not(is_x(wb_masters_out(3).stb)) severity failure; assert not(is_x(wb_masters_in(0).ack)) severity failure; assert not(is_x(wb_masters_in(1).ack)) severity failure; assert not(is_x(wb_masters_in(2).ack)) severity failure; + assert not(is_x(wb_masters_in(3).ack)) severity failure; -- Main memory wishbones assert not(is_x(wb_bram_in.cyc)) and not (is_x(wb_bram_in.stb)) severity failure; @@ -932,4 +947,8 @@ begin end process; --pragma synthesis_on + -- LPC slave DMA + wb_lpc_slave_in <= wishbone_lpc_in; + wishbone_lpc_out <= wb_lpc_slave_out; + end architecture behaviour;