// [WIP] Add initial version of Aquila LPC slave core
// [microwatt.git] / aquila / wishbone_lpc_slave_interface.v
1 // © 2017 - 2022 Raptor Engineering, LLC
2 //
3 // Released under the terms of the GPL v3
4 // See the LICENSE file for full details
5 //
6 // This LPC slave peripheral currently implements I/O, TPM, and firmware memory read/write functionality
7 // An optional DMA engine is provided to accelerate LPC firmware cycle transfers
8 //
9 // The LPC firmware cycle DMA engine provides configurable access protection on 32-bit (word) boundaries -- short word and byte access that overlaps an allowed range at 32-bit granularity will also be allowed.
10 // As a result, it is recommended to keep a single word safety buffer before and after an active LPC firmware DMA target region in memory.
11 //
12 // =============================================================================================
13 // Memory Map:
14 // =============================================================================================
15 // Device ID string (8 bytes)
16 // Version register (4 bytes): {24'b0, version}
17 // Control register 1 (4 bytes): {12'b0, lpc_io_cycle_irq_enable, lpc_tpm_cycle_irq_enable, lpc_firmware_cycle_irq_enable, ipmi_bt_bmc_irq_enable, ipmi_bt_port_address, ipmi_bt_alt_irq, enable_ipmi_bt, enable_vuart2, enable_vuart1, enable_io_cycles, enable_tpm_cycles, enable_firmware_cycles, enable_irq}
18 // Control register 2 (4 bytes): {16'b0, data_out, 6'b0, signal_error, continue_transfer}
19 // Range config 1 (4 bytes): {enable_range, range_type, 1'b0, start_address}
20 // Range end 1 (4 bytes): {4'b0, end_address}
21 // Range config 2 (4 bytes): {enable_range, range_type, 1'b0, start_address}
22 // Range end 2 (4 bytes): {4'b0, end_address}
23 // Range config 3 (4 bytes): {enable_range, range_type, 1'b0, start_address}
24 // Range end 3 (4 bytes): {4'b0, end_address}
25 // Range config 4 (4 bytes): {enable_range, range_type, 1'b0, start_address}
26 // Range end 4 (4 bytes): {4'b0, end_address}
27 // Range config 5 (4 bytes): {enable_range, range_type, 1'b0, start_address}
28 // Range end 5 (4 bytes): {4'b0, end_address}
29 // Range config 6 (4 bytes): {enable_range, range_type, 1'b0, start_address}
30 // Range end 6 (4 bytes): {4'b0, end_address}
31 // DMA config 1 (4 bytes): {24'b0, lpc_fw_cycle_dma_idsel_filter, 1'b0, lpc_fw_cycle_dma_idsel_filter_enable, lpc_fw_cycle_dma_write_enable, lpc_fw_cycle_dma_read_enable}
32 // DMA config 2 (4 bytes): {lpc_fw_dma_base_address_low}
33 // DMA config 3 (4 bytes): {lpc_fw_dma_base_address_high}
34 // DMA config 4 (4 bytes): {lpc_fw_dma_length}
35 // DMA config 5 (4 bytes): {lpc_fw_dma_valid_window_start_offset}
36 // DMA config 6 (4 bytes): {lpc_fw_dma_valid_window_end_offset}
37 // DMA config 7 (4 bytes): {lpc_fw_dma_offset_address_mask}
38 // Status register 1 (4 bytes): {8'b0, pending_fw_cycle_idsel, pending_fw_cycle_msize, 11'b0, bus_in_reset, cycle_type, cycle_direction, attn_req}
39 // Status register 2 (4 bytes): {4'b0, pending_address}
40 // Status register 3 (4 bytes): {24'b0, pending_data}
41 // Status register 4 (4 bytes): {8'b0, vuart2_h2b_fifo_data_available_count, vuart1_h2b_fifo_data_available_count, 4'b0, vuart2_irq_source, vuart1_irq_source, 1'b0, lpc_io_cycle_irq_asserted, lpc_tpm_cycle_irq_asserted, lpc_firmware_cycle_irq_asserted, ipmi_bt_bmc_irq_asserted, vuart2_irq_asserted, vuart1_irq_asserted, irq_asserted}
42 // IPMI BT interface status register (4 bytes): {24'b0, BT_CTRL}
43
44 // Required by this wrapper module
45 `define LPC_SLAVE_DEBUG
46
47 `ifndef DISABLE_FIRMWARE_MEMORY_CYCLES
48 `define ENABLE_FIRMWARE_MEMORY_CYCLES
49 `endif
50
51 // Stop LiteX silently ignoring net naming / missing register errors
52 `default_nettype none
53
54 module aquila_lpc_slave_wishbone(
55 // Wishbone slave port signals
56 input wire slave_wishbone_cyc,
57 input wire slave_wishbone_stb,
58 input wire slave_wishbone_we,
59 input wire [29:0] slave_wishbone_adr,
60 input wire [31:0] slave_wishbone_dat_w,
61 output wire [31:0] slave_wishbone_dat_r,
62 input wire [3:0] slave_wishbone_sel,
63 output wire slave_wishbone_ack,
64 output wire slave_wishbone_err,
65 output wire slave_irq_o,
66
67 // Wishbone master port signals (DMA)
68 output wire master_wishbone_cyc,
69 output wire master_wishbone_stb,
70 output wire master_wishbone_we,
71 output wire [(WISHBONE_DMA_ADDR_BUS_WIDTH-1):0] master_wishbone_adr,
72 output wire [(WISHBONE_DMA_DATA_BUS_WIDTH-1):0] master_wishbone_dat_w,
73 input wire [(WISHBONE_DMA_DATA_BUS_WIDTH-1):0] master_wishbone_dat_r,
74 output wire [((WISHBONE_DMA_DATA_BUS_WIDTH/8)-1):0] master_wishbone_sel,
75 input wire master_wishbone_ack,
76 input wire master_wishbone_err,
77
78 // LPC core signals
79 output wire [3:0] lpc_data_out, // Must have a LPC-clocked register packed into the I/O buffer
80 input wire [3:0] lpc_data_in, // Must have a LPC-clocked register packed into the I/O buffer
81 output wire lpc_data_direction, // 0 == tristate (input), 1 == driven (output) [IOB packed]
82 output wire lpc_irq_out,
83 input wire lpc_irq_in,
84 output wire lpc_irq_direction, // 0 == tristate (input), 1 == driven (output) [IOB packed]
85
86 input wire lpc_frame_n,
87 input wire lpc_reset_n,
88 input wire lpc_clock,
89
90 output wire [15:0] debug_port,
91 output wire lpc_clock_mirror,
92
93 input wire peripheral_reset,
94 input wire peripheral_clock
95 );
96
97 // Wishbone master port parameters
98 parameter WISHBONE_DMA_ADDR_BUS_WIDTH = 32;
99 parameter WISHBONE_DMA_DATA_BUS_WIDTH = 64;
100
101 // VUART parameters
102 parameter VUART_IRQ_REASON_NONE = 0;
103 parameter VUART_IRQ_REASON_QUEUE_TRIGGER = 1;
104 parameter VUART_IRQ_REASON_QUEUE_TIMEOUT = 2;
105
106 // Control and status registers
107 wire [63:0] device_id;
108 wire [31:0] device_version;
109 reg [31:0] control_reg1 = 0;
110 reg [31:0] control_reg2 = 0;
111 reg [31:0] range_config1 = 0;
112 reg [31:0] range_end1 = 0;
113 reg [31:0] range_config2 = 0;
114 reg [31:0] range_end2 = 0;
115 reg [31:0] range_config3 = 0;
116 reg [31:0] range_end3 = 0;
117 reg [31:0] range_config4 = 0;
118 reg [31:0] range_end4 = 0;
119 reg [31:0] range_config5 = 0;
120 reg [31:0] range_end5 = 0;
121 reg [31:0] range_config6 = 0;
122 reg [31:0] range_end6 = 0;
123 reg [31:0] dma_config_reg1 = 0;
124 reg [31:0] dma_config_reg2 = 0;
125 reg [31:0] dma_config_reg3 = 0;
126 reg [31:0] dma_config_reg4 = 0;
127 reg [31:0] dma_config_reg5 = 0;
128 reg [31:0] dma_config_reg6 = 0;
129 reg [31:0] dma_config_reg7 = 0;
130 wire [31:0] status_reg1;
131 wire [31:0] status_reg2;
132 wire [31:0] status_reg3;
133 wire [31:0] status_reg4;
134 wire [31:0] ipmi_bt_status_reg;
135
136 // Device identifier
137 assign device_id = 64'h7c5250544c504353;
138 assign device_version = 32'h00010000;
139
140 // CSR connections
141 wire lpc_io_cycle_irq_enable;
142 wire lpc_tpm_cycle_irq_enable;
143 wire lpc_firmware_cycle_irq_enable;
144 wire ipmi_bt_bmc_irq_enable;
145 wire [7:0] ipmi_bt_port_address;
146 wire enable_vuart1;
147 wire enable_vuart2;
148 wire enable_ipmi_bt;
149 wire ipmi_bt_alt_irq;
150 wire [15:0] ipmi_bt_port_base_address;
151 wire enable_io_cycles;
152 wire enable_tpm_cycles;
153 wire enable_firmware_cycles;
154 wire enable_irq;
155 wire [7:0] data_out;
156 wire signal_error;
157 reg continue_transfer = 0; // Write 1 to continue, always reads 0
158 wire range_1_enable;
159 wire range_1_allow_io;
160 wire range_1_allow_tpm;
161 wire [27:0] range_1_start_address;
162 wire [27:0] range_1_end_address;
163 wire range_2_enable;
164 wire range_2_allow_io;
165 wire range_2_allow_tpm;
166 wire [27:0] range_2_start_address;
167 wire [27:0] range_2_end_address;
168 wire range_3_enable;
169 wire range_3_allow_io;
170 wire range_3_allow_tpm;
171 wire [27:0] range_3_start_address;
172 wire [27:0] range_3_end_address;
173 wire range_4_enable;
174 wire range_4_allow_io;
175 wire range_4_allow_tpm;
176 wire [27:0] range_4_start_address;
177 wire [27:0] range_4_end_address;
178 wire range_5_enable;
179 wire range_5_allow_io;
180 wire range_5_allow_tpm;
181 wire [27:0] range_5_start_address;
182 wire [27:0] range_5_end_address;
183 wire range_6_enable;
184 wire range_6_allow_io;
185 wire range_6_allow_tpm;
186 wire [27:0] range_6_start_address;
187 wire [27:0] range_6_end_address;
188 wire lpc_fw_cycle_dma_read_enable;
189 wire lpc_fw_cycle_dma_write_enable;
190 wire lpc_fw_cycle_dma_idsel_filter_enable;
191 wire [3:0] lpc_fw_cycle_dma_idsel_filter;
192 wire [63:0] lpc_fw_dma_base_address;
193 wire [31:0] lpc_fw_dma_length;
194 wire [31:0] lpc_fw_dma_valid_window_start_offset;
195 wire [31:0] lpc_fw_dma_valid_window_end_offset;
196 wire [31:0] lpc_fw_dma_offset_address_mask;
197 reg [1:0] cycle_type = 0;
198 reg cycle_direction = 0;
199 reg attn_req = 0;
200 reg [27:0] pending_address = 0;
201 reg [7:0] pending_data = 0;
202 reg [3:0] pending_fw_cycle_idsel = 0;
203 reg [3:0] pending_fw_cycle_msize = 0;
204
205 reg [(WISHBONE_DMA_ADDR_BUS_WIDTH-1):0] lpc_fw_dma_current_wb_address = 0;
206 reg [(WISHBONE_DMA_DATA_BUS_WIDTH-1):0] lpc_fw_dma_data_cache_reg = 0;
207 reg [8:0] lpc_fw_dma_current_buffer_address = 0;
208
209 wire lpc_io_cycle_irq_asserted;
210 wire lpc_tpm_cycle_irq_asserted;
211 wire lpc_firmware_cycle_irq_asserted;
212
213 reg lpc_io_cycle_irq = 0;
214 reg lpc_tpm_cycle_irq = 0;
215 reg lpc_firmware_cycle_irq = 0;
216
217 reg [1:0] vuart1_irq_source = 0;
218 reg [1:0] vuart2_irq_source = 0;
219
220 wire vuart1_irq_asserted;
221 wire vuart2_irq_asserted;
222 wire [4:0] vuart1_h2b_fifo_data_available_count;
223 wire [4:0] vuart2_h2b_fifo_data_available_count;
224
225 wire ipmi_bt_bmc_irq_asserted;
226
227 // IPMI BT_CTRL-defined signals
228 wire ipmi_bt_h2b_oem0_req;
229 wire ipmi_bt_sms_attn_req;
230 wire ipmi_bt_b2h_attn_req;
231 wire ipmi_bt_h2b_attn_req;
232 wire ipmi_bt_h2b_ctl_h_busy;
233 wire ipmi_bt_b2h_ctl_b_busy;
234
235 assign lpc_io_cycle_irq_enable = control_reg1[19];
236 assign lpc_tpm_cycle_irq_enable = control_reg1[18];
237 assign lpc_firmware_cycle_irq_enable = control_reg1[17];
238 assign ipmi_bt_bmc_irq_enable = control_reg1[16];
239 assign ipmi_bt_port_address = control_reg1[15:8];
240 assign ipmi_bt_alt_irq = control_reg1[7];
241 assign enable_ipmi_bt = control_reg1[6];
242 assign enable_vuart2 = control_reg1[5];
243 assign enable_vuart1 = control_reg1[4];
244 assign enable_io_cycles = control_reg1[3];
245 assign enable_tpm_cycles = control_reg1[2];
246 assign enable_firmware_cycles = control_reg1[1];
247 assign enable_irq = control_reg1[0];
248 assign data_out = control_reg2[15:8];
249 assign signal_error = control_reg2[1];
250 assign range_1_enable = range_config1[31];
251 assign range_1_allow_io = range_config1[30];
252 assign range_1_allow_tpm = range_config1[29];
253 assign range_1_start_address = range_config1[27:0];
254 assign range_1_end_address = range_end1[27:0];
255 assign range_2_enable = range_config2[31];
256 assign range_2_allow_io = range_config2[30];
257 assign range_2_allow_tpm = range_config2[29];
258 assign range_2_start_address = range_config2[27:0];
259 assign range_2_end_address = range_end2[27:0];
260 assign range_3_enable = range_config3[31];
261 assign range_3_allow_io = range_config3[30];
262 assign range_3_allow_tpm = range_config3[29];
263 assign range_3_start_address = range_config3[27:0];
264 assign range_3_end_address = range_end3[27:0];
265 assign range_4_enable = range_config4[31];
266 assign range_4_allow_io = range_config4[30];
267 assign range_4_allow_tpm = range_config4[29];
268 assign range_4_start_address = range_config4[27:0];
269 assign range_4_end_address = range_end4[27:0];
270 assign range_5_enable = range_config5[31];
271 assign range_5_allow_io = range_config5[30];
272 assign range_5_allow_tpm = range_config5[29];
273 assign range_5_start_address = range_config5[27:0];
274 assign range_5_end_address = range_end5[27:0];
275 assign range_6_enable = range_config6[31];
276 assign range_6_allow_io = range_config6[30];
277 assign range_6_allow_tpm = range_config6[29];
278 assign range_6_start_address = range_config6[27:0];
279 assign range_6_end_address = range_end6[27:0];
280 assign lpc_fw_cycle_dma_read_enable = dma_config_reg1[0];
281 assign lpc_fw_cycle_dma_write_enable = dma_config_reg1[1];
282 assign lpc_fw_cycle_dma_idsel_filter_enable = dma_config_reg1[2];
283 assign lpc_fw_cycle_dma_idsel_filter = dma_config_reg1[7:4];
284 assign lpc_fw_dma_base_address = {dma_config_reg3, dma_config_reg2[31:4], 4'b0000};
285 assign lpc_fw_dma_length = {dma_config_reg4[31:4], 4'b0000};
286 assign lpc_fw_dma_valid_window_start_offset = dma_config_reg5;
287 assign lpc_fw_dma_valid_window_end_offset = dma_config_reg6;
288 assign lpc_fw_dma_offset_address_mask = dma_config_reg7;
289 assign status_reg1 = {8'b0, pending_fw_cycle_idsel, pending_fw_cycle_msize, 11'b0, ~lpc_reset_n, cycle_type, cycle_direction, attn_req};
290 assign status_reg2 = {4'b0, pending_address};
291 assign status_reg3 = {24'b0, pending_data};
292 assign status_reg4 = {8'b0, (!vuart2_h2b_fifo_rempty && vuart2_h2b_fifo_data_available_count[3:0] == 0)?4'b1111:vuart2_h2b_fifo_data_available_count[3:0], (!vuart1_h2b_fifo_rempty && vuart1_h2b_fifo_data_available_count[3:0] == 0)?4'b1111:vuart1_h2b_fifo_data_available_count[3:0], 4'b0, vuart2_irq_source, vuart1_irq_source, 1'b0, lpc_io_cycle_irq_asserted, lpc_tpm_cycle_irq_asserted, lpc_firmware_cycle_irq_asserted, ipmi_bt_bmc_irq_asserted, vuart2_irq_asserted, vuart1_irq_asserted, slave_irq_o};
293 assign ipmi_bt_status_reg = {24'b0, ipmi_bt_b2h_ctl_b_busy, ipmi_bt_h2b_ctl_h_busy, ipmi_bt_h2b_oem0_req, ipmi_bt_sms_attn_req, ipmi_bt_b2h_attn_req, ipmi_bt_h2b_attn_req, 2'b00};
294
295 // Internal LPC interface signals
296 wire [27:0] lpc_slave_address;
297 reg [7:0] lpc_slave_tx_data = 0;
298 wire [7:0] lpc_slave_rx_data;
299 wire lpc_slave_tpm_cycle;
300 wire lpc_slave_firmware_cycle;
301 reg lpc_slave_continue = 0;
302 reg lpc_slave_data_ack = 0;
303 reg lpc_slave_signal_error = 0;
304 reg lpc_slave_exception_ack = 0;
305 wire lpc_slave_address_ready;
306 wire lpc_slave_data_ready;
307 wire lpc_slave_data_ready_cont;
308 wire lpc_slave_continue_cont;
309 wire [2:0] lpc_slave_exception;
310 wire lpc_slave_cycle_direction;
311
312 reg [16:0] irq_request = 0;
313 reg irq_tx_ready = 0;
314 wire irq_tx_queued;
315
316 reg [7:0] xfer_read_data_buffer = 0;
317 reg [7:0] xfer_write_data_buffer = 0;
318
319 reg lpc_fw_dma_cycle_active = 0;
320 reg lpc_fw_dma_cycle_inactive = 0;
321
322 reg [8:0] lpc_fw_input_xfer_mmio_write_addr = 0;
323 reg [7:0] lpc_fw_input_xfer_mmio_write_data = 0;
324 reg lpc_fw_input_xfer_mmio_write_wren = 0;
325 reg [8:0] lpc_fw_output_xfer_mmio_read_addr = 0;
326
327 reg [8:0] lpc_fw_input_xfer_dma_write_addr = 0;
328 reg [7:0] lpc_fw_input_xfer_dma_write_data = 0;
329 reg lpc_fw_input_xfer_dma_write_wren = 0;
330 reg [8:0] lpc_fw_output_xfer_dma_read_addr = 0;
331
332 wire [8:0] lpc_fw_input_xfer_write_addr;
333 wire [7:0] lpc_fw_input_xfer_write_data;
334 wire lpc_fw_input_xfer_write_wren;
335 wire [8:0] lpc_fw_output_xfer_read_addr;
336
337 assign lpc_fw_input_xfer_write_addr = (!lpc_fw_dma_cycle_active)?lpc_fw_input_xfer_mmio_write_addr:lpc_fw_input_xfer_dma_write_addr;
338 assign lpc_fw_input_xfer_write_data = (!lpc_fw_dma_cycle_active)?lpc_fw_input_xfer_mmio_write_data:lpc_fw_input_xfer_dma_write_data;
339 assign lpc_fw_input_xfer_write_wren = (!lpc_fw_dma_cycle_active)?lpc_fw_input_xfer_mmio_write_wren:lpc_fw_input_xfer_dma_write_wren;
340 assign lpc_fw_output_xfer_read_addr = (!lpc_fw_dma_cycle_active)?lpc_fw_output_xfer_mmio_read_addr:lpc_fw_output_xfer_dma_read_addr;
341
342 reg [8:0] lpc_fw_output_xfer_read_addr_prev = 0;
343 wire [7:0] lpc_fw_output_xfer_read_data;
344
345 reg [8:0] ipmi_bt_input_xfer_write_addr = 0;
346 reg [7:0] ipmi_bt_input_xfer_write_data = 0;
347 reg ipmi_bt_input_xfer_write_wren = 0;
348 reg [8:0] ipmi_bt_output_xfer_read_addr = 0;
349 reg [8:0] ipmi_bt_output_xfer_read_addr_prev = 0;
350 wire [7:0] ipmi_bt_output_xfer_read_data;
351
352 wire [3:0] lpc_slave_fw_idsel;
353 wire [3:0] lpc_slave_fw_msize;
354
355 wire [15:0] lpc_slave_debug_port;
356
357 wire [3:0] lpc_slave_lpc_data_out;
358 wire [3:0] lpc_slave_lpc_data_in;
359 wire lpc_slave_lpc_data_direction;
360
361 wire lpc_slave_lpc_irq_out;
362 wire lpc_slave_lpc_irq_in;
363 wire lpc_slave_lpc_irq_direction;
364
365 reg lpc_slave_lpc_irq_out_reg = 0;
366 reg lpc_slave_lpc_irq_direction_reg = 0;
367
368 (* syn_useioff *) reg lpc_slave_lpc_frame_n = 0; // Must be packed into the I/O buffer flip flops
369 reg lpc_slave_lpc_reset_n = 0;
370
371 // Status register format: {7'b0, b2h_fifo_wfull, fifos_enabled, 1'b0, rcvr_trigger, mcr, lcr};
372 // Control register format: {irqs_enabled, FIFO IRQ enabled, 18'b0, h2b_fifo_irq_trigger_level, 7'b0, assert_b2h_break}
373
374 wire [31:0] vuart1_status_register;
375 reg [31:0] vuart1_control_register = 0;
376 wire [31:0] vuart2_status_register;
377 reg [31:0] vuart2_control_register = 0;
378
379 wire vuart1_assert_b2h_break_clear;
380 wire vuart2_assert_b2h_break_clear;
381 reg vuart1_lcr_break_request = 0;
382 reg vuart2_lcr_break_request = 0;
383 reg vuart1_lcr_break_ack = 0;
384 reg vuart2_lcr_break_ack = 0;
385
386 wire [3:0] vuart1_h2b_fifo_irq_trigger_level;
387 wire [3:0] vuart2_h2b_fifo_irq_trigger_level;
388
389 assign vuart1_h2b_fifo_irq_trigger_level = vuart1_control_register[11:8];
390 assign vuart2_h2b_fifo_irq_trigger_level = vuart2_control_register[11:8];
391
392 // Wishbone to LPC core synchronizer registers
393 reg [1:0] peripheral_reset_sync = 0;
394 reg [1:0] lpc_slave_continue_sync = 0;
395 reg [1:0] lpc_slave_data_ack_sync = 0;
396 reg [1:0] lpc_slave_signal_error_sync = 0;
397 reg [1:0] lpc_slave_exception_ack_sync = 0;
398 reg [1:0] irq_tx_ready_sync = 0;
399 reg [16:0] irq_request_sync_1 = 0;
400 reg [16:0] irq_request_sync_0 = 0;
401 reg [31:0] vuart1_control_register_sync_1 = 0;
402 reg [31:0] vuart1_control_register_sync_0 = 0;
403 reg [31:0] vuart2_control_register_sync_1 = 0;
404 reg [31:0] vuart2_control_register_sync_0 = 0;
405
406 // LPC core to Wishbone synchronizer registers
407 reg [2:0] lpc_slave_address_ready_sync = 0;
408 reg [2:0] lpc_slave_data_ready_sync = 0;
409 reg [2:0] lpc_slave_exception_sync_2 = 0;
410 reg [2:0] lpc_slave_exception_sync_1 = 0;
411 reg [2:0] lpc_slave_exception_sync_0 = 0;
412 reg [2:0] lpc_slave_data_ready_cont_sync = 0;
413 reg [2:0] lpc_slave_continue_cont_sync = 0;
414 reg [2:0] lpc_reset_n_sync = 0;
415 reg [31:0] vuart1_status_register_sync_2 = 0;
416 reg [31:0] vuart1_status_register_sync_1 = 0;
417 reg [31:0] vuart1_status_register_sync_0 = 0;
418 reg [31:0] vuart2_status_register_sync_2 = 0;
419 reg [31:0] vuart2_status_register_sync_1 = 0;
420 reg [31:0] vuart2_status_register_sync_0 = 0;
421 reg [2:0] vuart1_assert_b2h_break_clear_sync = 0;
422 reg [2:0] vuart2_assert_b2h_break_clear_sync = 0;
423
424 // VUART FIFOs
425 wire vuart1_h2b_fifo_reset;
426
427 wire vuart1_h2b_fifo_wwren;
428 wire [7:0] vuart1_h2b_fifo_wdata;
429 wire vuart1_h2b_fifo_wfull;
430 wire vuart1_h2b_fifo_walmost_full;
431
432 reg vuart1_h2b_fifo_rpop = 0;
433 wire [7:0] vuart1_h2b_fifo_rdata;
434 wire vuart1_h2b_fifo_rempty;
435 wire vuart1_h2b_fifo_ralmost_empty;
436 wire [4:0] vuart1_h2b_fifo_rptr;
437 wire [4:0] vuart1_h2b_fifo_wptr;
438
439 reg [1:0] vuart1_h2b_fifo_rempty_sync = 0;
440 reg [2:0] vuart1_h2b_fifo_reset_sync = 0;
441
442 wire vuart1_irqs_enabled;
443 reg vuart1_h2b_fifo_queue_past_trigger = 0;
444 reg vuart1_h2b_fifo_read_timeout = 0;
445 reg vuart1_h2b_fifo_irq = 0;
446 wire vuart1_h2b_fifo_irq_enabled;
447 reg [15:0] vuart1_h2b_fifo_read_timeout_counter = 0;
448
449 // This is an "advisory" signal -- it is NOT to be used in lieu of the guaranteed reliable empty / full signals!
450 // The main purpose of this signal is simply to allow the VUART IRQ to be asserted while the FIFO is approximately
451 // at or above the configured data queue threshold.
452 assign vuart1_h2b_fifo_data_available_count = (vuart1_h2b_fifo_rptr > vuart1_h2b_fifo_wptr)?(vuart1_h2b_fifo_wptr-vuart1_h2b_fifo_rptr):((vuart1_h2b_fifo_wptr+16)-vuart1_h2b_fifo_rptr);
453
454 async_fifo #(
455 .DSIZE(8),
456 .ASIZE(4),
457 .FALLTHROUGH("TRUE")
458 ) vuart1_h2b_fifo (
459 .wclk(lpc_clock),
460 .wrst_n(!peripheral_reset && lpc_slave_lpc_reset_n && !vuart1_h2b_fifo_reset),
461 .winc(vuart1_h2b_fifo_wwren),
462 .wdata(vuart1_h2b_fifo_wdata),
463 .wfull(vuart1_h2b_fifo_wfull),
464 .awfull(vuart1_h2b_fifo_walmost_full),
465 .rclk(peripheral_clock),
466 .rrst_n(!peripheral_reset && lpc_reset_n_sync[2] && !vuart1_h2b_fifo_reset_sync[2]),
467 .rinc(vuart1_h2b_fifo_rpop),
468 .rdata(vuart1_h2b_fifo_rdata),
469 .rempty(vuart1_h2b_fifo_rempty),
470 .arempty(vuart1_h2b_fifo_ralmost_empty),
471 .rclk_rptr(vuart1_h2b_fifo_rptr),
472 .rclk_wptr(vuart1_h2b_fifo_wptr)
473 );
474
475 wire vuart1_b2h_fifo_reset;
476
477 reg vuart1_b2h_fifo_wwren = 0;
478 reg [7:0] vuart1_b2h_fifo_wdata = 0;
479 wire vuart1_b2h_fifo_wfull;
480 wire vuart1_b2h_fifo_walmost_full;
481
482 wire vuart1_b2h_fifo_rpop;
483 wire [7:0] vuart1_b2h_fifo_rdata;
484 wire vuart1_b2h_fifo_rempty;
485 wire vuart1_b2h_fifo_ralmost_empty;
486 wire [4:0] vuart1_b2h_fifo_rptr;
487 wire [4:0] vuart1_b2h_fifo_wptr;
488
489 reg [2:0] vuart1_b2h_fifo_wfull_sync = 0;
490 reg [2:0] vuart1_b2h_fifo_reset_sync = 0;
491
492 // This is an "advisory" signal -- it is NOT to be used in lieu of the guaranteed reliable empty / full signals!
493 // The main purpose of this signal is simply to allow the VUART IRQ to be asserted while the FIFO is approximately
494 // at or above the configured data queue threshold.
495 wire [4:0] vuart1_b2h_fifo_data_available_count;
496 assign vuart1_b2h_fifo_data_available_count = (vuart1_b2h_fifo_rptr > vuart1_b2h_fifo_wptr)?(vuart1_b2h_fifo_wptr-vuart1_b2h_fifo_rptr):((vuart1_b2h_fifo_wptr+16)-vuart1_b2h_fifo_rptr);
497
498 async_fifo #(
499 .DSIZE(8),
500 .ASIZE(4),
501 .FALLTHROUGH("TRUE")
502 ) vuart1_b2h_fifo (
503 .wclk(peripheral_clock),
504 .wrst_n(!peripheral_reset && lpc_reset_n_sync[2] && !vuart1_b2h_fifo_reset_sync[2]),
505 .winc(vuart1_b2h_fifo_wwren),
506 .wdata(vuart1_b2h_fifo_wdata),
507 .wfull(vuart1_b2h_fifo_wfull),
508 .awfull(vuart1_b2h_fifo_walmost_full),
509 .rclk(lpc_clock),
510 .rrst_n(!peripheral_reset_sync[1] && lpc_slave_lpc_reset_n && !vuart1_b2h_fifo_reset),
511 .rinc(vuart1_b2h_fifo_rpop),
512 .rdata(vuart1_b2h_fifo_rdata),
513 .rempty(vuart1_b2h_fifo_rempty),
514 .arempty(vuart1_b2h_fifo_ralmost_empty),
515 .rclk_rptr(vuart1_b2h_fifo_rptr),
516 .rclk_wptr(vuart1_b2h_fifo_wptr)
517 );
518
519 wire vuart2_h2b_fifo_reset;
520
521 wire vuart2_h2b_fifo_wwren;
522 wire [7:0] vuart2_h2b_fifo_wdata;
523 wire vuart2_h2b_fifo_wfull;
524 wire vuart2_h2b_fifo_walmost_full;
525
526 reg vuart2_h2b_fifo_rpop = 0;
527 wire [7:0] vuart2_h2b_fifo_rdata;
528 wire vuart2_h2b_fifo_rempty;
529 wire vuart2_h2b_fifo_ralmost_empty;
530 wire [4:0] vuart2_h2b_fifo_rptr;
531 wire [4:0] vuart2_h2b_fifo_wptr;
532
533 reg [1:0] vuart2_h2b_fifo_rempty_sync = 0;
534 reg [2:0] vuart2_h2b_fifo_reset_sync = 0;
535
536 wire vuart2_irqs_enabled;
537 reg vuart2_h2b_fifo_queue_past_trigger = 0;
538 reg vuart2_h2b_fifo_read_timeout = 0;
539 reg vuart2_h2b_fifo_irq = 0;
540 wire vuart2_h2b_fifo_irq_enabled;
541 reg [15:0] vuart2_h2b_fifo_read_timeout_counter = 0;
542
543 // This is an "advisory" signal -- it is NOT to be used in lieu of the guaranteed reliable empty / full signals!
544 // The main purpose of this signal is simply to allow the VUART IRQ to be asserted while the FIFO is approximately
545 // at or above the configured data queue threshold.
546 assign vuart2_h2b_fifo_data_available_count = (vuart2_h2b_fifo_rptr > vuart2_h2b_fifo_wptr)?(vuart2_h2b_fifo_wptr-vuart2_h2b_fifo_rptr):((vuart2_h2b_fifo_wptr+16)-vuart2_h2b_fifo_rptr);
547
548 async_fifo #(
549 .DSIZE(8),
550 .ASIZE(4),
551 .FALLTHROUGH("TRUE")
552 ) vuart2_h2b_fifo (
553 .wclk(lpc_clock),
554 .wrst_n(!peripheral_reset && lpc_slave_lpc_reset_n && !vuart2_h2b_fifo_reset),
555 .winc(vuart2_h2b_fifo_wwren),
556 .wdata(vuart2_h2b_fifo_wdata),
557 .wfull(vuart2_h2b_fifo_wfull),
558 .awfull(vuart2_h2b_fifo_walmost_full),
559 .rclk(peripheral_clock),
560 .rrst_n(!peripheral_reset && lpc_reset_n_sync[2] && !vuart2_h2b_fifo_reset_sync[2]),
561 .rinc(vuart2_h2b_fifo_rpop),
562 .rdata(vuart2_h2b_fifo_rdata),
563 .rempty(vuart2_h2b_fifo_rempty),
564 .arempty(vuart2_h2b_fifo_ralmost_empty),
565 .rclk_rptr(vuart2_h2b_fifo_rptr),
566 .rclk_wptr(vuart2_h2b_fifo_wptr)
567 );
568
569 wire vuart2_b2h_fifo_reset;
570
571 reg vuart2_b2h_fifo_wwren = 0;
572 reg [7:0] vuart2_b2h_fifo_wdata = 0;
573 wire vuart2_b2h_fifo_wfull;
574 wire vuart2_b2h_fifo_walmost_full;
575
576 wire vuart2_b2h_fifo_rpop;
577 wire [7:0] vuart2_b2h_fifo_rdata;
578 wire vuart2_b2h_fifo_rempty;
579 wire vuart2_b2h_fifo_ralmost_empty;
580 wire [4:0] vuart2_b2h_fifo_rptr;
581 wire [4:0] vuart2_b2h_fifo_wptr;
582
583 reg [2:0] vuart2_b2h_fifo_wfull_sync = 0;
584 reg [2:0] vuart2_b2h_fifo_reset_sync = 0;
585
586 // This is an "advisory" signal -- it is NOT to be used in lieu of the guaranteed reliable empty / full signals!
587 // The main purpose of this signal is simply to allow the VUART IRQ to be asserted while the FIFO is approximately
588 // at or above the configured data queue threshold.
589 wire [4:0] vuart2_b2h_fifo_data_available_count;
590 assign vuart2_b2h_fifo_data_available_count = (vuart2_b2h_fifo_rptr > vuart2_b2h_fifo_wptr)?(vuart2_b2h_fifo_wptr-vuart2_b2h_fifo_rptr):((vuart2_b2h_fifo_wptr+16)-vuart2_b2h_fifo_rptr);
591
592 async_fifo #(
593 .DSIZE(8),
594 .ASIZE(4),
595 .FALLTHROUGH("TRUE")
596 ) vuart2_b2h_fifo (
597 .wclk(peripheral_clock),
598 .wrst_n(!peripheral_reset && lpc_reset_n_sync[2] && !vuart2_b2h_fifo_reset_sync[2]),
599 .winc(vuart2_b2h_fifo_wwren),
600 .wdata(vuart2_b2h_fifo_wdata),
601 .wfull(vuart2_b2h_fifo_wfull),
602 .awfull(vuart2_b2h_fifo_walmost_full),
603 .rclk(lpc_clock),
604 .rrst_n(!peripheral_reset_sync[1] && lpc_slave_lpc_reset_n && !vuart2_b2h_fifo_reset),
605 .rinc(vuart2_b2h_fifo_rpop),
606 .rdata(vuart2_b2h_fifo_rdata),
607 .rempty(vuart2_b2h_fifo_rempty),
608 .arempty(vuart2_b2h_fifo_ralmost_empty),
609 .rclk_rptr(vuart2_b2h_fifo_rptr),
610 .rclk_wptr(vuart2_b2h_fifo_wptr)
611 );
612
613 // IPMI BT signals
614 wire ipmi_bt_bmc_to_host_ctl_sms_ack;
615 wire ipmi_bt_bmc_to_host_ctl_attn_ack;
616 wire ipmi_bt_host_to_bmc_ctl_attn_req;
617 wire ipmi_bt_host_to_bmc_ctl_oem0_req;
618 wire ipmi_bt_irq_ack;
619 wire ipmi_bt_irq_bmc_reset;
620 wire ipmi_bt_host_to_bmc_ctl_h_busy;
621 wire ipmi_bt_irq_enable;
622
623 reg ipmi_bt_bmc_to_host_ctl_sms_req = 0;
624 reg ipmi_bt_bmc_to_host_ctl_attn_req = 0;
625 reg ipmi_bt_bmc_to_host_ctl_sms_ack_cont = 0;
626 reg ipmi_bt_bmc_to_host_ctl_attn_ack_cont = 0;
627 reg ipmi_bt_host_to_bmc_ctl_attn_req_cont = 0;
628 reg ipmi_bt_host_to_bmc_ctl_oem0_req_cont = 0;
629 reg ipmi_bt_irq_ack_cont = 0;
630 reg ipmi_bt_irq_bmc_reset_cont = 0;
631 reg ipmi_bt_bmc_to_host_ctl_b_busy = 0;
632 reg ipmi_bt_irq_req = 0;
633
634 reg ipmi_bt_bmc_irq = 0;
635
636 reg [1:0] ipmi_bt_bmc_to_host_ctl_sms_req_sync = 0;
637 reg [1:0] ipmi_bt_bmc_to_host_ctl_attn_req_sync = 0;
638 reg [1:0] ipmi_bt_bmc_to_host_ctl_sms_ack_cont_sync = 0;
639 reg [1:0] ipmi_bt_bmc_to_host_ctl_attn_ack_cont_sync = 0;
640 reg [1:0] ipmi_bt_host_to_bmc_ctl_attn_req_cont_sync = 0;
641 reg [1:0] ipmi_bt_host_to_bmc_ctl_oem0_req_cont_sync = 0;
642 reg [1:0] ipmi_bt_irq_ack_cont_sync = 0;
643 reg [1:0] ipmi_bt_irq_bmc_reset_cont_sync = 0;
644 reg [1:0] ipmi_bt_bmc_to_host_ctl_b_busy_sync = 0;
645 reg [1:0] ipmi_bt_irq_req_sync = 0;
646
647 reg [2:0] ipmi_bt_bmc_to_host_ctl_sms_ack_sync = 0;
648 reg [2:0] ipmi_bt_bmc_to_host_ctl_attn_ack_sync = 0;
649 reg [2:0] ipmi_bt_host_to_bmc_ctl_attn_req_sync = 0;
650 reg [2:0] ipmi_bt_host_to_bmc_ctl_oem0_req_sync = 0;
651 reg [2:0] ipmi_bt_irq_ack_sync = 0;
652 reg [2:0] ipmi_bt_irq_bmc_reset_sync = 0;
653 reg [2:0] ipmi_bt_host_to_bmc_ctl_h_busy_sync = 0;
654 reg [2:0] ipmi_bt_irq_enable_sync = 0;
655
656 reg ipmi_bt_bmc_to_host_ctl_attn_req_prev = 0;
657 reg ipmi_bt_bmc_to_host_ctl_sms_req_prev = 0;
658 reg ipmi_bt_h2b_oem0_req_prev = 0;
659
660 assign ipmi_bt_h2b_oem0_req = ipmi_bt_host_to_bmc_ctl_oem0_req_sync[2];
661 assign ipmi_bt_sms_attn_req = ipmi_bt_bmc_to_host_ctl_sms_req;
662 assign ipmi_bt_b2h_attn_req = ipmi_bt_bmc_to_host_ctl_attn_req;
663 assign ipmi_bt_h2b_attn_req = ipmi_bt_host_to_bmc_ctl_attn_req_sync[2];
664 assign ipmi_bt_h2b_ctl_h_busy = ipmi_bt_host_to_bmc_ctl_h_busy_sync[2];
665 assign ipmi_bt_b2h_ctl_b_busy = ipmi_bt_bmc_to_host_ctl_b_busy;
666
667 // IRQ control
668 assign vuart1_irqs_enabled = vuart1_control_register[31];
669 assign vuart1_h2b_fifo_irq_enabled = vuart1_control_register[30];
670 assign vuart2_irqs_enabled = vuart2_control_register[31];
671 assign vuart2_h2b_fifo_irq_enabled = vuart2_control_register[30];
672
673 // IRQ connections
674 assign vuart1_irq_asserted = vuart1_h2b_fifo_irq;
675 assign vuart2_irq_asserted = vuart2_h2b_fifo_irq;
676 assign ipmi_bt_bmc_irq_asserted = ipmi_bt_bmc_irq;
677 assign lpc_io_cycle_irq_asserted = lpc_io_cycle_irq;
678 assign lpc_tpm_cycle_irq_asserted = lpc_tpm_cycle_irq;
679 assign lpc_firmware_cycle_irq_asserted = lpc_firmware_cycle_irq;
680 assign slave_irq_o = lpc_io_cycle_irq_asserted | lpc_tpm_cycle_irq_asserted | lpc_firmware_cycle_irq_asserted | vuart1_irq_asserted | vuart2_irq_asserted | ipmi_bt_bmc_irq_asserted;
681
682 // Instantiate slave
683 lpc_slave_interface lpc_slave_interface(
684 .address(lpc_slave_address),
685 .tx_data(lpc_slave_tx_data),
686 .rx_data(lpc_slave_rx_data),
687 .tpm_cycle(lpc_slave_tpm_cycle),
688 .firmware_cycle(lpc_slave_firmware_cycle),
689 .continue(lpc_slave_continue_sync[1]),
690 .data_ack(lpc_slave_data_ack_sync[1]),
691 .transfer_error(lpc_slave_signal_error_sync[1]),
692 .exception_ack(lpc_slave_exception_ack_sync[1]),
693 .address_ready(lpc_slave_address_ready),
694 .data_ready(lpc_slave_data_ready),
695 .data_ready_cont(lpc_slave_data_ready_cont),
696 .continue_cont(lpc_slave_continue_cont),
697 .exception(lpc_slave_exception),
698 .data_direction(lpc_slave_cycle_direction),
699
700 .irq_request(irq_request_sync_1),
701 .irq_tx_ready(irq_tx_ready),
702 .irq_tx_queued(irq_tx_queued),
703
704 .lpc_fw_input_xfer_write_addr(lpc_fw_input_xfer_write_addr),
705 .lpc_fw_input_xfer_write_data(lpc_fw_input_xfer_write_data),
706 .lpc_fw_input_xfer_write_clk(peripheral_clock),
707 .lpc_fw_input_xfer_write_wren(lpc_fw_input_xfer_write_wren),
708 .lpc_fw_output_xfer_read_addr(lpc_fw_output_xfer_read_addr),
709 .lpc_fw_output_xfer_read_data(lpc_fw_output_xfer_read_data),
710 .lpc_fw_output_xfer_read_clk(peripheral_clock),
711
712 .ipmi_bt_input_xfer_write_addr(ipmi_bt_input_xfer_write_addr),
713 .ipmi_bt_input_xfer_write_data(ipmi_bt_input_xfer_write_data),
714 .ipmi_bt_input_xfer_write_clk(peripheral_clock),
715 .ipmi_bt_input_xfer_write_wren(ipmi_bt_input_xfer_write_wren),
716 .ipmi_bt_output_xfer_read_addr(ipmi_bt_output_xfer_read_addr),
717 .ipmi_bt_output_xfer_read_data(ipmi_bt_output_xfer_read_data),
718 .ipmi_bt_output_xfer_read_clk(peripheral_clock),
719
720 .range1_start(range_1_start_address[15:0]),
721 .range1_end(range_1_end_address[15:0]),
722 .range2_start(range_2_start_address[15:0]),
723 .range2_end(range_2_end_address[15:0]),
724 .range3_start(range_3_start_address[15:0]),
725 .range3_end(range_3_end_address[15:0]),
726 .range4_start(range_4_start_address[15:0]),
727 .range4_end(range_4_end_address[15:0]),
728 .range5_start(range_5_start_address[15:0]),
729 .range5_end(range_5_end_address[15:0]),
730 .range6_start(range_6_start_address[15:0]),
731 .range6_end(range_6_end_address[15:0]),
732
733 .enable_vuart1(enable_vuart1),
734 .vuart1_status_register(vuart1_status_register),
735 .vuart1_control_register(vuart1_control_register_sync_1),
736 .vuart1_assert_b2h_break_clear(vuart1_assert_b2h_break_clear),
737
738 .vuart1_tx_fifo_reset(vuart1_h2b_fifo_reset),
739 .vuart1_tx_fifo_wren(vuart1_h2b_fifo_wwren),
740 .vuart1_tx_fifo_data(vuart1_h2b_fifo_wdata),
741 .vuart1_tx_fifo_full(vuart1_h2b_fifo_wfull),
742 .vuart1_tx_fifo_almost_full(vuart1_h2b_fifo_walmost_full),
743 .vuart1_tx_fifo_empty(vuart1_h2b_fifo_rempty_sync[1]),
744
745 .vuart1_rx_fifo_reset(vuart1_b2h_fifo_reset),
746 .vuart1_rx_fifo_rpop(vuart1_b2h_fifo_rpop),
747 .vuart1_rx_fifo_data(vuart1_b2h_fifo_rdata),
748 .vuart1_rx_fifo_empty(vuart1_b2h_fifo_rempty),
749 .vuart1_rx_fifo_almost_empty(vuart1_b2h_fifo_ralmost_empty),
750 .vuart1_rx_fifo_full(vuart1_b2h_fifo_wfull_sync[2]),
751 .vuart1_rx_data_available_count(vuart1_b2h_fifo_data_available_count[3:0]),
752
753 .enable_vuart2(enable_vuart2),
754 .vuart2_status_register(vuart2_status_register),
755 .vuart2_control_register(vuart2_control_register_sync_1),
756 .vuart2_assert_b2h_break_clear(vuart2_assert_b2h_break_clear),
757
758 .vuart2_tx_fifo_reset(vuart2_h2b_fifo_reset),
759 .vuart2_tx_fifo_wren(vuart2_h2b_fifo_wwren),
760 .vuart2_tx_fifo_data(vuart2_h2b_fifo_wdata),
761 .vuart2_tx_fifo_full(vuart2_h2b_fifo_wfull),
762 .vuart2_tx_fifo_almost_full(vuart2_h2b_fifo_walmost_full),
763 .vuart2_tx_fifo_empty(vuart2_h2b_fifo_rempty_sync[1]),
764
765 .vuart2_rx_fifo_reset(vuart2_b2h_fifo_reset),
766 .vuart2_rx_fifo_rpop(vuart2_b2h_fifo_rpop),
767 .vuart2_rx_fifo_data(vuart2_b2h_fifo_rdata),
768 .vuart2_rx_fifo_empty(vuart2_b2h_fifo_rempty),
769 .vuart2_rx_fifo_almost_empty(vuart2_b2h_fifo_ralmost_empty),
770 .vuart2_rx_fifo_full(vuart2_b2h_fifo_wfull_sync[2]),
771 .vuart2_rx_data_available_count(vuart2_b2h_fifo_data_available_count[3:0]),
772
773 .enable_ipmi_bt(enable_ipmi_bt),
774 .ipmi_bt_alt_irq(ipmi_bt_alt_irq),
775 .ipmi_bt_port_base_address({8'h00, ipmi_bt_port_address}),
776
777 .ipmi_bt_bmc_to_host_ctl_sms_ack(ipmi_bt_bmc_to_host_ctl_sms_ack),
778 .ipmi_bt_bmc_to_host_ctl_attn_ack(ipmi_bt_bmc_to_host_ctl_attn_ack),
779 .ipmi_bt_host_to_bmc_ctl_attn_req(ipmi_bt_host_to_bmc_ctl_attn_req),
780 .ipmi_bt_host_to_bmc_ctl_oem0_req(ipmi_bt_host_to_bmc_ctl_oem0_req),
781 .ipmi_bt_irq_ack(ipmi_bt_irq_ack),
782 .ipmi_bt_irq_bmc_reset(ipmi_bt_irq_bmc_reset),
783 .ipmi_bt_host_to_bmc_ctl_h_busy(ipmi_bt_host_to_bmc_ctl_h_busy),
784 .ipmi_bt_irq_enable(ipmi_bt_irq_enable),
785
786 .ipmi_bt_bmc_to_host_ctl_sms_req(ipmi_bt_bmc_to_host_ctl_sms_req_sync[1]),
787 .ipmi_bt_bmc_to_host_ctl_attn_req(ipmi_bt_bmc_to_host_ctl_attn_req_sync[1]),
788 .ipmi_bt_bmc_to_host_ctl_sms_ack_cont(ipmi_bt_bmc_to_host_ctl_sms_ack_cont_sync[1]),
789 .ipmi_bt_bmc_to_host_ctl_attn_ack_cont(ipmi_bt_bmc_to_host_ctl_attn_ack_cont_sync[1]),
790 .ipmi_bt_host_to_bmc_ctl_attn_req_cont(ipmi_bt_host_to_bmc_ctl_attn_req_cont_sync[1]),
791 .ipmi_bt_host_to_bmc_ctl_oem0_req_cont(ipmi_bt_host_to_bmc_ctl_oem0_req_cont_sync[1]),
792 .ipmi_bt_irq_ack_cont(ipmi_bt_irq_ack_cont_sync[1]),
793 .ipmi_bt_irq_bmc_reset_cont(ipmi_bt_irq_bmc_reset_cont_sync[1]),
794 .ipmi_bt_bmc_to_host_ctl_b_busy(ipmi_bt_bmc_to_host_ctl_b_busy_sync[1]),
795 .ipmi_bt_irq_req(ipmi_bt_irq_req_sync[1]),
796
797 .fw_idsel(lpc_slave_fw_idsel),
798 .fw_msize(lpc_slave_fw_msize),
799
800 `ifdef LPC_SLAVE_DEBUG
801 .debug_port(lpc_slave_debug_port),
802 `endif
803
804 .lpc_data_out(lpc_slave_lpc_data_out),
805 .lpc_data_in(lpc_slave_lpc_data_in),
806 .lpc_data_direction(lpc_slave_lpc_data_direction),
807
808 .lpc_irq_out(lpc_slave_lpc_irq_out),
809 .lpc_irq_in(lpc_slave_lpc_irq_in),
810 .lpc_irq_direction(lpc_slave_lpc_irq_direction),
811
812 .lpc_frame_n(lpc_slave_lpc_frame_n),
813 .lpc_reset_n(lpc_slave_lpc_reset_n),
814 .lpc_clock(lpc_clock)
815 );
816
817 // Create registered I/O signals on external LPC bus
// Register the external LRESET# strobe on the LPC bus clock so the slave
// core sees a clock-aligned copy
always @(posedge lpc_clock) begin
	lpc_slave_lpc_reset_n <= lpc_reset_n;
end

// Likewise register the external LFRAME# strobe (independent of the
// reset path; both are simple one-deep input registers)
always @(posedge lpc_clock) begin
	lpc_slave_lpc_frame_n <= lpc_frame_n;
end
// LPC AD bus is connected combinationally in both directions; the slave
// core supplies the tristate direction control for the external pins
assign lpc_data_out = lpc_slave_lpc_data_out;
assign lpc_slave_lpc_data_in = lpc_data_in;
assign lpc_data_direction = lpc_slave_lpc_data_direction;
825
// Register the serial IRQ output on the LPC bus clock before it reaches
// the external pin
always @(posedge lpc_clock) begin
	lpc_slave_lpc_irq_out_reg <= lpc_slave_lpc_irq_out;
end

// The IRQ pin direction control is registered on the same clock so that
// direction changes line up with the registered output data
always @(posedge lpc_clock) begin
	lpc_slave_lpc_irq_direction_reg <= lpc_slave_lpc_irq_direction;
end

// Drive the external IRQ interface: registered output and direction,
// combinational input back into the slave core
assign lpc_irq_out = lpc_slave_lpc_irq_out_reg;
assign lpc_irq_direction = lpc_slave_lpc_irq_direction_reg;
assign lpc_irq_in = lpc_slave_lpc_irq_in;
833
// Removed erroneous self-assignment (`assign lpc_clock = lpc_clock;`):
// lpc_clock is driven externally (it clocks the registered I/O blocks above
// and is fed to the slave core instance), so a net driving itself is at
// best a redundant second driver and at worst flagged as a combinational
// loop / multiple-driver conflict by synthesis and lint tools. The external
// observation copy of this clock is provided by lpc_clock_mirror instead.
835
// Wishbone slave (MMIO) response path -- outputs are registered so the
// bus sees glitch-free ack / read data
reg [3:0] slave_wishbone_sel_reg = 0;		// byte lane selects latched at cycle start for multi-cycle buffer accesses
reg slave_wishbone_ack_reg = 0;			// registered cycle acknowledge
reg [31:0] slave_wishbone_dat_r_reg = 0;	// registered read data word

assign slave_wishbone_ack = slave_wishbone_ack_reg;
assign slave_wishbone_dat_r = slave_wishbone_dat_r_reg;
842
// Wishbone master (LPC firmware-cycle DMA engine) -- all bus outputs are
// registered copies of the internal control signals
reg master_wishbone_cyc_reg = 0;	// cycle valid
reg master_wishbone_stb_reg = 0;	// strobe
reg master_wishbone_we_reg = 0;		// write enable
reg [(WISHBONE_DMA_ADDR_BUS_WIDTH-1):0] master_wishbone_adr_reg = 0;		// DMA target address
reg [(WISHBONE_DMA_DATA_BUS_WIDTH-1):0] master_wishbone_dat_w_reg = 0;		// DMA write data
reg [((WISHBONE_DMA_DATA_BUS_WIDTH/8)-1):0] master_wishbone_sel_reg = 0;	// byte lane selects (one bit per data byte)

assign master_wishbone_cyc = master_wishbone_cyc_reg;
assign master_wishbone_stb = master_wishbone_stb_reg;
assign master_wishbone_we = master_wishbone_we_reg;
assign master_wishbone_adr = master_wishbone_adr_reg;
assign master_wishbone_dat_w = master_wishbone_dat_w_reg;
assign master_wishbone_sel = master_wishbone_sel_reg;
856
// LPC cycle type encodings reported by the slave core.
// NOTE(review): "AQUIEL" looks like a misspelling of "AQUILA"; left
// untouched since these names may be referenced elsewhere in the project.
parameter AQUIEL_LPC_CYCLE_TYPE_IO = 0;
parameter AQUIEL_LPC_CYCLE_TYPE_TPM = 1;
parameter AQUIEL_LPC_CYCLE_TYPE_FIRMWARE = 2;

// LPC transfer handler state encodings.
// Naming convention appears to be: IRxx/IWxx = I/O-or-TPM read/write,
// FRxx/FWxx = firmware cycle read/write, ERxx = error, DRxx/DWxx =
// DMA-assisted read/write, DFxx = DMA finish -- TODO confirm against the
// transfer state machine later in this file. Encodings are sparse
// (e.g. 4 and 13-15 unused), presumably to leave room for future states.
parameter LPC_SLAVE_TRANSFER_STATE_IDLE = 0;
parameter LPC_SLAVE_TRANSFER_STATE_IR01 = 1;
parameter LPC_SLAVE_TRANSFER_STATE_IR02 = 2;
parameter LPC_SLAVE_TRANSFER_STATE_IR03 = 3;
parameter LPC_SLAVE_TRANSFER_STATE_IW01 = 5;
parameter LPC_SLAVE_TRANSFER_STATE_IW02 = 6;
parameter LPC_SLAVE_TRANSFER_STATE_IW03 = 7;
parameter LPC_SLAVE_TRANSFER_STATE_IW04 = 8;
parameter LPC_SLAVE_TRANSFER_STATE_FR01 = 9;
parameter LPC_SLAVE_TRANSFER_STATE_FR02 = 10;
parameter LPC_SLAVE_TRANSFER_STATE_FR03 = 11;
parameter LPC_SLAVE_TRANSFER_STATE_FW01 = 12;
parameter LPC_SLAVE_TRANSFER_STATE_ER01 = 16;
parameter LPC_SLAVE_TRANSFER_STATE_DR01 = 17;
parameter LPC_SLAVE_TRANSFER_STATE_DR02 = 18;
parameter LPC_SLAVE_TRANSFER_STATE_DR03 = 19;
parameter LPC_SLAVE_TRANSFER_STATE_DR04 = 20;
parameter LPC_SLAVE_TRANSFER_STATE_DR05 = 21;
parameter LPC_SLAVE_TRANSFER_STATE_DR06 = 22;
parameter LPC_SLAVE_TRANSFER_STATE_DR07 = 23;
parameter LPC_SLAVE_TRANSFER_STATE_DR08 = 24;
parameter LPC_SLAVE_TRANSFER_STATE_DR09 = 25;
parameter LPC_SLAVE_TRANSFER_STATE_DR10 = 26;
parameter LPC_SLAVE_TRANSFER_STATE_DW01 = 27;
parameter LPC_SLAVE_TRANSFER_STATE_DW02 = 28;
parameter LPC_SLAVE_TRANSFER_STATE_DW03 = 29;
parameter LPC_SLAVE_TRANSFER_STATE_DW04 = 30;
parameter LPC_SLAVE_TRANSFER_STATE_DW05 = 31;
parameter LPC_SLAVE_TRANSFER_STATE_DW06 = 32;
parameter LPC_SLAVE_TRANSFER_STATE_DW07 = 33;
parameter LPC_SLAVE_TRANSFER_STATE_DW08 = 34;
parameter LPC_SLAVE_TRANSFER_STATE_DW09 = 35;
parameter LPC_SLAVE_TRANSFER_STATE_DW10 = 36;
parameter LPC_SLAVE_TRANSFER_STATE_DW11 = 37;
parameter LPC_SLAVE_TRANSFER_STATE_DF01 = 38;

// MMIO (Wishbone slave) handler state encodings:
// TRxx = transfer completion / handshake, RDxx = multi-byte buffer read
// sequence, WRxx = multi-byte buffer write sequence
parameter MMIO_TRANSFER_STATE_IDLE = 0;
parameter MMIO_TRANSFER_STATE_TR01 = 8;
parameter MMIO_TRANSFER_STATE_TR02 = 9;
parameter MMIO_TRANSFER_STATE_RD01 = 16;
parameter MMIO_TRANSFER_STATE_RD02 = 17;
parameter MMIO_TRANSFER_STATE_RD03 = 18;
parameter MMIO_TRANSFER_STATE_RD04 = 19;
parameter MMIO_TRANSFER_STATE_RD05 = 20;
parameter MMIO_TRANSFER_STATE_WR01 = 32;
parameter MMIO_TRANSFER_STATE_WR02 = 33;
parameter MMIO_TRANSFER_STATE_WR03 = 34;
parameter MMIO_TRANSFER_STATE_WR04 = 35;
909
// Working registers for the LPC transfer handler and the MMIO (Wishbone
// slave) handler
reg [27:0] lpc_slave_address_reg = 0;		// latched LPC cycle address (used by the transfer handler, not shown in full here)
reg lpc_slave_firmware_cycle_reg = 0;		// latched "current cycle is a firmware cycle" flag
reg is_firmware_cycle_type = 0;			// decoded cycle type flags
reg is_tpm_cycle_type = 0;
reg is_io_cycle_type = 0;
reg cycle_range_intercept_allowed = 0;		// set when the cycle address falls in an enabled intercept range
reg wishbone_mmio_access_is_32_bits = 0;	// 1 = full-word (sel == 4'b1111) MMIO buffer access, 0 = single byte
reg [31:0] mmio_lpc_buffer_address_reg = 0;	// effective byte address of the current MMIO access (word address + lane offset)
reg [7:0] mmio_transfer_state = 0;		// MMIO handler state (MMIO_TRANSFER_STATE_*)
reg [7:0] lpc_slave_transfer_state = 0;		// LPC transfer handler state (LPC_SLAVE_TRANSFER_STATE_*)
reg mmio_guard_condition_required = 0;		// when set, delay ack until cross-domain handshakes settle (state TR01)
reg [31:0] mmio_peripheral_tx_buffer = 0;	// assembled read data for VUART register space accesses
reg [31:0] mmio_peripheral_rx_buffer = 0;	// write staging for peripheral space -- assumed; not referenced in the visible code, verify against remainder of file
reg [31:0] mmio_cfg_space_tx_buffer = 0;	// assembled read data for configuration register space accesses
reg [31:0] mmio_cfg_space_rx_buffer = 0;	// byte-lane-merged write data for configuration register space accesses
925
// Debug visibility: expose the low nibble of the LPC transfer state plus
// the DMA master handshake lines on the debug port
assign debug_port[11:8] = lpc_slave_transfer_state[3:0];
assign debug_port[12] = master_wishbone_cyc;
assign debug_port[13] = master_wishbone_stb;
assign debug_port[14] = master_wishbone_ack;
assign debug_port[15] = master_wishbone_err;

// Mirror the LPC bus clock for external observation / debug
assign lpc_clock_mirror = lpc_clock;
933
934 // Wishbone connector -- MMIO
935 always @(posedge peripheral_clock) begin
936 if (peripheral_reset) begin
937 // Reset Wishbone interface / control state machine
938 slave_wishbone_ack_reg <= 0;
939 wishbone_mmio_access_is_32_bits = 0;
940 continue_transfer <= 0;
941
942 vuart1_control_register <= 0;
943 vuart2_control_register <= 0;
944 vuart1_lcr_break_request <= 0;
945 vuart2_lcr_break_request <= 0;
946 vuart1_lcr_break_ack <= 0;
947 vuart2_lcr_break_ack <= 0;
948 vuart1_irq_source <= 0;
949 vuart2_irq_source <= 0;
950 vuart1_h2b_fifo_irq <= 0;
951 vuart2_h2b_fifo_irq <= 0;
952 vuart1_h2b_fifo_read_timeout <= 0;
953 vuart2_h2b_fifo_read_timeout <= 0;
954 vuart1_h2b_fifo_queue_past_trigger <= 0;
955 vuart2_h2b_fifo_queue_past_trigger <= 0;
956 vuart1_h2b_fifo_read_timeout_counter <= 0;
957 vuart2_h2b_fifo_read_timeout_counter <= 0;
958
959 ipmi_bt_bmc_to_host_ctl_sms_req <= 0;
960 ipmi_bt_bmc_to_host_ctl_attn_req <= 0;
961 ipmi_bt_bmc_to_host_ctl_sms_ack_cont <= 0;
962 ipmi_bt_bmc_to_host_ctl_attn_ack_cont <= 0;
963 ipmi_bt_host_to_bmc_ctl_attn_req_cont <= 0;
964 ipmi_bt_host_to_bmc_ctl_oem0_req_cont <= 0;
965 ipmi_bt_irq_ack_cont <= 0;
966 ipmi_bt_irq_bmc_reset_cont <= 0;
967 ipmi_bt_bmc_to_host_ctl_b_busy <= 1; // BMC should always indicate busy until BMC software is online and clears the busy flag
968 ipmi_bt_irq_req <= 0;
969 ipmi_bt_bmc_irq <= 0;
970
971 ipmi_bt_bmc_to_host_ctl_attn_req_prev <= 0;
972 ipmi_bt_bmc_to_host_ctl_sms_req_prev <= 0;
973 ipmi_bt_h2b_oem0_req_prev <= 0;
974
975 mmio_transfer_state <= MMIO_TRANSFER_STATE_IDLE;
976 end else begin
977 case (mmio_transfer_state)
978 MMIO_TRANSFER_STATE_IDLE: begin
979 // Compute effective address
980 mmio_lpc_buffer_address_reg[31:2] = slave_wishbone_adr;
981 case (slave_wishbone_sel)
982 4'b0001: mmio_lpc_buffer_address_reg[1:0] = 0;
983 4'b0010: mmio_lpc_buffer_address_reg[1:0] = 1;
984 4'b0100: mmio_lpc_buffer_address_reg[1:0] = 2;
985 4'b1000: mmio_lpc_buffer_address_reg[1:0] = 3;
986 4'b1111: mmio_lpc_buffer_address_reg[1:0] = 0;
987 default: mmio_lpc_buffer_address_reg[1:0] = 0;
988 endcase
989
990 if (slave_wishbone_cyc && slave_wishbone_stb) begin
991 mmio_guard_condition_required = 0;
992 if (mmio_lpc_buffer_address_reg[31:20] == 12'h00e) begin
993 // VUART register space access
994 if (!continue_transfer) begin
995 // Single clock pulse signals in deasserted state...process incoming request!
996 if (!slave_wishbone_we) begin
997 // Read requested
998 case ({mmio_lpc_buffer_address_reg[19:2], 2'b00})
999 // Bus is little endian!
1000 0: begin
1001 mmio_peripheral_tx_buffer = {(vuart1_h2b_fifo_rempty)?8'h00:vuart1_h2b_fifo_rdata,
1002 7'b0, vuart1_h2b_fifo_rempty || !lpc_slave_lpc_reset_n || vuart1_h2b_fifo_reset,
1003 (vuart2_h2b_fifo_rempty)?8'h00:vuart2_h2b_fifo_rdata,
1004 7'b0, vuart2_h2b_fifo_rempty || !lpc_slave_lpc_reset_n || vuart2_h2b_fifo_reset
1005 };
1006
1007 if (slave_wishbone_sel[0]) begin
1008 // Wishbone bits 31:24
1009 if (!vuart1_h2b_fifo_rempty) begin
1010 vuart1_h2b_fifo_rpop <= 1;
1011 end
1012 end
1013 if (slave_wishbone_sel[2]) begin
1014 // Wishbone bits 15:8
1015 if (!vuart2_h2b_fifo_rempty) begin
1016 vuart2_h2b_fifo_rpop <= 1;
1017 end
1018 end
1019 end
1020 4: begin
1021 mmio_peripheral_tx_buffer[31:24] <= vuart1_status_register_sync_2[7:0];
1022 mmio_peripheral_tx_buffer[23:16] <= vuart1_status_register_sync_2[15:8];
1023 mmio_peripheral_tx_buffer[15:8] <= vuart1_status_register_sync_2[23:16];
1024 mmio_peripheral_tx_buffer[7:0] <= {6'b0, vuart1_lcr_break_request, vuart1_b2h_fifo_wfull};
1025 vuart1_lcr_break_ack <= 1;
1026 end
1027 8: begin
1028 mmio_peripheral_tx_buffer[31:24] <= vuart1_control_register[7:0];
1029 mmio_peripheral_tx_buffer[23:16] <= vuart1_control_register[15:8];
1030 mmio_peripheral_tx_buffer[15:8] <= vuart1_control_register[23:16];
1031 mmio_peripheral_tx_buffer[7:0] <= vuart1_control_register[31:24];
1032 end
1033 12: begin
1034 mmio_peripheral_tx_buffer[31:24] <= vuart2_status_register_sync_2[7:0];
1035 mmio_peripheral_tx_buffer[23:16] <= vuart2_status_register_sync_2[15:8];
1036 mmio_peripheral_tx_buffer[15:8] <= vuart2_status_register_sync_2[23:16];
1037 mmio_peripheral_tx_buffer[7:0] <= {6'b0, vuart2_lcr_break_request, vuart2_b2h_fifo_wfull};
1038 vuart2_lcr_break_ack <= 1;
1039 end
1040 16: begin
1041 mmio_peripheral_tx_buffer[31:24] <= vuart2_control_register[7:0];
1042 mmio_peripheral_tx_buffer[23:16] <= vuart2_control_register[15:8];
1043 mmio_peripheral_tx_buffer[15:8] <= vuart2_control_register[23:16];
1044 mmio_peripheral_tx_buffer[7:0] <= vuart2_control_register[31:24];
1045 end
1046 default: mmio_peripheral_tx_buffer = 32'hffffffff;
1047 endcase
1048
1049 // Place data on Wishbone bus
1050 slave_wishbone_dat_r_reg <= mmio_peripheral_tx_buffer;
1051
1052 // Signal transfer complete
1053 slave_wishbone_ack_reg <= 1;
1054
1055 mmio_transfer_state <= MMIO_TRANSFER_STATE_TR02;
1056 end else begin
1057 // Write requested
1058 case ({mmio_lpc_buffer_address_reg[19:2], 2'b00})
1059 8: mmio_cfg_space_rx_buffer = vuart1_control_register;
1060 16: mmio_cfg_space_rx_buffer = vuart2_control_register;
1061 default: mmio_cfg_space_rx_buffer = 0;
1062 endcase
1063
1064 if (slave_wishbone_sel[0]) begin
1065 mmio_cfg_space_rx_buffer[7:0] = slave_wishbone_dat_w[31:24];
1066 end
1067 if (slave_wishbone_sel[1]) begin
1068 mmio_cfg_space_rx_buffer[15:8] = slave_wishbone_dat_w[23:16];
1069 end
1070 if (slave_wishbone_sel[2]) begin
1071 mmio_cfg_space_rx_buffer[23:16] = slave_wishbone_dat_w[15:8];
1072 end
1073 if (slave_wishbone_sel[3]) begin
1074 mmio_cfg_space_rx_buffer[31:24] = slave_wishbone_dat_w[7:0];
1075 end
1076
1077 // Specialty bit handlers
1078 case ({mmio_lpc_buffer_address_reg[19:2], 2'b00})
1079 0: begin
1080 if (slave_wishbone_sel[0]) begin
1081 // Load VUART1 B2H FIFO
1082 if (!vuart1_b2h_fifo_wfull) begin
1083 vuart1_b2h_fifo_wdata <= mmio_cfg_space_rx_buffer[7:0];
1084 vuart1_b2h_fifo_wwren <= 1;
1085 end
1086 end
1087 if (slave_wishbone_sel[2]) begin
1088 // Load VUART2 B2H FIFO
1089 if (!vuart2_b2h_fifo_wfull) begin
1090 vuart2_b2h_fifo_wdata <= mmio_cfg_space_rx_buffer[23:16];
1091 vuart2_b2h_fifo_wwren <= 1;
1092 end
1093 end
1094 end
1095 8: begin
1096 if (mmio_cfg_space_rx_buffer[0]) begin
1097 // B2H BREAK request
1098 mmio_cfg_space_rx_buffer[0] = 0;
1099 if (!vuart1_assert_b2h_break_clear_sync[2]) begin
1100 vuart1_control_register[0] <= 1;
1101 end
1102 end
1103 end
1104 16: begin
1105 if (mmio_cfg_space_rx_buffer[0]) begin
1106 // B2H BREAK request
1107 mmio_cfg_space_rx_buffer[0] = 0;
1108 if (!vuart2_assert_b2h_break_clear_sync[2]) begin
1109 vuart2_control_register[0] <= 1;
1110 end
1111 end
1112 end
1113 endcase
1114
1115 case ({mmio_lpc_buffer_address_reg[19:2], 2'b00})
1116 8: vuart1_control_register <= mmio_cfg_space_rx_buffer;
1117 16: vuart2_control_register <= mmio_cfg_space_rx_buffer;
1118 endcase
1119
1120 // Signal transfer complete
1121 slave_wishbone_ack_reg <= 1;
1122
1123 mmio_transfer_state <= MMIO_TRANSFER_STATE_TR02;
1124 end
1125 end
1126 end else if (mmio_lpc_buffer_address_reg[31:20] == 12'h00f) begin
1127 // Configuration register space access
1128 if (!continue_transfer) begin
1129 // Single clock pulse signals in deasserted state...process incoming request!
1130 if (!slave_wishbone_we) begin
1131 // Read requested
1132 case ({mmio_lpc_buffer_address_reg[19:2], 2'b00})
1133 0: mmio_cfg_space_tx_buffer = device_id[63:32];
1134 4: mmio_cfg_space_tx_buffer = device_id[31:0];
1135 8: mmio_cfg_space_tx_buffer = device_version;
1136 12: mmio_cfg_space_tx_buffer = control_reg1;
1137 16: mmio_cfg_space_tx_buffer = control_reg2;
1138 20: mmio_cfg_space_tx_buffer = range_config1;
1139 24: mmio_cfg_space_tx_buffer = range_end1;
1140 28: mmio_cfg_space_tx_buffer = range_config2;
1141 32: mmio_cfg_space_tx_buffer = range_end2;
1142 36: mmio_cfg_space_tx_buffer = range_config3;
1143 40: mmio_cfg_space_tx_buffer = range_end3;
1144 44: mmio_cfg_space_tx_buffer = range_config4;
1145 48: mmio_cfg_space_tx_buffer = range_end4;
1146 52: mmio_cfg_space_tx_buffer = range_config5;
1147 56: mmio_cfg_space_tx_buffer = range_end5;
1148 60: mmio_cfg_space_tx_buffer = range_config6;
1149 64: mmio_cfg_space_tx_buffer = range_end6;
1150 68: mmio_cfg_space_tx_buffer = dma_config_reg1;
1151 72: mmio_cfg_space_tx_buffer = dma_config_reg2;
1152 76: mmio_cfg_space_tx_buffer = dma_config_reg3;
1153 80: mmio_cfg_space_tx_buffer = dma_config_reg4;
1154 84: mmio_cfg_space_tx_buffer = dma_config_reg5;
1155 88: mmio_cfg_space_tx_buffer = dma_config_reg6;
1156 92: mmio_cfg_space_tx_buffer = dma_config_reg7;
1157 96: mmio_cfg_space_tx_buffer = status_reg1;
1158 100: mmio_cfg_space_tx_buffer = status_reg2;
1159 104: mmio_cfg_space_tx_buffer = status_reg3;
1160 108: mmio_cfg_space_tx_buffer = status_reg4;
1161 112: mmio_cfg_space_tx_buffer = ipmi_bt_status_reg;
1162 default: mmio_cfg_space_tx_buffer = 0;
1163 endcase
1164
1165 // Specialty bit handlers
1166 case ({mmio_lpc_buffer_address_reg[19:2], 2'b00})
1167 12: mmio_cfg_space_tx_buffer[0] = 0; // continue_transfer
1168 endcase
1169
1170 // Endian swap
1171 slave_wishbone_dat_r_reg[31:24] <= mmio_cfg_space_tx_buffer[7:0];
1172 slave_wishbone_dat_r_reg[23:16] <= mmio_cfg_space_tx_buffer[15:8];
1173 slave_wishbone_dat_r_reg[15:8] <= mmio_cfg_space_tx_buffer[23:16];
1174 slave_wishbone_dat_r_reg[7:0] <= mmio_cfg_space_tx_buffer[31:24];
1175
1176 // Signal transfer complete
1177 slave_wishbone_ack_reg <= 1;
1178
1179 mmio_transfer_state <= MMIO_TRANSFER_STATE_TR02;
1180 end else begin
1181 // Write requested
1182 case ({mmio_lpc_buffer_address_reg[19:2], 2'b00})
1183 // Device ID / version registers cannot be written, don't even try...
1184 12: mmio_cfg_space_rx_buffer = control_reg1;
1185 16: mmio_cfg_space_rx_buffer = control_reg2;
1186 20: mmio_cfg_space_rx_buffer = range_config1;
1187 24: mmio_cfg_space_rx_buffer = range_end1;
1188 28: mmio_cfg_space_rx_buffer = range_config2;
1189 32: mmio_cfg_space_rx_buffer = range_end2;
1190 36: mmio_cfg_space_rx_buffer = range_config3;
1191 40: mmio_cfg_space_rx_buffer = range_end3;
1192 44: mmio_cfg_space_rx_buffer = range_config4;
1193 48: mmio_cfg_space_rx_buffer = range_end4;
1194 52: mmio_cfg_space_rx_buffer = range_config5;
1195 56: mmio_cfg_space_rx_buffer = range_end5;
1196 60: mmio_cfg_space_rx_buffer = range_config6;
1197 64: mmio_cfg_space_rx_buffer = range_end6;
1198 68: mmio_cfg_space_rx_buffer = dma_config_reg1;
1199 72: mmio_cfg_space_rx_buffer = dma_config_reg2;
1200 76: mmio_cfg_space_rx_buffer = dma_config_reg3;
1201 80: mmio_cfg_space_rx_buffer = dma_config_reg4;
1202 84: mmio_cfg_space_rx_buffer = dma_config_reg5;
1203 88: mmio_cfg_space_rx_buffer = dma_config_reg6;
1204 92: mmio_cfg_space_rx_buffer = dma_config_reg7;
1205 // Status registers cannot be written, don't even try...
1206 default: mmio_cfg_space_rx_buffer = 0;
1207 endcase
1208
1209 if (slave_wishbone_sel[0]) begin
1210 mmio_cfg_space_rx_buffer[7:0] = slave_wishbone_dat_w[31:24];
1211 end
1212 if (slave_wishbone_sel[1]) begin
1213 mmio_cfg_space_rx_buffer[15:8] = slave_wishbone_dat_w[23:16];
1214 end
1215 if (slave_wishbone_sel[2]) begin
1216 mmio_cfg_space_rx_buffer[23:16] = slave_wishbone_dat_w[15:8];
1217 end
1218 if (slave_wishbone_sel[3]) begin
1219 mmio_cfg_space_rx_buffer[31:24] = slave_wishbone_dat_w[7:0];
1220 end
1221
1222 // Specialty bit handlers
1223 case ({mmio_lpc_buffer_address_reg[19:2], 2'b00})
1224 16: begin
1225 // continue_transfer
1226 if (mmio_cfg_space_rx_buffer[0]) begin
1227 mmio_cfg_space_rx_buffer[0] = 0;
1228 continue_transfer <= 1;
1229 end
1230 end
1231 108: begin
1232 // IPMI BT control register
1233 if (mmio_cfg_space_rx_buffer[0]) begin
1234 // CLR_WR_PTR
1235 ipmi_bt_input_xfer_write_addr <= 0;
1236 end
1237 if (mmio_cfg_space_rx_buffer[1]) begin
1238 // CLR_RD_PTR
1239 ipmi_bt_output_xfer_read_addr <= 0;
1240 end
1241 if (mmio_cfg_space_rx_buffer[2]) begin
1242 // H2B_ATN clear
1243 ipmi_bt_host_to_bmc_ctl_attn_req_cont <= 1;
1244 mmio_guard_condition_required = 1;
1245 end
1246 if (mmio_cfg_space_rx_buffer[3]) begin
1247 // B2H_ATN set
1248 ipmi_bt_bmc_to_host_ctl_attn_req <= 1;
1249 end
1250 if (mmio_cfg_space_rx_buffer[4]) begin
1251 // SMS_ATN set
1252 ipmi_bt_bmc_to_host_ctl_sms_req <= 1;
1253 end
1254 if (mmio_cfg_space_rx_buffer[5]) begin
1255 // OEM0 clear
1256 ipmi_bt_host_to_bmc_ctl_oem0_req_cont <= 1;
1257 mmio_guard_condition_required = 1;
1258 end
1259 if (mmio_cfg_space_rx_buffer[7]) begin
1260 // B_BUSY
1261 if (ipmi_bt_bmc_to_host_ctl_b_busy) begin
1262 ipmi_bt_bmc_to_host_ctl_b_busy <= 0;
1263 end else begin
1264 ipmi_bt_bmc_to_host_ctl_b_busy <= 1;
1265 end
1266 end
1267 end
1268 endcase
1269
1270 case ({mmio_lpc_buffer_address_reg[19:2], 2'b00})
1271 12: control_reg1 <= mmio_cfg_space_rx_buffer;
1272 16: control_reg2 <= mmio_cfg_space_rx_buffer;
1273 20: range_config1 <= mmio_cfg_space_rx_buffer;
1274 24: range_end1 <= mmio_cfg_space_rx_buffer;
1275 28: range_config2 <= mmio_cfg_space_rx_buffer;
1276 32: range_end2 <= mmio_cfg_space_rx_buffer;
1277 36: range_config3 <= mmio_cfg_space_rx_buffer;
1278 40: range_end3 <= mmio_cfg_space_rx_buffer;
1279 44: range_config4 <= mmio_cfg_space_rx_buffer;
1280 48: range_end4 <= mmio_cfg_space_rx_buffer;
1281 52: range_config5 <= mmio_cfg_space_rx_buffer;
1282 56: range_end5 <= mmio_cfg_space_rx_buffer;
1283 60: range_config6 <= mmio_cfg_space_rx_buffer;
1284 64: range_end6 <= mmio_cfg_space_rx_buffer;
1285 68: dma_config_reg1 <= mmio_cfg_space_rx_buffer;
1286 72: dma_config_reg2 <= mmio_cfg_space_rx_buffer;
1287 76: dma_config_reg3 <= mmio_cfg_space_rx_buffer;
1288 80: dma_config_reg4 <= mmio_cfg_space_rx_buffer;
1289 84: dma_config_reg5 <= mmio_cfg_space_rx_buffer;
1290 88: dma_config_reg6 <= mmio_cfg_space_rx_buffer;
1291 92: dma_config_reg7 <= mmio_cfg_space_rx_buffer;
1292 endcase
1293
1294 if (mmio_guard_condition_required) begin
1295 // Signal transfer complete
1296 slave_wishbone_ack_reg <= 1;
1297
1298 mmio_transfer_state <= MMIO_TRANSFER_STATE_TR02;
1299 end else begin
1300 mmio_transfer_state <= MMIO_TRANSFER_STATE_TR01;
1301 end
1302 end
1303 end
1304 end else begin
1305 // Select 8-bit/32-bit transfer size via Wishbone access mode
1306 if (slave_wishbone_sel == 4'b1111) begin
1307 wishbone_mmio_access_is_32_bits = 1;
1308 end else begin
1309 wishbone_mmio_access_is_32_bits = 0;
1310 end
1311 slave_wishbone_sel_reg <= slave_wishbone_sel;
1312 if (!slave_wishbone_we) begin
1313 // Read requested
1314 // Set up read
1315 if (wishbone_mmio_access_is_32_bits) begin
1316 case (mmio_lpc_buffer_address_reg[31:20])
1317 12'h00c: lpc_fw_output_xfer_mmio_read_addr <= mmio_lpc_buffer_address_reg[8:0];
1318 12'h00d: ipmi_bt_output_xfer_read_addr <= mmio_lpc_buffer_address_reg[8:0];
1319 endcase
1320 end else begin
1321 case (mmio_lpc_buffer_address_reg[31:20])
1322 12'h00c: lpc_fw_output_xfer_mmio_read_addr <= mmio_lpc_buffer_address_reg[8:0];
1323 12'h00d: ipmi_bt_output_xfer_read_addr <= mmio_lpc_buffer_address_reg[8:0];
1324 endcase
1325 end
1326 mmio_transfer_state <= MMIO_TRANSFER_STATE_RD01;
1327 end else begin
1328 // Write requested
1329 // Take single cycle performance hit for simplicity here...
1330 mmio_transfer_state <= MMIO_TRANSFER_STATE_WR01;
1331 end
1332 end
1333 end
1334 end
1335 MMIO_TRANSFER_STATE_RD01: begin
1336 if (wishbone_mmio_access_is_32_bits) begin
1337 // Set up next read
1338 case (mmio_lpc_buffer_address_reg[31:20])
1339 12'h00c: lpc_fw_output_xfer_mmio_read_addr <= mmio_lpc_buffer_address_reg[8:0] + 1;
1340 12'h00d: ipmi_bt_output_xfer_read_addr <= mmio_lpc_buffer_address_reg[8:0] + 1;
1341 endcase
1342 mmio_transfer_state <= MMIO_TRANSFER_STATE_RD02;
1343 end else begin
1344 // Wait for read data to become available
1345 mmio_transfer_state <= MMIO_TRANSFER_STATE_RD02;
1346 end
1347 end
1348 MMIO_TRANSFER_STATE_RD02: begin
1349 case (mmio_lpc_buffer_address_reg[31:20])
1350 12'h00c: xfer_read_data_buffer = lpc_fw_output_xfer_read_data;
1351 12'h00d: xfer_read_data_buffer = ipmi_bt_output_xfer_read_data;
1352 endcase
1353
1354 if (wishbone_mmio_access_is_32_bits) begin
1355 // Read first byte
1356 slave_wishbone_dat_r_reg[31:24] <= xfer_read_data_buffer;
1357
1358 // Set up next read
1359 case (mmio_lpc_buffer_address_reg[31:20])
1360 12'h00c: lpc_fw_output_xfer_mmio_read_addr <= mmio_lpc_buffer_address_reg[8:0] + 2;
1361 12'h00d: ipmi_bt_output_xfer_read_addr <= mmio_lpc_buffer_address_reg[8:0] + 2;
1362 endcase
1363 mmio_transfer_state <= MMIO_TRANSFER_STATE_RD03;
1364 end else begin
1365 // Replicate the data byte to all active lanes
1366 if (slave_wishbone_sel_reg[0]) begin
1367 slave_wishbone_dat_r_reg[31:24] <= xfer_read_data_buffer;
1368 end
1369 if (slave_wishbone_sel_reg[1]) begin
1370 slave_wishbone_dat_r_reg[23:16] <= xfer_read_data_buffer;
1371 end
1372 if (slave_wishbone_sel_reg[2]) begin
1373 slave_wishbone_dat_r_reg[15:8] <= xfer_read_data_buffer;
1374 end
1375 if (slave_wishbone_sel_reg[3]) begin
1376 slave_wishbone_dat_r_reg[7:0] <= xfer_read_data_buffer;
1377 end
1378
1379 // Signal transfer complete
1380 slave_wishbone_ack_reg <= 1;
1381
1382 mmio_transfer_state <= MMIO_TRANSFER_STATE_TR02;
1383 end
1384 end
1385 MMIO_TRANSFER_STATE_RD03: begin
1386 // Read second byte
1387 case (mmio_lpc_buffer_address_reg[31:20])
1388 12'h00c: begin
1389 slave_wishbone_dat_r_reg[23:16] <= lpc_fw_output_xfer_read_data;
1390 lpc_fw_output_xfer_mmio_read_addr <= mmio_lpc_buffer_address_reg[8:0] + 3;
1391 end
1392 12'h00d: begin
1393 slave_wishbone_dat_r_reg[23:16] <= ipmi_bt_output_xfer_read_data;
1394 ipmi_bt_output_xfer_read_addr <= mmio_lpc_buffer_address_reg[8:0] + 3;
1395 end
1396 endcase
1397
1398 mmio_transfer_state <= MMIO_TRANSFER_STATE_RD04;
1399 end
1400 MMIO_TRANSFER_STATE_RD04: begin
1401 // Read third byte
1402 case (mmio_lpc_buffer_address_reg[31:20])
1403 12'h00c: slave_wishbone_dat_r_reg[15:8] <= lpc_fw_output_xfer_read_data;
1404 12'h00d: slave_wishbone_dat_r_reg[15:8] <= ipmi_bt_output_xfer_read_data;
1405 endcase
1406
1407 mmio_transfer_state <= MMIO_TRANSFER_STATE_RD05;
1408 end
1409 MMIO_TRANSFER_STATE_RD05: begin
1410 // Read fourth byte
1411 case (mmio_lpc_buffer_address_reg[31:20])
1412 12'h00c: slave_wishbone_dat_r_reg[7:0] <= lpc_fw_output_xfer_read_data;
1413 12'h00d: slave_wishbone_dat_r_reg[7:0] <= ipmi_bt_output_xfer_read_data;
1414 endcase
1415
1416 // Signal transfer complete
1417 slave_wishbone_ack_reg <= 1;
1418
1419 mmio_transfer_state <= MMIO_TRANSFER_STATE_TR02;
1420 end
1421 MMIO_TRANSFER_STATE_WR01: begin
1422 if (lpc_fw_dma_cycle_inactive) begin
1423 // No conflict present on LPC buffer write signals
1424 if (wishbone_mmio_access_is_32_bits) begin
1425 // Write first byte
1426 case (mmio_lpc_buffer_address_reg[31:20])
1427 12'h00c: begin
1428 lpc_fw_input_xfer_mmio_write_addr <= mmio_lpc_buffer_address_reg[8:0] + 3;
1429 lpc_fw_input_xfer_mmio_write_data <= slave_wishbone_dat_w[7:0];
1430 lpc_fw_input_xfer_mmio_write_wren <= 1;
1431 end
1432 12'h00d: begin
1433 ipmi_bt_input_xfer_write_addr <= mmio_lpc_buffer_address_reg[8:0] + 3;
1434 ipmi_bt_input_xfer_write_data <= slave_wishbone_dat_w[7:0];
1435 ipmi_bt_input_xfer_write_wren <= 1;
1436 end
1437 endcase
1438 mmio_transfer_state <= MMIO_TRANSFER_STATE_WR02;
1439 end else begin
1440 // Read the data byte to write from the active lane
1441 if (slave_wishbone_sel_reg[0]) begin
1442 xfer_write_data_buffer = slave_wishbone_dat_w[31:24];
1443 end else if (slave_wishbone_sel_reg[1]) begin
1444 xfer_write_data_buffer = slave_wishbone_dat_w[23:16];
1445 end else if (slave_wishbone_sel_reg[2]) begin
1446 xfer_write_data_buffer = slave_wishbone_dat_w[15:8];
1447 end else if (slave_wishbone_sel_reg[3]) begin
1448 xfer_write_data_buffer = slave_wishbone_dat_w[7:0];
1449 end else begin
1450 xfer_write_data_buffer = 8'hff;
1451 end
1452 case (mmio_lpc_buffer_address_reg[31:20])
1453 12'h00c: begin
1454 lpc_fw_input_xfer_mmio_write_data <= xfer_write_data_buffer;
1455 lpc_fw_input_xfer_mmio_write_addr <= mmio_lpc_buffer_address_reg[8:0];
1456 lpc_fw_input_xfer_mmio_write_wren <= 1;
1457 end
1458 12'h00d: begin
1459 ipmi_bt_input_xfer_write_data <= xfer_write_data_buffer;
1460 ipmi_bt_input_xfer_write_addr <= mmio_lpc_buffer_address_reg[8:0];
1461 ipmi_bt_input_xfer_write_wren <= 1;
1462 end
1463 endcase
1464
1465 // Signal transfer complete
1466 slave_wishbone_ack_reg <= 1;
1467
1468 mmio_transfer_state <= MMIO_TRANSFER_STATE_TR02;
1469 end
1470 end
1471 end
1472 MMIO_TRANSFER_STATE_WR02: begin
1473 if (lpc_fw_dma_cycle_inactive) begin
1474 // No conflict present on LPC buffer write signals
1475 // Write second byte
1476 case (mmio_lpc_buffer_address_reg[31:20])
1477 12'h00c: begin
1478 lpc_fw_input_xfer_mmio_write_addr <= mmio_lpc_buffer_address_reg[8:0] + 2;
1479 lpc_fw_input_xfer_mmio_write_data <= slave_wishbone_dat_w[15:8];
1480 lpc_fw_input_xfer_mmio_write_wren <= 1;
1481 end
1482 12'h00d: begin
1483 ipmi_bt_input_xfer_write_addr <= mmio_lpc_buffer_address_reg[8:0] + 2;
1484 ipmi_bt_input_xfer_write_data <= slave_wishbone_dat_w[15:8];
1485 ipmi_bt_input_xfer_write_wren <= 1;
1486 end
1487 endcase
1488 mmio_transfer_state <= MMIO_TRANSFER_STATE_WR03;
1489 end
1490 end
1491 MMIO_TRANSFER_STATE_WR03: begin
1492 if (lpc_fw_dma_cycle_inactive) begin
1493 // No conflict present on LPC buffer write signals
1494 // Write third byte
1495 case (mmio_lpc_buffer_address_reg[31:20])
1496 12'h00c: begin
1497 lpc_fw_input_xfer_mmio_write_addr <= mmio_lpc_buffer_address_reg[8:0] + 1;
1498 lpc_fw_input_xfer_mmio_write_data <= slave_wishbone_dat_w[23:16];
1499 lpc_fw_input_xfer_mmio_write_wren <= 1;
1500 end
1501 12'h00d: begin
1502 ipmi_bt_input_xfer_write_addr <= mmio_lpc_buffer_address_reg[8:0] + 1;
1503 ipmi_bt_input_xfer_write_data <= slave_wishbone_dat_w[23:16];
1504 ipmi_bt_input_xfer_write_wren <= 1;
1505 end
1506 endcase
1507 mmio_transfer_state <= MMIO_TRANSFER_STATE_WR04;
1508 end
1509 end
1510 MMIO_TRANSFER_STATE_WR04: begin
1511 if (lpc_fw_dma_cycle_inactive) begin
1512 // No conflict present on LPC buffer write signals
1513 // Write fourth byte
1514 case (mmio_lpc_buffer_address_reg[31:20])
1515 12'h00c: begin
1516 lpc_fw_input_xfer_mmio_write_addr <= mmio_lpc_buffer_address_reg[8:0];
1517 lpc_fw_input_xfer_mmio_write_data <= slave_wishbone_dat_w[31:24];
1518 lpc_fw_input_xfer_mmio_write_wren <= 1;
1519 end
1520 12'h00d: begin
1521 ipmi_bt_input_xfer_write_addr <= mmio_lpc_buffer_address_reg[8:0];
1522 ipmi_bt_input_xfer_write_data <= slave_wishbone_dat_w[31:24];
1523 ipmi_bt_input_xfer_write_wren <= 1;
1524 end
1525 endcase
1526
1527 // Signal transfer complete
1528 slave_wishbone_ack_reg <= 1;
1529
1530 mmio_transfer_state <= MMIO_TRANSFER_STATE_TR02;
1531 end
1532 end
1533 MMIO_TRANSFER_STATE_TR01: begin
1534 if (ipmi_bt_host_to_bmc_ctl_attn_req_cont) begin
1535 if (!ipmi_bt_host_to_bmc_ctl_attn_req_sync[2]) begin
1536 ipmi_bt_host_to_bmc_ctl_attn_req_cont <= 0;
1537 end
1538 end else if (ipmi_bt_host_to_bmc_ctl_oem0_req_cont) begin
1539 if (!ipmi_bt_host_to_bmc_ctl_oem0_req_sync[2]) begin
1540 ipmi_bt_host_to_bmc_ctl_oem0_req_cont <= 0;
1541 end
1542 end else begin
1543 // Signal transfer complete
1544 slave_wishbone_ack_reg <= 1;
1545
1546 mmio_transfer_state <= MMIO_TRANSFER_STATE_TR02;
1547 end
1548 end
1549 MMIO_TRANSFER_STATE_TR02: begin
1550 // Cycle complete
1551 slave_wishbone_ack_reg <= 0;
1552 vuart1_h2b_fifo_rpop <= 0;
1553 vuart2_h2b_fifo_rpop <= 0;
1554 vuart1_b2h_fifo_wwren <= 0;
1555 vuart2_b2h_fifo_wwren <= 0;
1556 lpc_fw_input_xfer_mmio_write_wren <= 0;
1557 ipmi_bt_input_xfer_write_wren <= 0;
1558 mmio_transfer_state <= MMIO_TRANSFER_STATE_IDLE;
1559 end
1560 default: begin
1561 // Should never reach this state
1562 mmio_transfer_state <= MMIO_TRANSFER_STATE_IDLE;
1563 end
1564 endcase
1565
1566 // This assumes the MMIO handler is driven off the same clock as the LPC transfer handler
1567 // It will generate a single clock width pulse on the continue line
1568 if (continue_transfer) begin
1569 continue_transfer <= 0;
1570 end
1571
1572 if ((mmio_transfer_state == MMIO_TRANSFER_STATE_IDLE) && !(slave_wishbone_cyc && slave_wishbone_stb)) begin
1573 // Bits are not being actively set / cleared by the BMC, therefore it is now safe
1574 // to execute the handshake logic (potential race conditions avoided)
1575
1576 // VUART handshake logic
1577 if (vuart1_control_register[0] && vuart1_assert_b2h_break_clear_sync[2]) begin
1578 vuart1_control_register[0] <= 0;
1579 end
1580 if (vuart2_control_register[0] && vuart2_assert_b2h_break_clear_sync[2]) begin
1581 vuart2_control_register[0] <= 0;
1582 end
1583
1584 if (vuart1_status_register_sync_2[6]) begin
1585 vuart1_lcr_break_request <= 1;
1586 end else begin
1587 if (vuart1_lcr_break_ack) begin
1588 vuart1_lcr_break_request <= 0;
1589 vuart1_lcr_break_ack <= 0;
1590 end
1591 end
1592 if (vuart2_status_register_sync_2[6]) begin
1593 vuart2_lcr_break_request <= 1;
1594 end else begin
1595 if (vuart2_lcr_break_ack) begin
1596 vuart2_lcr_break_request <= 0;
1597 vuart2_lcr_break_ack <= 0;
1598 end
1599 end
1600
1601 // VUART1 IRQ handler logic
1602 if (vuart1_h2b_fifo_rpop) begin
1603 vuart1_h2b_fifo_read_timeout_counter <= 0;
1604 vuart1_h2b_fifo_read_timeout <= 0;
1605 end else begin
1606 if (vuart1_h2b_fifo_rempty) begin
1607 vuart1_h2b_fifo_read_timeout_counter <= 0;
1608 vuart1_h2b_fifo_read_timeout <= 0;
1609 end else begin
1610 if (vuart1_h2b_fifo_read_timeout_counter > 1000) begin
1611 vuart1_h2b_fifo_read_timeout <= 1;
1612 end else begin
1613 vuart1_h2b_fifo_read_timeout_counter <= vuart1_h2b_fifo_read_timeout_counter + 1;
1614 end
1615 end
1616 end
1617
1618 if ((vuart1_h2b_fifo_data_available_count[3:0] >= vuart1_h2b_fifo_irq_trigger_level) || vuart1_h2b_fifo_wfull) begin
1619 vuart1_h2b_fifo_queue_past_trigger <= 1;
1620 end else begin
1621 vuart1_h2b_fifo_queue_past_trigger <= 0;
1622 end
1623 if (vuart1_irqs_enabled) begin
1624 if (vuart1_h2b_fifo_irq_enabled && vuart1_h2b_fifo_queue_past_trigger) begin
1625 vuart1_h2b_fifo_irq <= 1;
1626 vuart1_irq_source <= VUART_IRQ_REASON_QUEUE_TRIGGER;
1627 end else if (vuart1_h2b_fifo_irq_enabled && vuart1_h2b_fifo_read_timeout) begin
1628 vuart1_h2b_fifo_irq <= 1;
1629 vuart1_irq_source <= VUART_IRQ_REASON_QUEUE_TIMEOUT;
1630 end else begin
1631 vuart1_irq_source <= VUART_IRQ_REASON_NONE;
1632 vuart1_h2b_fifo_irq <= 0;
1633 end
1634 end else begin
1635 vuart1_irq_source <= VUART_IRQ_REASON_NONE;
1636 vuart1_h2b_fifo_irq <= 0;
1637 end
1638
1639 // VUART2 IRQ handler logic
1640 if (vuart2_h2b_fifo_rpop) begin
1641 vuart2_h2b_fifo_read_timeout_counter <= 0;
1642 vuart2_h2b_fifo_read_timeout <= 0;
1643 end else begin
1644 if (vuart2_h2b_fifo_rempty) begin
1645 vuart2_h2b_fifo_read_timeout_counter <= 0;
1646 vuart2_h2b_fifo_read_timeout <= 0;
1647 end else begin
1648 if (vuart2_h2b_fifo_read_timeout_counter > 1000) begin
1649 vuart2_h2b_fifo_read_timeout <= 1;
1650 end else begin
1651 vuart2_h2b_fifo_read_timeout_counter <= vuart2_h2b_fifo_read_timeout_counter + 1;
1652 end
1653 end
1654 end
1655
1656 if ((vuart2_h2b_fifo_data_available_count[3:0] >= vuart2_h2b_fifo_irq_trigger_level) || vuart2_h2b_fifo_wfull) begin
1657 vuart2_h2b_fifo_queue_past_trigger <= 1;
1658 end else begin
1659 vuart2_h2b_fifo_queue_past_trigger <= 0;
1660 end
1661 if (vuart2_irqs_enabled) begin
1662 if (vuart2_h2b_fifo_irq_enabled && vuart2_h2b_fifo_queue_past_trigger) begin
1663 vuart2_h2b_fifo_irq <= 1;
1664 vuart2_irq_source <= VUART_IRQ_REASON_QUEUE_TRIGGER;
1665 end else if (vuart2_h2b_fifo_irq_enabled && vuart2_h2b_fifo_read_timeout) begin
1666 vuart2_h2b_fifo_irq <= 1;
1667 vuart2_irq_source <= VUART_IRQ_REASON_QUEUE_TIMEOUT;
1668 end else begin
1669 vuart2_irq_source <= VUART_IRQ_REASON_NONE;
1670 vuart2_h2b_fifo_irq <= 0;
1671 end
1672 end else begin
1673 vuart2_irq_source <= VUART_IRQ_REASON_NONE;
1674 vuart2_h2b_fifo_irq <= 0;
1675 end
1676
1677 // IPMI handshake handler logic
1678 if (ipmi_bt_bmc_to_host_ctl_attn_ack_sync[2]) begin
1679 ipmi_bt_bmc_to_host_ctl_attn_req <= 0;
1680 ipmi_bt_bmc_to_host_ctl_attn_ack_cont <= 1;
1681 end else begin
1682 ipmi_bt_bmc_to_host_ctl_attn_ack_cont <= 0;
1683 end
1684 if (ipmi_bt_bmc_to_host_ctl_sms_ack_sync[2]) begin
1685 ipmi_bt_bmc_to_host_ctl_sms_req <= 0;
1686 ipmi_bt_bmc_to_host_ctl_sms_ack_cont <= 1;
1687 end else begin
1688 ipmi_bt_bmc_to_host_ctl_sms_ack_cont <= 0;
1689 end
1690
1691 // IPMI BMC IRQ handler logic
1692 if (ipmi_bt_irq_ack_sync[2]) begin
1693 ipmi_bt_irq_req <= 0;
1694 ipmi_bt_irq_ack_cont <= 1;
1695 end else begin
1696 if (!ipmi_bt_irq_ack_cont) begin
1697 if (ipmi_bt_irq_enable_sync[2]
1698 && !ipmi_bt_irq_ack_cont
1699 && ((!ipmi_bt_h2b_oem0_req_prev && ipmi_bt_h2b_oem0_req)
1700 || (!ipmi_bt_bmc_to_host_ctl_attn_req_prev && ipmi_bt_bmc_to_host_ctl_attn_req)
1701 || (!ipmi_bt_bmc_to_host_ctl_sms_req_prev && ipmi_bt_bmc_to_host_ctl_sms_req))) begin
1702 ipmi_bt_irq_req <= 1;
1703 end
1704 end else begin
1705 ipmi_bt_irq_ack_cont <= 0;
1706 end
1707 end
1708 if (!ipmi_bt_irq_ack_cont) begin
1709 // Wait for prior IRQ line handshake to complete before sampling the B2H_ATN line
1710 // This ensures that the IRQ is still fired if the continue signal is asserted while
1711 // B2H_ATN transitions from inactive to active.
1712 ipmi_bt_bmc_to_host_ctl_attn_req_prev <= ipmi_bt_bmc_to_host_ctl_attn_req;
1713 ipmi_bt_bmc_to_host_ctl_sms_req_prev <= ipmi_bt_bmc_to_host_ctl_sms_req;
1714 ipmi_bt_h2b_oem0_req_prev <= ipmi_bt_h2b_oem0_req;
1715 end
1716
1717 if (ipmi_bt_bmc_irq_enable && ipmi_bt_host_to_bmc_ctl_attn_req_sync[2]) begin
1718 ipmi_bt_bmc_irq <= 1;
1719 end else begin
1720 ipmi_bt_bmc_irq <= 0;
1721 end
1722 end
1723 end
1724 end
1725
1726 // Wishbone connector -- CSRs
1727 //
1728 // WARNING: The LPC slave will run at ~33.33MHz from an external clock source.
1729 // This module assumes the Wishbone clock will be clocked no lower than 1.5x the
1730 // external LPC frequency, i.e. no lower than 50MHz. All synchronizer logic is
1731 // built around this assumption; violating it *will* lead to data corruption and
1732 // unpredictable / undefined behavior!
1733 always @(posedge peripheral_clock) begin
1734 if (peripheral_reset || !lpc_reset_n_sync[2]) begin
1735 // Reset Wishbone interface / control state machine
1736 lpc_slave_address_reg <= 0;
1737 lpc_slave_firmware_cycle_reg <= 0;
1738 attn_req <= 0;
1739 pending_address <= 0;
1740 pending_data <= 0;
1741 pending_fw_cycle_idsel <= 0;
1742 pending_fw_cycle_msize <= 0;
1743 lpc_fw_dma_cycle_active <= 0;
1744 lpc_fw_dma_cycle_inactive <= 1;
1745 lpc_io_cycle_irq <= 0;
1746 lpc_tpm_cycle_irq <= 0;
1747 lpc_firmware_cycle_irq <= 0;
1748 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IDLE;
1749 end else begin
1750 case (lpc_slave_transfer_state)
1751 LPC_SLAVE_TRANSFER_STATE_IDLE: begin
1752 if (lpc_slave_address_ready_sync[2]) begin
1753 // Determine cycle type
1754 is_firmware_cycle_type = lpc_slave_firmware_cycle && !lpc_slave_tpm_cycle;
1755 is_tpm_cycle_type = !lpc_slave_firmware_cycle && lpc_slave_tpm_cycle;
1756 is_io_cycle_type = !lpc_slave_firmware_cycle && !lpc_slave_tpm_cycle;
1757
1758 // Check if cycle is configured for intercept
1759 cycle_range_intercept_allowed = 0;
1760 if (range_1_enable && ((is_io_cycle_type && range_1_allow_io) || (is_tpm_cycle_type && range_1_allow_tpm))) begin
1761 if ((lpc_slave_address >= range_1_start_address) && (lpc_slave_address <= range_1_end_address)) begin
1762 cycle_range_intercept_allowed = 1;
1763 end
1764 end
1765 if (range_2_enable && ((is_io_cycle_type && range_2_allow_io) || (is_tpm_cycle_type && range_2_allow_tpm))) begin
1766 if ((lpc_slave_address >= range_2_start_address) && (lpc_slave_address <= range_2_end_address)) begin
1767 cycle_range_intercept_allowed = 1;
1768 end
1769 end
1770 if (range_3_enable && ((is_io_cycle_type && range_3_allow_io) || (is_tpm_cycle_type && range_3_allow_tpm))) begin
1771 if ((lpc_slave_address >= range_3_start_address) && (lpc_slave_address <= range_3_end_address)) begin
1772 cycle_range_intercept_allowed = 1;
1773 end
1774 end
1775 if (range_4_enable && ((is_io_cycle_type && range_4_allow_io) || (is_tpm_cycle_type && range_4_allow_tpm))) begin
1776 if ((lpc_slave_address >= range_4_start_address) && (lpc_slave_address <= range_4_end_address)) begin
1777 cycle_range_intercept_allowed = 1;
1778 end
1779 end
1780 if (range_5_enable && ((is_io_cycle_type && range_5_allow_io) || (is_tpm_cycle_type && range_5_allow_tpm))) begin
1781 if ((lpc_slave_address >= range_5_start_address) && (lpc_slave_address <= range_5_end_address)) begin
1782 cycle_range_intercept_allowed = 1;
1783 end
1784 end
1785 if (range_6_enable && ((is_io_cycle_type && range_6_allow_io) || (is_tpm_cycle_type && range_6_allow_tpm))) begin
1786 if ((lpc_slave_address >= range_6_start_address) && (lpc_slave_address <= range_6_end_address)) begin
1787 cycle_range_intercept_allowed = 1;
1788 end
1789 end
1790 if (is_firmware_cycle_type) begin
1791 // Firmware cycles are not range-configurable
1792 cycle_range_intercept_allowed = 1;
1793 end
1794
1795 if (enable_firmware_cycles && is_firmware_cycle_type && cycle_range_intercept_allowed) begin
1796 // Handle firmware cycle here...
1797 cycle_type <= AQUIEL_LPC_CYCLE_TYPE_FIRMWARE;
1798 pending_address <= lpc_slave_address;
1799 cycle_direction <= lpc_slave_cycle_direction;
1800 pending_fw_cycle_idsel <= lpc_slave_fw_idsel;
1801 pending_fw_cycle_msize <= lpc_slave_fw_msize;
1802 if (lpc_slave_cycle_direction) begin
1803 // Write
1804 if (lpc_fw_cycle_dma_write_enable && (!lpc_fw_cycle_dma_idsel_filter_enable || (lpc_fw_cycle_dma_idsel_filter == lpc_slave_fw_idsel))) begin
1805 // DMA enabled
1806 lpc_fw_dma_current_buffer_address <= 0;
1807 lpc_fw_dma_current_wb_address <= lpc_fw_dma_base_address + (lpc_slave_address & lpc_fw_dma_offset_address_mask);
1808 lpc_fw_dma_cycle_active <= 1;
1809 lpc_fw_dma_cycle_inactive <= 0;
1810 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW01;
1811 end else begin
1812 // DMA disabled
1813 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_FW01;
1814 end
1815 end else begin
1816 // Read
1817 if (lpc_fw_cycle_dma_read_enable && (!lpc_fw_cycle_dma_idsel_filter_enable || (lpc_fw_cycle_dma_idsel_filter == lpc_slave_fw_idsel))) begin
1818 // DMA enabled
1819 lpc_fw_dma_current_buffer_address <= 0;
1820 lpc_fw_dma_current_wb_address <= lpc_fw_dma_base_address + (lpc_slave_address & lpc_fw_dma_offset_address_mask);
1821 lpc_fw_dma_cycle_active <= 1;
1822 lpc_fw_dma_cycle_inactive <= 0;
1823 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR01;
1824 end else begin
1825 // DMA disabled
1826 attn_req <= 1;
1827 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_FR01;
1828 end
1829 end
1830 end else if (enable_tpm_cycles && is_tpm_cycle_type && cycle_range_intercept_allowed) begin
1831 // Handle TPM cycle here...
1832 cycle_type <= AQUIEL_LPC_CYCLE_TYPE_TPM;
1833 pending_address <= lpc_slave_address;
1834 cycle_direction <= lpc_slave_cycle_direction;
1835 pending_fw_cycle_idsel <= 0;
1836 pending_fw_cycle_msize <= 0;
1837 if (lpc_slave_cycle_direction) begin
1838 // Write
1839 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IW01;
1840 end else begin
1841 // Read
1842 attn_req <= 1;
1843 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IR01;
1844 end
1845 end else if (enable_io_cycles && is_io_cycle_type && cycle_range_intercept_allowed) begin
1846 // Handle I/O cycle here...
1847 cycle_type <= AQUIEL_LPC_CYCLE_TYPE_IO;
1848 pending_address <= lpc_slave_address;
1849 cycle_direction <= lpc_slave_cycle_direction;
1850 pending_fw_cycle_idsel <= 0;
1851 pending_fw_cycle_msize <= 0;
1852 if (lpc_slave_cycle_direction) begin
1853 // Write
1854 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IW01;
1855 end else begin
1856 // Read
1857 attn_req <= 1;
1858 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IR01;
1859 end
1860 end else begin
1861 // Ignore every other cycle type and any known cycle types that the CPU has chosen to ignore
1862 if (lpc_slave_data_ready_sync[2] && !lpc_slave_continue) begin
1863 lpc_slave_continue <= 1;
1864 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IR02;
1865 end
1866 if (lpc_slave_data_ready_sync[2] && !lpc_slave_data_ack) begin
1867 lpc_slave_data_ack <= 1;
1868 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IW03;
1869 end
1870 if (lpc_slave_exception_sync_2 && !lpc_slave_exception_ack) begin
1871 lpc_slave_exception_ack <= 1;
1872 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_ER01;
1873 end
1874 end
1875
1876 // Latch address and control registers for further use
1877 lpc_slave_address_reg <= lpc_slave_address;
1878 lpc_slave_firmware_cycle_reg <= lpc_slave_firmware_cycle;
1879 end else begin
1880 // Ensure LPC DMA transfer buffer control is released if no LPC cycle is active
1881 lpc_fw_dma_cycle_active <= 0;
1882 lpc_fw_dma_cycle_inactive <= 1;
1883 end
1884 end
1885 LPC_SLAVE_TRANSFER_STATE_IW01: begin
1886 if (lpc_slave_data_ready_sync[2]) begin
1887 // Latch data register for CPU to read
1888 pending_data <= lpc_slave_rx_data;
1889
1890 // Signal CPU attention required
1891 attn_req <= 1;
1892 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IW02;
1893 end
1894 end
1895 LPC_SLAVE_TRANSFER_STATE_IW02: begin
1896 if (continue_transfer) begin
1897 // CPU handler complete!
1898 // Deassert attention request and start LPC ACK process
1899 lpc_slave_data_ack <= 1;
1900 attn_req <= 0;
1901 lpc_slave_signal_error <= signal_error;
1902 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IW03;
1903 end
1904 end
1905 LPC_SLAVE_TRANSFER_STATE_IW03: begin
1906 if (lpc_slave_data_ready_cont_sync[2]) begin
1907 lpc_slave_data_ack <= 0;
1908 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IW04;
1909 end
1910 end
1911 LPC_SLAVE_TRANSFER_STATE_IW04: begin
1912 if ((!lpc_slave_address_ready_sync[2]) && (!lpc_slave_data_ready_cont_sync[2])) begin
1913 // Interlocked handshake complete!
1914 // Return to idle
1915 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IDLE;
1916 end
1917 end
1918 LPC_SLAVE_TRANSFER_STATE_IR01: begin
1919 if (continue_transfer) begin
1920 // CPU handler complete!
1921 // Deassert attention request and start LPC response process
1922 if (signal_error) begin
1923 lpc_slave_tx_data <= 8'hff;
1924 end else begin
1925 lpc_slave_tx_data <= data_out;
1926 end
1927 lpc_slave_continue <= 1;
1928 attn_req <= 0;
1929 lpc_slave_signal_error <= signal_error;
1930 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IR02;
1931 end
1932 end
1933 LPC_SLAVE_TRANSFER_STATE_IR02: begin
1934 if (lpc_slave_continue_cont_sync[2]) begin
1935 lpc_slave_continue <= 0;
1936 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IR03;
1937 end
1938 end
1939 LPC_SLAVE_TRANSFER_STATE_IR03: begin
1940 if (!lpc_slave_address_ready_sync[2]) begin
1941 // Interlocked handshake complete!
1942 // Return to idle
1943 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IDLE;
1944 end
1945 end
1946 LPC_SLAVE_TRANSFER_STATE_DW01: begin
1947 if (lpc_slave_data_ready_sync[2]) begin
1948 // Set up first byte read
1949 lpc_fw_output_xfer_dma_read_addr <= lpc_fw_dma_current_buffer_address[8:0] + 0;
1950
1951 // Data ready, fire off DMA engine
1952 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW02;
1953 end
1954 end
1955 LPC_SLAVE_TRANSFER_STATE_DW02: begin
1956 if (((pending_address & lpc_fw_dma_offset_address_mask) >= lpc_fw_dma_valid_window_start_offset) && ((pending_address & lpc_fw_dma_offset_address_mask) < lpc_fw_dma_valid_window_end_offset)) begin
1957 if ((pending_address & lpc_fw_dma_offset_address_mask) < lpc_fw_dma_length) begin
1958 // DMA request is valid, start transfer
1959 // Set up second byte read
1960 lpc_fw_output_xfer_dma_read_addr <= lpc_fw_dma_current_buffer_address[8:0] + 1;
1961
1962 // Continue processing
1963 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW03;
1964 end else begin
1965 // Invalid DMA requested, fall back to CPU processing
1966 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DF01;
1967
1968 // Release transfer RAM control signals
1969 lpc_fw_dma_cycle_active <= 0;
1970 end
1971 end else begin
1972 // Invalid DMA requested, fall back to CPU processing
1973 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DF01;
1974
1975 // Release transfer RAM control signals
1976 lpc_fw_dma_cycle_active <= 0;
1977 end
1978
1979 lpc_fw_input_xfer_dma_write_wren <= 0;
1980 end
1981 LPC_SLAVE_TRANSFER_STATE_DW03: begin
1982 // Read first byte
1983 master_wishbone_dat_w_reg[63:56] <= lpc_fw_output_xfer_read_data;
1984
1985 // Set up third byte read
1986 lpc_fw_output_xfer_dma_read_addr <= lpc_fw_dma_current_buffer_address[8:0] + 2;
1987
1988 // Continue processing
1989 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW04;
1990 end
1991 LPC_SLAVE_TRANSFER_STATE_DW04: begin
1992 // Read second byte
1993 master_wishbone_dat_w_reg[55:48] <= lpc_fw_output_xfer_read_data;
1994
1995 // Set up fourth byte read
1996 lpc_fw_output_xfer_dma_read_addr <= lpc_fw_dma_current_buffer_address[8:0] + 3;
1997
1998 // Continue processing
1999 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW05;
2000 end
2001 LPC_SLAVE_TRANSFER_STATE_DW05: begin
2002 // Read third byte
2003 master_wishbone_dat_w_reg[47:40] <= lpc_fw_output_xfer_read_data;
2004
2005 // Set up fifth byte read
2006 lpc_fw_output_xfer_dma_read_addr <= lpc_fw_dma_current_buffer_address[8:0] + 4;
2007
2008 // Continue processing
2009 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW06;
2010 end
2011 LPC_SLAVE_TRANSFER_STATE_DW06: begin
2012 // Read fourth byte
2013 master_wishbone_dat_w_reg[39:32] <= lpc_fw_output_xfer_read_data;
2014
2015 // Set up sixth byte read
2016 lpc_fw_output_xfer_dma_read_addr <= lpc_fw_dma_current_buffer_address[8:0] + 5;
2017
2018 // Continue processing
2019 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW07;
2020 end
2021 LPC_SLAVE_TRANSFER_STATE_DW07: begin
2022 // Read fifth byte
2023 master_wishbone_dat_w_reg[31:24] <= lpc_fw_output_xfer_read_data;
2024
2025 // Set up seventh byte read
2026 lpc_fw_output_xfer_dma_read_addr <= lpc_fw_dma_current_buffer_address[8:0] + 6;
2027
2028 // Continue processing
2029 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW08;
2030 end
LPC_SLAVE_TRANSFER_STATE_DW08: begin
	// Read sixth byte
	master_wishbone_dat_w_reg[23:16] <= lpc_fw_output_xfer_read_data;

	// Set up eighth byte read
	lpc_fw_output_xfer_dma_read_addr <= lpc_fw_dma_current_buffer_address[8:0] + 7;

	// Continue processing
	lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW09;
end
2041 LPC_SLAVE_TRANSFER_STATE_DW09: begin
2042 // Read seventh byte
2043 master_wishbone_dat_w_reg[15:8] <= lpc_fw_output_xfer_read_data;
2044
2045 // Continue processing
2046 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW10;
2047 end
LPC_SLAVE_TRANSFER_STATE_DW10: begin
	// Final byte of the current 64-bit DMA beat; launch the Wishbone write.
	//
	// NOTE: the state register is only advanced to DW11 on the success path.
	// The previous version assigned DW11 unconditionally at the end of this
	// state, which overrode the fault-path DF01 assignments below (the last
	// nonblocking assignment in an always block wins) and left the FSM stuck
	// in DW11 waiting for an ack with no Wishbone cycle active.
	if (master_wishbone_ack) begin
		// Internal fault (unexpected ack while the master should be idle),
		// fall back to CPU processing
		lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DF01;

		// Release transfer RAM control signals
		lpc_fw_dma_cycle_active <= 0;
	end else begin
		if (lpc_fw_cycle_dma_write_enable && (!lpc_fw_cycle_dma_idsel_filter_enable || (lpc_fw_cycle_dma_idsel_filter == pending_fw_cycle_idsel))) begin
			// Read eighth byte
			master_wishbone_dat_w_reg[7:0] <= lpc_fw_output_xfer_read_data;

			// Start Wishbone transfer
			master_wishbone_adr_reg <= lpc_fw_dma_current_wb_address;
			master_wishbone_we_reg <= 1;
			master_wishbone_sel_reg <= 8'b11111111;
			master_wishbone_cyc_reg <= 1;
			master_wishbone_stb_reg <= 1;

			// Wait for Wishbone response
			lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW11;
		end else begin
			// DMA write disabled / filtered mid-cycle, fall back to CPU processing
			lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DF01;

			// Release transfer RAM control signals
			lpc_fw_dma_cycle_active <= 0;
		end
	end
end
LPC_SLAVE_TRANSFER_STATE_DW11: begin
	// Wait for the Wishbone response to the DMA write beat issued in DW10
	if (master_wishbone_err) begin
		// Release bus
		master_wishbone_cyc_reg <= 0;
		master_wishbone_stb_reg <= 0;

		// DMA failed, fall back to CPU processing
		lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DF01;

		// Release transfer RAM control signals
		lpc_fw_dma_cycle_active <= 0;
	end else if (master_wishbone_ack) begin
		// Release bus
		master_wishbone_cyc_reg <= 0;
		master_wishbone_stb_reg <= 0;

		// NOTE(review): beats advance 8 bytes at a time, but this end-of-cycle
		// guard subtracts 4 -- for a 16-byte (msize 4'b0100) cycle the buffer
		// address 8 still passes (8 < 12), allowing a third 8-byte beat
		// (24 bytes total).  Confirm whether the guard should be
		// (16 - 8) / (128 - 8).
		if (((pending_fw_cycle_msize == 4'b0100) && (lpc_fw_dma_current_buffer_address < (16 - 4)))
			|| ((pending_fw_cycle_msize == 4'b0111) && (lpc_fw_dma_current_buffer_address < (128 - 4)))) begin
			// Set up next transfer
			lpc_fw_dma_current_buffer_address <= lpc_fw_dma_current_buffer_address + 8;
			pending_address <= pending_address + 8;
			lpc_fw_dma_current_wb_address <= lpc_fw_dma_base_address + ((pending_address + 8) & lpc_fw_dma_offset_address_mask);
			lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DW02;
		end else begin
			// Release transfer RAM control signals
			lpc_fw_dma_cycle_active <= 0;

			// Start LPC response process
			lpc_slave_data_ack <= 1;
			lpc_slave_signal_error <= 0;
			lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IW03;
		end
	end
end
2112 LPC_SLAVE_TRANSFER_STATE_FW01: begin
2113 if (lpc_slave_data_ready_sync[2]) begin
2114 // Signal CPU attention required
2115 attn_req <= 1;
2116 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IW02;
2117 end
2118 end
2119 LPC_SLAVE_TRANSFER_STATE_DR01: begin
2120 if (((pending_address & lpc_fw_dma_offset_address_mask) >= lpc_fw_dma_valid_window_start_offset) && ((pending_address & lpc_fw_dma_offset_address_mask) < lpc_fw_dma_valid_window_end_offset)) begin
2121 if ((pending_address & lpc_fw_dma_offset_address_mask) < lpc_fw_dma_length) begin
2122 if (master_wishbone_ack) begin
2123 // Internal fault, fall back to CPU processing
2124 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DF01;
2125
2126 // Release transfer RAM control signals
2127 lpc_fw_dma_cycle_active <= 0;
2128 end else begin
2129 // DMA request is valid, start transfer
2130 master_wishbone_adr_reg <= lpc_fw_dma_current_wb_address;
2131 master_wishbone_we_reg <= 0;
2132 master_wishbone_sel_reg <= 8'b11111111;
2133 master_wishbone_cyc_reg <= 1;
2134 master_wishbone_stb_reg <= 1;
2135
2136 // Wait for Wishbone response
2137 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR02;
2138 end
2139 end else begin
2140 // Invalid DMA requested, fall back to CPU processing
2141 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DF01;
2142
2143 // Release transfer RAM control signals
2144 lpc_fw_dma_cycle_active <= 0;
2145 end
2146 end else begin
2147 // Invalid DMA requested, fall back to CPU processing
2148 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DF01;
2149
2150 // Release transfer RAM control signals
2151 lpc_fw_dma_cycle_active <= 0;
2152 end
2153
2154 lpc_fw_input_xfer_dma_write_wren <= 0;
2155 end
2156 LPC_SLAVE_TRANSFER_STATE_DR02: begin
2157 if (master_wishbone_err) begin
2158 // Release bus
2159 master_wishbone_cyc_reg <= 0;
2160 master_wishbone_stb_reg <= 0;
2161
2162 // DMA failed, fall back to CPU processing
2163 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DF01;
2164
2165 // Release transfer RAM control signals
2166 lpc_fw_dma_cycle_active <= 0;
2167 end else if (master_wishbone_ack) begin
2168 // Release bus
2169 master_wishbone_cyc_reg <= 0;
2170 master_wishbone_stb_reg <= 0;
2171
2172 // Cache read data
2173 lpc_fw_dma_data_cache_reg <= master_wishbone_dat_r;
2174
2175 // Write first byte
2176 lpc_fw_input_xfer_dma_write_addr <= lpc_fw_dma_current_buffer_address[8:0] + 0;
2177 lpc_fw_input_xfer_dma_write_data <= master_wishbone_dat_r[63:56];
2178 lpc_fw_input_xfer_dma_write_wren <= 1;
2179
2180 // Continue processing
2181 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR03;
2182 end
2183 end
2184 LPC_SLAVE_TRANSFER_STATE_DR03: begin
2185 // Write second byte
2186 lpc_fw_input_xfer_dma_write_addr <= lpc_fw_dma_current_buffer_address[8:0] + 1;
2187 lpc_fw_input_xfer_dma_write_data <= lpc_fw_dma_data_cache_reg[55:48];
2188 lpc_fw_input_xfer_dma_write_wren <= 1;
2189 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR04;
2190 end
2191 LPC_SLAVE_TRANSFER_STATE_DR04: begin
2192 // Write third byte
2193 lpc_fw_input_xfer_dma_write_addr <= lpc_fw_dma_current_buffer_address[8:0] + 2;
2194 lpc_fw_input_xfer_dma_write_data <= lpc_fw_dma_data_cache_reg[47:40];
2195 lpc_fw_input_xfer_dma_write_wren <= 1;
2196 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR05;
2197 end
LPC_SLAVE_TRANSFER_STATE_DR05: begin
	// Write fourth byte
	lpc_fw_input_xfer_dma_write_addr <= lpc_fw_dma_current_buffer_address[8:0] + 3;
	lpc_fw_input_xfer_dma_write_data <= lpc_fw_dma_data_cache_reg[39:32];
	lpc_fw_input_xfer_dma_write_wren <= 1;
	lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR06;
end
LPC_SLAVE_TRANSFER_STATE_DR06: begin
	// Write fifth byte
	lpc_fw_input_xfer_dma_write_addr <= lpc_fw_dma_current_buffer_address[8:0] + 4;
	lpc_fw_input_xfer_dma_write_data <= lpc_fw_dma_data_cache_reg[31:24];
	lpc_fw_input_xfer_dma_write_wren <= 1;
	lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR07;
end
LPC_SLAVE_TRANSFER_STATE_DR07: begin
	// Write sixth byte
	lpc_fw_input_xfer_dma_write_addr <= lpc_fw_dma_current_buffer_address[8:0] + 5;
	lpc_fw_input_xfer_dma_write_data <= lpc_fw_dma_data_cache_reg[23:16];
	lpc_fw_input_xfer_dma_write_wren <= 1;
	lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR08;
end
LPC_SLAVE_TRANSFER_STATE_DR08: begin
	// Write seventh byte
	lpc_fw_input_xfer_dma_write_addr <= lpc_fw_dma_current_buffer_address[8:0] + 6;
	lpc_fw_input_xfer_dma_write_data <= lpc_fw_dma_data_cache_reg[15:8];
	lpc_fw_input_xfer_dma_write_wren <= 1;
	lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR09;
end
LPC_SLAVE_TRANSFER_STATE_DR09: begin
	// Write eighth (final) byte of the current 64-bit beat
	lpc_fw_input_xfer_dma_write_addr <= lpc_fw_dma_current_buffer_address[8:0] + 7;
	lpc_fw_input_xfer_dma_write_data <= lpc_fw_dma_data_cache_reg[7:0];
	lpc_fw_input_xfer_dma_write_wren <= 1;

	if (lpc_fw_cycle_dma_read_enable && (!lpc_fw_cycle_dma_idsel_filter_enable || (lpc_fw_cycle_dma_idsel_filter == pending_fw_cycle_idsel))) begin
		// NOTE(review): beats advance 8 bytes at a time, but this end-of-cycle
		// guard subtracts 4 -- for a 16-byte (msize 4'b0100) cycle the buffer
		// address 8 still passes (8 < 12), allowing a third 8-byte beat
		// (24 bytes total).  Confirm whether the guard should be
		// (16 - 8) / (128 - 8).
		if (((pending_fw_cycle_msize == 4'b0100) && (lpc_fw_dma_current_buffer_address < (16 - 4)))
			|| ((pending_fw_cycle_msize == 4'b0111) && (lpc_fw_dma_current_buffer_address < (128 - 4)))) begin
			// Set up next transfer
			lpc_fw_dma_current_buffer_address <= lpc_fw_dma_current_buffer_address + 8;
			pending_address <= pending_address + 8;
			lpc_fw_dma_current_wb_address <= lpc_fw_dma_base_address + ((pending_address + 8) & lpc_fw_dma_offset_address_mask);
			lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR01;
		end else begin
			lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DR10;
		end
	end else begin
		// DMA read disabled / filtered mid-cycle, fall back to CPU processing
		lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_DF01;

		// Release transfer RAM control signals
		lpc_fw_dma_cycle_active <= 0;
	end
end
2251 LPC_SLAVE_TRANSFER_STATE_DR10: begin
2252 // Release transfer RAM control signals
2253 lpc_fw_dma_cycle_active <= 0;
2254
2255 // Start LPC response process
2256 lpc_slave_continue <= 1;
2257 lpc_slave_signal_error <= 0;
2258 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_FR02;
2259 end
2260 LPC_SLAVE_TRANSFER_STATE_DF01: begin
2261 // If DMA was active, allow one cycle for RAM control signals to reload from override status
2262 lpc_fw_dma_cycle_inactive <= 1;
2263
2264 // DMA transfer failed, fall back to CPU processing
2265 attn_req <= 1;
2266 if (cycle_direction) begin
2267 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IW02;
2268 end else begin
2269 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_FR01;
2270 end
2271 end
2272 LPC_SLAVE_TRANSFER_STATE_FR01: begin
2273 if (continue_transfer) begin
2274 // CPU handler complete!
2275 // Deassert attention request and start LPC response process
2276 lpc_slave_continue <= 1;
2277 attn_req <= 0;
2278 lpc_slave_signal_error <= signal_error;
2279 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_FR02;
2280 end
2281 end
2282 LPC_SLAVE_TRANSFER_STATE_FR02: begin
2283 // Allow one cycle for RAM control signals to reload from override status
2284 // This is safe to set here regardless of if the previous cycle was actually the DMA engine
2285 lpc_fw_dma_cycle_inactive <= 1;
2286
2287 if (lpc_slave_continue_cont_sync[2]) begin
2288 lpc_slave_continue <= 0;
2289 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_FR03;
2290 end
2291 end
2292 LPC_SLAVE_TRANSFER_STATE_FR03: begin
2293 if (!lpc_slave_address_ready_sync[2]) begin
2294 // Interlocked handshake complete!
2295 // Return to idle
2296 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IDLE;
2297 end
2298 end
2299 LPC_SLAVE_TRANSFER_STATE_ER01: begin
2300 if (!lpc_slave_exception_sync_2) begin
2301 lpc_slave_exception_ack <= 0;
2302
2303 // Interlocked handshake complete!
2304 // Return to idle
2305 lpc_slave_transfer_state <= LPC_SLAVE_TRANSFER_STATE_IDLE;
2306 end
2307 end
2308 endcase
2309
2310 if (attn_req) begin
2311 case (cycle_type)
2312 AQUIEL_LPC_CYCLE_TYPE_IO: begin
2313 if (lpc_io_cycle_irq_enable) begin
2314 lpc_io_cycle_irq <= 1;
2315 end else begin
2316 lpc_io_cycle_irq <= 0;
2317 end
2318 lpc_tpm_cycle_irq <= 0;
2319 lpc_firmware_cycle_irq <= 0;
2320 end
2321 AQUIEL_LPC_CYCLE_TYPE_TPM: begin
2322 lpc_io_cycle_irq <= 0;
2323 if (lpc_tpm_cycle_irq_enable) begin
2324 lpc_tpm_cycle_irq <= 1;
2325 end else begin
2326 lpc_tpm_cycle_irq <= 0;
2327 end
2328 lpc_firmware_cycle_irq <= 0;
2329 end
2330 AQUIEL_LPC_CYCLE_TYPE_FIRMWARE: begin
2331 lpc_io_cycle_irq <= 0;
2332 lpc_tpm_cycle_irq <= 0;
2333 if (lpc_firmware_cycle_irq_enable) begin
2334 lpc_firmware_cycle_irq <= 1;
2335 end else begin
2336 lpc_firmware_cycle_irq <= 0;
2337 end
2338 end
2339 default: begin
2340 lpc_io_cycle_irq <= 0;
2341 lpc_tpm_cycle_irq <= 0;
2342 lpc_firmware_cycle_irq <= 0;
2343 end
2344 endcase
2345 end else begin
2346 lpc_io_cycle_irq <= 0;
2347 lpc_tpm_cycle_irq <= 0;
2348 lpc_firmware_cycle_irq <= 0;
2349 end
2350 end
2351
2352 // Synchronizer logic for LPC core to Wishbone traffic
2353 // Three flip flops used for maximum MTBF on control lines
// All data paths are synchronized from these signals using req/ack handshaking mechanisms
2355 lpc_slave_address_ready_sync[2] <= lpc_slave_address_ready_sync[1];
2356 lpc_slave_address_ready_sync[1] <= lpc_slave_address_ready_sync[0];
2357 lpc_slave_address_ready_sync[0] <= lpc_slave_address_ready;
2358 lpc_slave_data_ready_sync[2] <= lpc_slave_data_ready_sync[1];
2359 lpc_slave_data_ready_sync[1] <= lpc_slave_data_ready_sync[0];
2360 lpc_slave_data_ready_sync[0] <= lpc_slave_data_ready;
2361 lpc_slave_exception_sync_2 <= lpc_slave_exception_sync_1;
2362 lpc_slave_exception_sync_1 <= lpc_slave_exception_sync_0;
2363 lpc_slave_exception_sync_0 <= lpc_slave_exception;
2364 lpc_slave_data_ready_cont_sync[2] <= lpc_slave_data_ready_cont_sync[1];
2365 lpc_slave_data_ready_cont_sync[1] <= lpc_slave_data_ready_cont_sync[0];
2366 lpc_slave_data_ready_cont_sync[0] <= lpc_slave_data_ready_cont;
// NOTE(review): only two synchronizer stages here -- every other control-line
// synchronizer in this section captures into a [0] stage first ("three flip
// flops" per the section comment).  Confirm whether a
// lpc_slave_continue_cont_sync[0] capture stage is missing.
lpc_slave_continue_cont_sync[2] <= lpc_slave_continue_cont_sync[1];
lpc_slave_continue_cont_sync[1] <= lpc_slave_continue_cont;
2369 lpc_reset_n_sync[2] <= lpc_reset_n_sync[1];
2370 lpc_reset_n_sync[1] <= lpc_reset_n_sync[0];
2371 lpc_reset_n_sync[0] <= lpc_reset_n;
2372
2373 vuart1_h2b_fifo_reset_sync[2] <= vuart1_h2b_fifo_reset_sync[1];
2374 vuart1_h2b_fifo_reset_sync[1] <= vuart1_h2b_fifo_reset_sync[0];
2375 vuart1_h2b_fifo_reset_sync[0] <= vuart1_h2b_fifo_reset;
2376 vuart2_h2b_fifo_reset_sync[2] <= vuart2_h2b_fifo_reset_sync[1];
2377 vuart2_h2b_fifo_reset_sync[1] <= vuart2_h2b_fifo_reset_sync[0];
2378 vuart2_h2b_fifo_reset_sync[0] <= vuart2_h2b_fifo_reset;
2379 vuart1_b2h_fifo_wfull_sync[2] <= vuart1_b2h_fifo_wfull_sync[1];
2380 vuart1_b2h_fifo_wfull_sync[1] <= vuart1_b2h_fifo_wfull_sync[0];
2381 vuart1_b2h_fifo_wfull_sync[0] <= vuart1_b2h_fifo_wfull;
2382 vuart1_b2h_fifo_reset_sync[2] <= vuart1_b2h_fifo_reset_sync[1];
2383 vuart1_b2h_fifo_reset_sync[1] <= vuart1_b2h_fifo_reset_sync[0];
2384 vuart1_b2h_fifo_reset_sync[0] <= vuart1_b2h_fifo_reset;
2385 vuart2_b2h_fifo_wfull_sync[2] <= vuart2_b2h_fifo_wfull_sync[1];
2386 vuart2_b2h_fifo_wfull_sync[1] <= vuart2_b2h_fifo_wfull_sync[0];
2387 vuart2_b2h_fifo_wfull_sync[0] <= vuart2_b2h_fifo_wfull;
2388 vuart2_b2h_fifo_reset_sync[2] <= vuart2_b2h_fifo_reset_sync[1];
2389 vuart2_b2h_fifo_reset_sync[1] <= vuart2_b2h_fifo_reset_sync[0];
2390 vuart2_b2h_fifo_reset_sync[0] <= vuart2_b2h_fifo_reset;
2391 vuart1_status_register_sync_2 <= vuart1_status_register_sync_1;
2392 vuart1_status_register_sync_1 <= vuart1_status_register_sync_0;
2393 vuart1_status_register_sync_0 <= vuart1_status_register;
2394 vuart2_status_register_sync_2 <= vuart2_status_register_sync_1;
2395 vuart2_status_register_sync_1 <= vuart2_status_register_sync_0;
2396 vuart2_status_register_sync_0 <= vuart2_status_register;
2397 vuart1_assert_b2h_break_clear_sync[2] <= vuart1_assert_b2h_break_clear_sync[1];
2398 vuart1_assert_b2h_break_clear_sync[1] <= vuart1_assert_b2h_break_clear_sync[0];
2399 vuart1_assert_b2h_break_clear_sync[0] <= vuart1_assert_b2h_break_clear;
2400 vuart2_assert_b2h_break_clear_sync[2] <= vuart2_assert_b2h_break_clear_sync[1];
2401 vuart2_assert_b2h_break_clear_sync[1] <= vuart2_assert_b2h_break_clear_sync[0];
2402 vuart2_assert_b2h_break_clear_sync[0] <= vuart2_assert_b2h_break_clear;
2403
2404 ipmi_bt_bmc_to_host_ctl_sms_ack_sync[2] <= ipmi_bt_bmc_to_host_ctl_sms_ack_sync[1];
2405 ipmi_bt_bmc_to_host_ctl_sms_ack_sync[1] <= ipmi_bt_bmc_to_host_ctl_sms_ack_sync[0];
2406 ipmi_bt_bmc_to_host_ctl_sms_ack_sync[0] <= ipmi_bt_bmc_to_host_ctl_sms_ack;
2407 ipmi_bt_bmc_to_host_ctl_attn_ack_sync[2] <= ipmi_bt_bmc_to_host_ctl_attn_ack_sync[1];
2408 ipmi_bt_bmc_to_host_ctl_attn_ack_sync[1] <= ipmi_bt_bmc_to_host_ctl_attn_ack_sync[0];
2409 ipmi_bt_bmc_to_host_ctl_attn_ack_sync[0] <= ipmi_bt_bmc_to_host_ctl_attn_ack;
2410 ipmi_bt_host_to_bmc_ctl_attn_req_sync[2] <= ipmi_bt_host_to_bmc_ctl_attn_req_sync[1];
2411 ipmi_bt_host_to_bmc_ctl_attn_req_sync[1] <= ipmi_bt_host_to_bmc_ctl_attn_req_sync[0];
2412 ipmi_bt_host_to_bmc_ctl_attn_req_sync[0] <= ipmi_bt_host_to_bmc_ctl_attn_req;
2413 ipmi_bt_host_to_bmc_ctl_oem0_req_sync[2] <= ipmi_bt_host_to_bmc_ctl_oem0_req_sync[1];
2414 ipmi_bt_host_to_bmc_ctl_oem0_req_sync[1] <= ipmi_bt_host_to_bmc_ctl_oem0_req_sync[0];
2415 ipmi_bt_host_to_bmc_ctl_oem0_req_sync[0] <= ipmi_bt_host_to_bmc_ctl_oem0_req;
2416 ipmi_bt_irq_ack_sync[2] <= ipmi_bt_irq_ack_sync[1];
2417 ipmi_bt_irq_ack_sync[1] <= ipmi_bt_irq_ack_sync[0];
2418 ipmi_bt_irq_ack_sync[0] <= ipmi_bt_irq_ack;
2419 ipmi_bt_irq_bmc_reset_sync[2] <= ipmi_bt_irq_bmc_reset_sync[1];
2420 ipmi_bt_irq_bmc_reset_sync[1] <= ipmi_bt_irq_bmc_reset_sync[0];
2421 ipmi_bt_irq_bmc_reset_sync[0] <= ipmi_bt_irq_bmc_reset;
2422 ipmi_bt_host_to_bmc_ctl_h_busy_sync[2] <= ipmi_bt_host_to_bmc_ctl_h_busy_sync[1];
2423 ipmi_bt_host_to_bmc_ctl_h_busy_sync[1] <= ipmi_bt_host_to_bmc_ctl_h_busy_sync[0];
2424 ipmi_bt_host_to_bmc_ctl_h_busy_sync[0] <= ipmi_bt_host_to_bmc_ctl_h_busy;
2425 ipmi_bt_irq_enable_sync[2] <= ipmi_bt_irq_enable_sync[1];
2426 ipmi_bt_irq_enable_sync[1] <= ipmi_bt_irq_enable_sync[0];
2427 ipmi_bt_irq_enable_sync[0] <= ipmi_bt_irq_enable;
2428 end
2429
	// Synchronizer logic for Wishbone to LPC core traffic
	//
	// Each chain below is a two-stage flip-flop synchronizer clocked in the
	// LPC clock domain (lpc_clock), sampling a signal that originates in the
	// Wishbone clock domain.  Only the final stage of each chain ([1], or the
	// *_sync_1 register for the discretely-named chains) should be consumed
	// by LPC-domain logic.  NOTE(review): the multi-bit values synchronized
	// here (e.g. the VUART control registers) are presumably quasi-static
	// and/or qualified by the req/ack handshaking referenced elsewhere in
	// this file -- confirm against the handshake logic, which is outside
	// this chunk.
	always @(posedge lpc_clock) begin
		// Two flip flops used on the return path
		// LPC slave transaction returns: continue/ack/error/exception-ack
		lpc_slave_continue_sync[1] <= lpc_slave_continue_sync[0];
		lpc_slave_continue_sync[0] <= lpc_slave_continue;
		lpc_slave_data_ack_sync[1] <= lpc_slave_data_ack_sync[0];
		lpc_slave_data_ack_sync[0] <= lpc_slave_data_ack;
		lpc_slave_signal_error_sync[1] <= lpc_slave_signal_error_sync[0];
		lpc_slave_signal_error_sync[0] <= lpc_slave_signal_error;
		lpc_slave_exception_ack_sync[1] <= lpc_slave_exception_ack_sync[0];
		lpc_slave_exception_ack_sync[0] <= lpc_slave_exception_ack;
		// IRQ transmit handshake and peripheral reset
		irq_tx_ready_sync[1] <= irq_tx_ready_sync[0];
		irq_tx_ready_sync[0] <= irq_tx_ready;
		irq_request_sync_1 <= irq_request_sync_0;
		irq_request_sync_0 <= irq_request;
		peripheral_reset_sync[1] <= peripheral_reset_sync[0];
		peripheral_reset_sync[0] <= peripheral_reset;

		// VUART FIFO status and control registers
		vuart1_h2b_fifo_rempty_sync[1] <= vuart1_h2b_fifo_rempty_sync[0];
		vuart1_h2b_fifo_rempty_sync[0] <= vuart1_h2b_fifo_rempty;
		vuart2_h2b_fifo_rempty_sync[1] <= vuart2_h2b_fifo_rempty_sync[0];
		vuart2_h2b_fifo_rempty_sync[0] <= vuart2_h2b_fifo_rempty;
		vuart1_control_register_sync_1 <= vuart1_control_register_sync_0;
		vuart1_control_register_sync_0 <= vuart1_control_register;
		vuart2_control_register_sync_1 <= vuart2_control_register_sync_0;
		vuart2_control_register_sync_0 <= vuart2_control_register;

		// IPMI BT control / handshake signals
		ipmi_bt_bmc_to_host_ctl_sms_req_sync[1] <= ipmi_bt_bmc_to_host_ctl_sms_req_sync[0];
		ipmi_bt_bmc_to_host_ctl_sms_req_sync[0] <= ipmi_bt_bmc_to_host_ctl_sms_req;
		ipmi_bt_bmc_to_host_ctl_attn_req_sync[1] <= ipmi_bt_bmc_to_host_ctl_attn_req_sync[0];
		ipmi_bt_bmc_to_host_ctl_attn_req_sync[0] <= ipmi_bt_bmc_to_host_ctl_attn_req;
		ipmi_bt_bmc_to_host_ctl_sms_ack_cont_sync[1] <= ipmi_bt_bmc_to_host_ctl_sms_ack_cont_sync[0];
		ipmi_bt_bmc_to_host_ctl_sms_ack_cont_sync[0] <= ipmi_bt_bmc_to_host_ctl_sms_ack_cont;
		ipmi_bt_bmc_to_host_ctl_attn_ack_cont_sync[1] <= ipmi_bt_bmc_to_host_ctl_attn_ack_cont_sync[0];
		ipmi_bt_bmc_to_host_ctl_attn_ack_cont_sync[0] <= ipmi_bt_bmc_to_host_ctl_attn_ack_cont;
		ipmi_bt_host_to_bmc_ctl_attn_req_cont_sync[1] <= ipmi_bt_host_to_bmc_ctl_attn_req_cont_sync[0];
		ipmi_bt_host_to_bmc_ctl_attn_req_cont_sync[0] <= ipmi_bt_host_to_bmc_ctl_attn_req_cont;
		ipmi_bt_host_to_bmc_ctl_oem0_req_cont_sync[1] <= ipmi_bt_host_to_bmc_ctl_oem0_req_cont_sync[0];
		ipmi_bt_host_to_bmc_ctl_oem0_req_cont_sync[0] <= ipmi_bt_host_to_bmc_ctl_oem0_req_cont;
		ipmi_bt_irq_ack_cont_sync[1] <= ipmi_bt_irq_ack_cont_sync[0];
		ipmi_bt_irq_ack_cont_sync[0] <= ipmi_bt_irq_ack_cont;
		ipmi_bt_irq_bmc_reset_cont_sync[1] <= ipmi_bt_irq_bmc_reset_cont_sync[0];
		ipmi_bt_irq_bmc_reset_cont_sync[0] <= ipmi_bt_irq_bmc_reset_cont;
		ipmi_bt_bmc_to_host_ctl_b_busy_sync[1] <= ipmi_bt_bmc_to_host_ctl_b_busy_sync[0];
		ipmi_bt_bmc_to_host_ctl_b_busy_sync[0] <= ipmi_bt_bmc_to_host_ctl_b_busy;
		ipmi_bt_irq_req_sync[1] <= ipmi_bt_irq_req_sync[0];
		ipmi_bt_irq_req_sync[0] <= ipmi_bt_irq_req;
	end
2478 endmodule