1 # Copyright 2018 ETH Zurich and University of Bologna.
2 # Copyright and related rights are licensed under the Solderpad Hardware
3 # License, Version 0.51 (the "License"); you may not use this file except in
4 # compliance with the License. You may obtain a copy of the License at
5 # http://solderpad.org/licenses/SHL-0.51. Unless required by applicable law
6 # or agreed to in writing, software, hardware and materials distributed under
7 # this License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
8 # CONDITIONS OF ANY KIND, either express or implied. See the License for the
9 # specific language governing permissions and limitations under the License.
11 # Author: Florian Zaruba, ETH Zurich
13 # Description: Handles cache misses.
19 import std_cache_pkg
::*;
class MissReq(RecordObject):
    """Record describing a cache-miss request plus the associated
    cache-line/byte-enable fields used while servicing the miss.

    Widths come from the dcache package constants (DCACHE_TAG_WIDTH,
    DCACHE_LINE_WIDTH, DCACHE_SET_ASSOC).
    """
    def __init__(self, name=None):
        Record.__init__(self, name)
        self.addr = Signal(64)     # miss address
        self.wdata = Signal(64)    # write data for the miss
        self.tag = Signal(DCACHE_TAG_WIDTH)      # tag array
        self.data = Signal(DCACHE_LINE_WIDTH)    # data array
        self.valid = Signal()      # state array
        self.dirty = Signal()      # state array
        # cache line byte enable
        # NOTE(review): the two assignments below rebind self.tag and
        # self.data, clobbering the tag/data fields above.  In the original
        # SystemVerilog these byte-enables live in a separate cl_be_t
        # record -- confirm whether they should get distinct names here.
        # (Fixed here: the original was missing an opening parenthesis in
        # both width expressions.)
        self.tag = Signal((DCACHE_TAG_WIDTH+7)//8)    # byte enable into tag array
        self.data = Signal((DCACHE_LINE_WIDTH+7)//8)  # byte enable data array
        # bit enable into state array (valid for a pair of dirty/valid bits)
        self.vldrty = Signal(DCACHE_SET_ASSOC)
58 WB_CACHELINE_FLUSH, # 3
60 WB_CACHELINE_MISS, # 5
class MissHandler(Elaboratable):
    """Handles cache misses: arbitration between bypass and refill,
    write-back/flush of cache lines, and AMO handling.

    All ports are declared as attributes in __init__; the (missing in the
    pasted original) ``def __init__`` line is restored here, bare port
    names are made instance attributes, and keyword-before-positional
    ``Signal(name=..., width)`` calls are corrected to
    ``Signal(width, name=...)``.
    """
    def __init__(self):
        self.flush_i = Signal()      # flush request
        self.flush_ack_o = Signal()  # acknowledge successful flush
        self.miss_o = Signal()
        self.busy_i = Signal()       # dcache is busy with something
        # miss-request ports, one per requesting unit
        self.miss_req_i = Array(MissReq(name="missreq")
                                for i in range(NR_PORTS))
        # bypass interface (these lacked "self." in the original)
        self.bypass_gnt_o = Signal(NR_PORTS)
        self.bypass_valid_o = Signal(NR_PORTS)
        self.bypass_data_o = Array(Signal(64, name="bdata_o")
                                   for i in range(NR_PORTS))
        # TODO(review): untranslated SystemVerilog ports, need nmigen
        # equivalents of the ariane_axi request/response records:
        #   output ariane_axi::req_t  axi_bypass_o
        #   input  ariane_axi::resp_t axi_bypass_i
        # Miss handling (~> cacheline refill)
        self.miss_gnt_o = Signal(NR_PORTS)
        self.active_serving_o = Signal(NR_PORTS)
        self.critical_word_o = Signal(64)
        self.critical_word_valid_o = Signal()
        # TODO(review): untranslated SystemVerilog ports:
        #   output ariane_axi::req_t  axi_data_o
        #   input  ariane_axi::resp_t axi_data_i
        # MSHR aliasing-check inputs (original copy-pasted the name
        # "bdata_o" here; renamed to match the port)
        self.mshr_addr_i = Array(Signal(56, name="mshr_addr_i")
                                 for i in range(NR_PORTS))
        self.mshr_addr_matches_o = Signal(NR_PORTS)
        self.mshr_index_matches_o = Signal(NR_PORTS)
        # AMO response port
        self.amo_resp_o = AMOResp()
        # Port to SRAMs, for refill and eviction
        self.req_o = Signal(DCACHE_SET_ASSOC)
        self.addr_o = Signal(DCACHE_INDEX_WIDTH)  # address into cache array
        self.data_i = Array(CacheLine() for i in range(DCACHE_SET_ASSOC))
118 def elaborate(self
, platform
):
120 mshr_t mshr_d
, mshr_q
;
121 logic
[DCACHE_INDEX_WIDTH
-1:0] cnt_d
, cnt_q
;
122 logic
[DCACHE_SET_ASSOC
-1:0] evict_way_d
, evict_way_q
;
123 # cache line to evict
124 cache_line_t evict_cl_d
, evict_cl_q
;
126 logic serve_amo_d
, serve_amo_q
;
127 # Request from one FSM
128 logic
[NR_PORTS
-1:0] miss_req_valid
;
129 logic
[NR_PORTS
-1:0] miss_req_bypass
;
130 logic
[NR_PORTS
-1:0][63:0] miss_req_addr
;
131 logic
[NR_PORTS
-1:0][63:0] miss_req_wdata
;
132 logic
[NR_PORTS
-1:0] miss_req_we
;
133 logic
[NR_PORTS
-1:0][7:0] miss_req_be
;
134 logic
[NR_PORTS
-1:0][1:0] miss_req_size
;
136 # Cache Line Refill <-> AXI
137 logic req_fsm_miss_valid
;
138 logic
[63:0] req_fsm_miss_addr
;
139 logic
[DCACHE_LINE_WIDTH
-1:0] req_fsm_miss_wdata
;
140 logic req_fsm_miss_we
;
141 logic
[(DCACHE_LINE_WIDTH
/8)-1:0] req_fsm_miss_be
;
142 ariane_axi
::ad_req_t req_fsm_miss_req
;
143 logic
[1:0] req_fsm_miss_size
;
146 logic valid_miss_fsm
;
147 logic
[(DCACHE_LINE_WIDTH
/64)-1:0][63:0] data_miss_fsm
;
149 # Cache Management <-> LFSR
151 logic
[DCACHE_SET_ASSOC
-1:0] lfsr_oh
;
152 logic
[$
clog2(DCACHE_SET_ASSOC
-1)-1:0] lfsr_bin
;
154 ariane_pkg
::amo_t amo_op
;
155 logic
[63:0] amo_operand_a
, amo_operand_b
, amo_result_o
;
158 logic
[63:3] address
;
160 } reservation_d
, reservation_q
;
162 # ------------------------------
164 # ------------------------------
165 evict_way
= Signal(DCACHE_SET_ASSOC
)
166 valid_way
= Signal(DCACHE_SET_ASSOC
)
168 for (i
in range(DCACHE_SET_ASSOC
):
169 comb
+= evict_way
[i
].eq(data_i
[i
].valid
& data_i
[i
].dirty
)
170 comb
+= valid_way
[i
].eq(data_i
[i
].valid
)
172 # ----------------------
173 # Default Assignments
174 # ----------------------
176 req_fsm_miss_req
= ariane_axi
::CACHE_LINE_REQ
;
177 req_fsm_miss_size
= Const(0b11, 2)
179 serve_amo_d
= serve_amo_q
;
180 # --------------------------------
181 # Flush and Miss operation
182 # --------------------------------
185 evict_way_d
= evict_way_q
;
186 evict_cl_d
= evict_cl_q
;
188 # communicate to the requester which unit we are currently serving
189 active_serving_o
[mshr_q
.id] = mshr_q
.valid
;
191 # silence the unit when not used
192 amo_op
= amo_req_i
.amo_op
;
194 reservation_d
= reservation_q
;
195 with m
.FSM() as state_q
:
198 # lowest priority are AMOs, wait until everything else
199 # is served before going for the AMOs
200 with m
.If (amo_req_i
.req
& ~busy_i
):
202 with m
.If(~serve_amo_q
):
203 m
.next
= "FLUSH_REQ_STATUS"
211 # check if we want to flush and can flush
212 # e.g.: we are not busy anymore
213 # TODO: Check that the busy flag is indeed needed
214 with m
.If (flush_i
& ~busy_i
):
215 m
.next
= "FLUSH_REQ_STATUS"
218 # check if one of the state machines missed
219 for i
in range(NR_PORTS
):
220 # here comes the refill portion of code
221 with m
.If (miss_req_valid
[i
] & ~miss_req_bypass
[i
]):
223 # we are taking another request so don't
227 wid
= DCACHE_TAG_WIDTH
+DCACHE_INDEX_WIDTH
228 comb
+= [ mshr_d
.valid
.eq(0b1),
229 mshr_d
.we
.eq(miss_req_we
[i
]),
231 mshr_d
.addr
.eq(miss_req_addr
[i
][0:wid
]),
232 mshr_d
.wdata
.eq(miss_req_wdata
[i
]),
233 mshr_d
.be
.eq(miss_req_be
[i
]),
237 # ~> we missed on the cache
239 # 1. Check if there is an empty cache-line
240 # 2. If not -> evict one
242 sync
+= addr_o
.eq(mshr_q
.addr
[:DCACHE_INDEX_WIDTH
]
246 # ~> second miss cycle
247 with m
.Case("MISS_REPL"):
248 # if all are valid we need to evict one,
249 # pseudo random from LFSR
250 with m
.If(~
(~valid_way
).bool()):
251 comb
+= lfsr_enable
.eq(0b1)
252 comb
+= evict_way_d
.eq(lfsr_oh
)
253 # do we need to write back the cache line?
254 with m
.If(data_i
[lfsr_bin
].dirty
):
255 state_d
= WB_CACHELINE_MISS
;
256 comb
+= evict_cl_d
.tag
.eq(data_i
[lfsr_bin
].tag
)
257 comb
+= evict_cl_d
.data
.eq(data_i
[lfsr_bin
].data
)
258 comb
+= cnt_d
.eq(mshr_q
.addr
[:DCACHE_INDEX_WIDTH
])
259 # no - we can request a cache line now
261 m
.next
= "REQ_CACHELINE"
262 # we have at least one free way
264 # get victim cache-line by looking for the
265 # first non-valid bit
266 comb
+= evict_way_d
.eq(get_victim_cl(~valid_way
)
267 m
.next
= "REQ_CACHELINE"
269 # ~> we can just load the cache-line,
270 # the way is store in evict_way_q
271 with m
.Case("REQ_CACHELINE"):
272 comb
+= req_fsm_miss_valid
.eq(1)
273 sync
+= req_fsm_miss_addr
.eq(mshr_q
.addr
)
275 with m
.If (gnt_miss_fsm
):
276 m
.next
= "SAVE_CACHELINE"
277 comb
+= miss_gnt_o
[mshr_q
.id].eq(1)
279 # ~> replace the cacheline
280 with m
.Case("SAVE_CACHELINE"):
281 # calculate cacheline offset
282 automatic logic
[$
clog2(DCACHE_LINE_WIDTH
)-1:0] cl_offset
;
283 sync
+= cl_offset
.eq(mshr_q
.addr
[3:DCACHE_BYTE_OFFSET
] << 6)
284 # we've got a valid response from refill unit
285 with m
.If (valid_miss_fsm
):
286 wid
= DCACHE_TAG_WIDTH
+DCACHE_INDEX_WIDTH
287 sync
+= addr_o
.eq(mshr_q
.addr
[:DCACHE_INDEX_WIDTH
])
288 sync
+= req_o
.eq(evict_way_q
)
291 sync
+= be_o
.vldrty
.eq(evict_way_q
)
292 sync
+= data_o
.tag
.eq(mshr_q
.addr
[DCACHE_INDEX_WIDTH
:wid
]
293 comb
+= data_o
.data
.eq(data_miss_fsm
)
294 comb
+= data_o
.valid
.eq(1)
295 comb
+= data_o
.dirty
.eq(0)
298 with m
.If (mshr_q
.we
):
299 # Yes, so safe the updated data now
301 # check if we really want to write
302 # the corresponding byte
303 with m
.If (mshr_q
.be
[i
]):
304 sync
+= data_o
.data
[(cl_offset
+ i
*8) +: 8].eq(mshr_q
.wdata
[i
];
305 # it's immediately dirty if we write
306 comb
+= data_o
.dirty
.eq(1)
309 comb
+= mshr_d
.valid
.eq(0)
313 # ------------------------------
314 # Write Back Operation
315 # ------------------------------
316 # ~> evict a cache line from way saved in evict_way_q
317 with m
.Case("WB_CACHELINE_FLUSH"):
318 with m
.Case("WB_CACHELINE_MISS"):
320 comb
+= req_fsm_miss_valid
.eq(0b1)
321 sync
+= req_fsm_miss_addr
.eq({evict_cl_q
.tag
, cnt_q
[DCACHE_INDEX_WIDTH
-1:DCACHE_BYTE_OFFSET
], {{DCACHE_BYTE_OFFSET}{0b0}
}};
322 comb
+= req_fsm_miss_be
.eq(1)
323 comb
+= req_fsm_miss_we
.eq(0b1)
324 sync
+= req_fsm_miss_wdata
.eq(evict_cl_q
.data
;
326 # we've got a grant --> this is timing critical, think about it
327 if (gnt_miss_fsm
) begin
329 sync
+= addr_o
.eq(cnt_q
)
330 comb
+= req_o
.eq(0b1)
331 comb
+= we_o
.eq(0b1)
332 comb
+= data_o
.valid
.eq(INVALIDATE_ON_FLUSH ?
0b0 : 0b1)
334 sync
+= be_o
.vldrty
.eq(evict_way_q
)
335 # go back to handling the miss or flushing,
336 # depending on where we came from
337 with m
.If(state_q
== WB_CACHELINE_MISS
):
340 m
.next
= "FLUSH_REQ_STATUS"
342 # ------------------------------
343 # Flushing & Initialization
344 # ------------------------------
345 # ~> make another request to check the same
346 # cache-line if there are still some valid entries
347 with m
.Case("FLUSH_REQ_STATUS"):
349 sync
+= addr_o
.eq(cnt_q
)
352 with m
.Case("FLUSHING"):
354 # at least one of the cache lines is dirty
355 with m
.If(~evict_way
):
356 # evict cache line, look for the first
357 # cache-line which is dirty
358 comb
+= evict_way_d
.eq(get_victim_cl(evict_way
))
359 comb
+= evict_cl_d
.eq(data_i
[one_hot_to_bin(evict_way
)])
360 state_d
= WB_CACHELINE_FLUSH
;
361 # not dirty ~> increment and continue
363 # increment and re-request
364 sync
+= cnt_d
.eq(cnt_q
+ (1 << DCACHE_BYTE_OFFSET
))
365 m
.next
= "FLUSH_REQ_STATUS"
366 sync
+= addr_o
.eq(cnt_q
)
368 comb
+= be_o
.vldrty
.eq(INVALIDATE_ON_FLUSH ?
1 : 0)
370 # finished with flushing operation, go back to idle
371 with m
.If (cnt_q
[DCACHE_BYTE_OFFSET
:DCACHE_INDEX_WIDTH
] \
372 == DCACHE_NUM_WORDS
-1):
373 # only acknowledge if the flush wasn't
374 # triggered by an atomic
375 sync
+= flush_ack_o
.eq(~serve_amo_q
)
378 # ~> only called after reset
380 # initialize status array
381 sync
+= addr_o
.eq(cnt_q
)
384 # only write the dirty array
385 comb
+= be_o
.vldrty
.eq(1)
386 sync
+= cnt_d
.eq(cnt_q
+ (1 << DCACHE_BYTE_OFFSET
))
387 # finished initialization
388 with m
.If (cnt_q
[DCACHE_BYTE_OFFSET
:DCACHE_INDEX_WIDTH
] \
389 == DCACHE_NUM_WORDS
-1)
392 # ----------------------
394 # ----------------------
395 # TODO(zarubaf) Move this closer to memory
396 # ~> we are here because we need to do the AMO,
397 # the cache is clean at this point
398 # start by executing the load
399 with m
.Case("AMO_LOAD"):
400 comb
+= req_fsm_miss_valid
.eq(1)
401 # address is in operand a
402 comb
+= req_fsm_miss_addr
.eq(amo_req_i
.operand_a
)
403 comb
+= req_fsm_miss_req
.eq(ariane_axi
::SINGLE_REQ
)
404 comb
+= req_fsm_miss_size
.eq(amo_req_i
.size
)
405 # the request has been granted
406 with m
.If(gnt_miss_fsm
):
407 m
.next
= "AMO_SAVE_LOAD"
408 # save the load value
409 with m
.Case("AMO_SAVE_LOAD"):
410 with m
.If (valid_miss_fsm
):
411 # we are only concerned about the lower 64-bit
412 comb
+= mshr_d
.wdata
.eq(data_miss_fsm
[0])
415 with m
.Case("AMO_STORE"):
416 load_data
= Signal(64)
418 comb
+= load_data
.eq(data_align(amo_req_i
.operand_a
[:3],
420 # Sign-extend for word operation
421 with m
.If (amo_req_i
.size
== 0b10):
422 comb
+= amo_operand_a
.eq(sext32(load_data
[:32]))
423 comb
+= amo_operand_b
.eq(sext32(amo_req_i
.operand_b
[:32]))
425 comb
+= amo_operand_a
.eq(load_data
)
426 comb
+= amo_operand_b
.eq(amo_req_i
.operand_b
)
428 # we do not need a store request for load reserved
429 # or a failing store conditional
430 # we can bail-out without making any further requests
431 with m
.If ((amo_req_i
.amo_op
== AMO_LR
) | \
432 ((amo_req_i
.amo_op
== AMO_SC
) & \
433 ((reservation_q
.valid
& \
434 (reservation_q
.address
!= \
435 amo_req_i
.operand_a
[3:64])) | \
436 ~reservation_q
.valid
))):
437 comb
+= req_fsm_miss_valid
.eq(0)
439 comb
+= amo_resp_o
.ack
.eq(1)
440 # write-back the result
441 comb
+= amo_resp_o
.result
.eq(amo_operand_a
)
442 # we know that the SC failed
443 with m
.If (amo_req_i
.amo_op
== AMO_SC
):
444 comb
+= amo_resp_o
.result
.eq(1)
445 # also clear the reservation
446 comb
+= reservation_d
.valid
.eq(0)
448 comb
+= req_fsm_miss_valid
.eq(1)
450 comb
+= req_fsm_miss_we
.eq(1)
451 comb
+= req_fsm_miss_req
.eq(ariane_axi
::SINGLE_REQ
)
452 comb
+= req_fsm_miss_size
.eq(amo_req_i
.size
)
453 comb
+= req_fsm_miss_addr
.eq(amo_req_i
.operand_a
)
455 comb
+= req_fsm_miss_wdata
.eq(
456 data_align(amo_req_i
.operand_a
[0:3], amo_result_o
))
457 comb
+= req_fsm_miss_be
.eq(
458 be_gen(amo_req_i
.operand_a
[0:3], amo_req_i
.size
))
460 # place a reservation on the memory
461 with m
.If (amo_req_i
.amo_op
== AMO_LR
):
462 comb
+= reservation_d
.address
.eq(amo_req_i
.operand_a
[3:64])
463 comb
+= reservation_d
.valid
.eq(1)
465 # the request is valid or we didn't need to go for another store
466 with m
.If (valid_miss_fsm
):
468 comb
+= amo_resp_o
.ack
.eq(1)
469 # write-back the result
470 comb
+= amo_resp_o
.result
.eq(amo_operand_a
;
472 if (amo_req_i
.amo_op
== AMO_SC
) begin
473 comb
+= amo_resp_o
.result
.eq(0)
474 # An SC must fail if there is another SC
475 # (to any address) between the LR and the SC in
476 # program order (even to the same address).
477 # in any case destroy the reservation
478 comb
+= reservation_d
.valid
.eq(0)
480 # check MSHR for aliasing
482 comb
+= mshr_addr_matches_o
.eq(0)
483 comb
+= mshr_index_matches_o
.eq()
485 for i
in range(NR_PORTS
):
486 # check mshr for potential matching of other units,
487 # exclude the unit currently being served
488 with m
.If (mshr_q
.valid
& \
489 (mshr_addr_i
[i
][DCACHE_BYTE_OFFSET
:56] == \
490 mshr_q
.addr
[DCACHE_BYTE_OFFSET
:56])):
491 comb
+= mshr_addr_matches_o
[i
].eq(1)
493 # same as previous, but checking only the index
494 with m
.If (mshr_q
.valid
& \
495 (mshr_addr_i
[i
][DCACHE_BYTE_OFFSET
:DCACHE_INDEX_WIDTH
] == \
496 mshr_q
.addr
[DCACHE_BYTE_OFFSET
:DCACHE_INDEX_WIDTH
])):
497 mshr_index_matches_o
[i
].eq(1)
499 # --------------------
501 # --------------------
504 #pragma translate_off
506 # assert that cache only hits on one way
508 @(posedge clk_i) $onehot0(evict_way_q)) else $warning("Evict-way should be one-hot encoded");
513 # ----------------------
515 # ----------------------
516 # Connection Arbiter <-> AXI
517 logic req_fsm_bypass_valid
;
518 logic
[63:0] req_fsm_bypass_addr
;
519 logic
[63:0] req_fsm_bypass_wdata
;
520 logic req_fsm_bypass_we
;
521 logic
[7:0] req_fsm_bypass_be
;
522 logic
[1:0] req_fsm_bypass_size
;
523 logic gnt_bypass_fsm
;
524 logic valid_bypass_fsm
;
525 logic
[63:0] data_bypass_fsm
;
526 logic
[$
clog2(NR_PORTS
)-1:0] id_fsm_bypass
;
527 logic
[3:0] id_bypass_fsm
;
528 logic
[3:0] gnt_id_bypass_fsm
;
531 .NR_PORTS ( NR_PORTS
),
535 .data_req_i ( miss_req_valid
& miss_req_bypass
),
536 .address_i ( miss_req_addr
),
537 .data_wdata_i ( miss_req_wdata
),
538 .data_we_i ( miss_req_we
),
539 .data_be_i ( miss_req_be
),
540 .data_size_i ( miss_req_size
),
541 .data_gnt_o ( bypass_gnt_o
),
542 .data_rvalid_o ( bypass_valid_o
),
543 .data_rdata_o ( bypass_data_o
),
545 .id_i ( id_bypass_fsm
[$
clog2(NR_PORTS
)-1:0] ),
546 .id_o ( id_fsm_bypass
),
547 .gnt_id_i ( gnt_id_bypass_fsm
[$
clog2(NR_PORTS
)-1:0] ),
548 .address_o ( req_fsm_bypass_addr
),
549 .data_wdata_o ( req_fsm_bypass_wdata
),
550 .data_req_o ( req_fsm_bypass_valid
),
551 .data_we_o ( req_fsm_bypass_we
),
552 .data_be_o ( req_fsm_bypass_be
),
553 .data_size_o ( req_fsm_bypass_size
),
554 .data_gnt_i ( gnt_bypass_fsm
),
555 .data_rvalid_i ( valid_bypass_fsm
),
556 .data_rdata_i ( data_bypass_fsm
),
563 .CACHELINE_BYTE_OFFSET ( DCACHE_BYTE_OFFSET
)
564 ) i_bypass_axi_adapter (
567 .req_i ( req_fsm_bypass_valid
),
568 .type_i ( ariane_axi
::SINGLE_REQ
),
569 .gnt_o ( gnt_bypass_fsm
),
570 .addr_i ( req_fsm_bypass_addr
),
571 .we_i ( req_fsm_bypass_we
),
572 .wdata_i ( req_fsm_bypass_wdata
),
573 .be_i ( req_fsm_bypass_be
),
574 .size_i ( req_fsm_bypass_size
),
575 .id_i ( Cat(id_fsm_bypass
, 0, 0) ),
576 .valid_o ( valid_bypass_fsm
),
577 .rdata_o ( data_bypass_fsm
),
578 .gnt_id_o ( gnt_id_bypass_fsm
),
579 .id_o ( id_bypass_fsm
),
580 .critical_word_o ( ), # not used for single requests
581 .critical_word_valid_o ( ), # not used for single requests
582 .axi_req_o ( axi_bypass_o
),
583 .axi_resp_i ( axi_bypass_i
)
586 # ----------------------
587 # Cache Line AXI Refill
588 # ----------------------
590 .DATA_WIDTH ( DCACHE_LINE_WIDTH
),
592 .CACHELINE_BYTE_OFFSET ( DCACHE_BYTE_OFFSET
)
593 ) i_miss_axi_adapter (
596 .req_i ( req_fsm_miss_valid
),
597 .type_i ( req_fsm_miss_req
),
598 .gnt_o ( gnt_miss_fsm
),
599 .addr_i ( req_fsm_miss_addr
),
600 .we_i ( req_fsm_miss_we
),
601 .wdata_i ( req_fsm_miss_wdata
),
602 .be_i ( req_fsm_miss_be
),
603 .size_i ( req_fsm_miss_size
),
604 .id_i ( Const(0b1100, 4) ),
605 .gnt_id_o ( ), # open
606 .valid_o ( valid_miss_fsm
),
607 .rdata_o ( data_miss_fsm
),
610 .critical_word_valid_o
,
611 .axi_req_o ( axi_data_o
),
612 .axi_resp_i ( axi_data_i
)
618 lfsr_8bit
#(.WIDTH (DCACHE_SET_ASSOC)) i_lfsr (
619 .en_i ( lfsr_enable
),
620 .refill_way_oh ( lfsr_oh
),
621 .refill_way_bin ( lfsr_bin
),
629 .amo_op_i ( amo_op
),
630 .amo_operand_a_i ( amo_operand_a
),
631 .amo_operand_b_i ( amo_operand_b
),
632 .amo_result_o ( amo_result_o
)
639 for i
in range(NR_PORTS
):
641 comb
+= miss_req
.eq(miss_req_i
[i
]);
642 comb
+= miss_req_valid
[i
] .eq(miss_req
.valid
)
643 comb
+= miss_req_bypass
[i
] .eq(miss_req
.bypass
)
644 comb
+= miss_req_addr
[i
] .eq(miss_req
.addr
)
645 comb
+= miss_req_wdata
[i
] .eq(miss_req
.wdata
)
646 comb
+= miss_req_we
[i
] .eq(miss_req
.we
)
647 comb
+= miss_req_be
[i
] .eq(miss_req
.be
)
648 comb
+= miss_req_size
[i
] .eq(miss_req
.size
)
654 # Description: Arbitrates access to AXI refill/bypass
def __init__(self, NR_PORTS=3, DATA_WIDTH=64):
    """Arbiter port declarations: NR_PORTS request ports are multiplexed
    onto a single AXI adapter port of DATA_WIDTH bits.

    Fixes over the pasted original: ``Signal(name=..., width)`` calls
    reordered to ``Signal(width, name=...)`` (keyword args may not
    precede positional ones), ``DATA_WIDTH/8`` changed to integer
    division ``DATA_WIDTH//8`` (a Signal width must be an int), and the
    byte-enable array's copy-pasted name "data_wdata_i" corrected.
    """
    # number of bits needed to encode a port index
    self.pwid = pwid = ceil(log(NR_PORTS) / log(2))
    rst_ni = ResetSignal()  # Asynchronous reset active low
    # per-port request inputs
    self.data_req_i = Signal(NR_PORTS)
    self.address_i = Array(Signal(64, name="address_i")
                           for i in range(NR_PORTS))
    self.data_wdata_i = Array(Signal(64, name="data_wdata_i")
                              for i in range(NR_PORTS))
    self.data_we_i = Signal(NR_PORTS)
    self.data_be_i = Array(Signal(DATA_WIDTH//8, name="data_be_i")
                           for i in range(NR_PORTS))
    self.data_size_i = Array(Signal(2, name="data_size_i")
                             for i in range(NR_PORTS))
    # per-port grant/response outputs
    self.data_gnt_o = Signal(NR_PORTS)
    self.data_rvalid_o = Signal(NR_PORTS)
    self.data_rdata_o = Array(Signal(64, name="data_rdata_o")
                              for i in range(NR_PORTS))
    # winning-port id exchange with the AXI adapter
    self.id_i = Signal(pwid)
    self.id_o = Signal(pwid)
    self.gnt_id_i = Signal(pwid)
    # single multiplexed request port towards the AXI adapter
    self.data_req_o = Signal()
    self.address_o = Signal(64)
    self.data_wdata_o = Signal(DATA_WIDTH)
    self.data_we_o = Signal()
    self.data_be_o = Signal(DATA_WIDTH//8)
    self.data_size_o = Signal(2)
    self.data_gnt_i = Signal()
    self.data_rvalid_i = Signal()
    self.data_rdata_i = Signal(DATA_WIDTH)
690 def elaborate(self
, platform
):
691 #enum logic [1:0] { IDLE, REQ, SERVING } state_d, state_q;
694 def __init__(self
, pwid
, DATA_WIDTH
):
695 self
.id = Signal(pwid
)
696 self
.address
= Signal(64)
697 self
.data
= Signal(64)
698 self
.size
= Signal(2)
699 self
.be
= Signal(DATA_WIDTH
/8)
702 request_index
= Signal(self
.pwid
)
705 sync
+= address_o
.eq(req_q
.address
)
706 sync
+= data_wdata_o
.eq(req_q
.data
)
707 sync
+= data_be_o
.eq(req_q
.be
)
708 sync
+= data_size_o
.eq(req_q
.size
)
709 sync
+= data_we_o
.eq(req_q
.we
)
710 sync
+= id_o
.eq(req_q
.id)
711 comb
+= data_gnt_o
.eq(0)
713 comb
+= data_rvalid_o
.eq(0)
714 comb
+= data_rdata_o
.eq(0)
715 comb
+= data_rdata_o
[req_q
.id] .eq(data_rdata_i
)
717 with m
.Switch("state") as s
:
720 # wait for incoming requests
721 for (int unsigned i
= 0; i
< NR_PORTS
; i
++) begin
722 if (data_req_i
[i
] == 0b1) begin
723 comb
+= data_req_o
.eq(data_req_i
[i
])
724 comb
+= data_gnt_o
[i
].eq(data_req_i
[i
])
725 comb
+= request_index
.eq(i
[$
bits(request_index
)-1:0])
727 comb
+= req_d
.address
.eq(address_i
[i
])
728 sync
+= req_d
.id.eq(i
[$
bits(req_q
.id)-1:0])
729 comb
+= req_d
.data
.eq(data_wdata_i
[i
])
730 comb
+= req_d
.size
.eq(data_size_i
[i
])
731 comb
+= req_d
.be
.eq(data_be_i
[i
])
732 comb
+= req_d
.we
.eq(data_we_i
[i
])
734 break; # break here as this is a priority select
736 comb
+= address_o
.eq(address_i
[request_index
])
737 comb
+= data_wdata_o
.eq(data_wdata_i
[request_index
])
738 comb
+= data_be_o
.eq(data_be_i
[request_index
])
739 comb
+= data_size_o
.eq(data_size_i
[request_index
])
740 comb
+= data_we_o
.eq(data_we_i
[request_index
])
741 comb
+= id_o
.eq(request_index
)
743 with m
.Case("SERVING"):
744 comb
+= data_req_o
.eq(0b1)
745 with m
.If (data_rvalid_i
:
746 comb
+= data_rvalid_o
[req_q
.id].eq(0b1)
754 #pragma translate_off
756 # make sure that we eventually get an rvalid after we received a grant
757 assert property (@(posedge clk_i) data_gnt_i |-> ##[1:$] data_rvalid_i )
758 else begin $error("There was a grant without a rvalid"); $stop(); end
759 # assert that there is no grant without a request
760 assert property (@(negedge clk_i) data_gnt_i |-> data_req_o)
761 else begin $error("There was a grant without a request."); $stop(); end
762 # assert that the address does not contain X when request is sent
763 assert property ( @(posedge clk_i) (data_req_o) |-> (!$isunknown(address_o)) )
764 else begin $error("address contains X when request is set"); $stop(); end