# Copyright 2018 ETH Zurich and University of Bologna.
# Copyright and related rights are licensed under the Solderpad Hardware
# License, Version 0.51 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://solderpad.org/licenses/SHL-0.51. Unless required by applicable law
# or agreed to in writing, software, hardware and materials distributed under
# this License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# Author: David Schaffenrath, TU Graz
# Author: Florian Zaruba, ETH Zurich
#
# Description: Hardware PTW (page-table walker).
# (The SystemVerilog original carried "/* verilator lint_off WIDTH */" here.)
#
# For the SV48 virtual-memory layout, see the Linux kernel sources:
# * "arch/riscv/include/asm/page.h"
# * "arch/riscv/include/asm/mmu_context.h"
# * "arch/riscv/Kconfig" (CONFIG_PAGE_OFFSET)
# nmigen HDL primitives used throughout this module.
from nmigen import Const, Signal, Cat, Module, Elaboratable
# ArrayProxy is needed to detect nmigen Array element accesses in PTE.eq().
from nmigen.hdl.ast import ArrayProxy
# Back-end converters used by the __main__ test harness.
from nmigen.cli import verilog, rtlil
# L1 D-cache geometry (mirrors the ariane configuration).
CONFIG_L1D_SIZE = 32*1024
# Index width: bits needed to address one way of the set-associative cache.
# NOTE(review): DCACHE_SET_ASSOC and log2 are defined on source lines not
# visible in this chunk — presumably `from math import log2` and a small
# integer constant; confirm against the full file.
DCACHE_INDEX_WIDTH = int(log2(CONFIG_L1D_SIZE / DCACHE_SET_ASSOC))
# Tag covers the remainder of the 56-bit physical address.
DCACHE_TAG_WIDTH = 56 - DCACHE_INDEX_WIDTH
44 self
.address_index
= Signal(DCACHE_INDEX_WIDTH
)
45 self
.address_tag
= Signal(DCACHE_TAG_WIDTH
)
46 self
.data_wdata
= Signal(64)
47 self
.data_req
= Signal()
48 self
.data_we
= Signal()
49 self
.data_be
= Signal(8)
50 self
.data_size
= Signal(2)
51 self
.kill_req
= Signal()
52 self
.tag_valid
= Signal()
56 for (o
, i
) in zip(self
.ports(), inp
.ports()):
61 return [self
.address_index
, self
.address_tag
,
62 self
.data_wdata
, self
.data_req
,
63 self
.data_we
, self
.data_be
, self
.data_size
,
64 self
.kill_req
, self
.tag_valid
,
69 self
.data_gnt
= Signal()
70 self
.data_rvalid
= Signal()
71 self
.data_rdata
= Signal(64) # actually in PTE object format
75 for (o
, i
) in zip(self
.ports(), inp
.ports()):
80 return [self
.data_gnt
, self
.data_rvalid
, self
.data_rdata
]
83 class PTE
: #(RecordObject):
95 self
.reserved
= Signal(10)
98 return Cat(*self
.ports())
101 if isinstance(x
, ArrayProxy
):
103 for o
in self
.ports():
104 i
= getattr(x
, o
.name
)
109 return self
.flatten().eq(x
)
112 """ order is critical so that flatten creates LSB to MSB
131 def __init__(self
, asid_width
):
132 self
.valid
= Signal() # valid flag
133 self
.is_2M
= Signal()
134 self
.is_1G
= Signal()
135 self
.is_512G
= Signal()
136 self
.vpn
= Signal(27)
137 self
.asid
= Signal(asid_width
)
141 return Cat(*self
.ports())
144 return self
.flatten().eq(x
.flatten())
147 return [self
.valid
, self
.is_2M
, self
.is_1G
, self
.vpn
, self
.asid
] + \
# SV48 defines four levels of page tables.
LVL1 = Const(0, 2)  # defined to 0 so that ptw_lvl default-resets to LVL1
158 class PTW(Elaboratable
):
159 def __init__(self
, asid_width
=8):
160 self
.asid_width
= asid_width
162 self
.flush_i
= Signal() # flush everything, we need to do this because
163 # actually everything we do is speculative at this stage
164 # e.g.: there could be a CSR instruction that changes everything
165 self
.ptw_active_o
= Signal(reset
=1) # active if not IDLE
166 self
.walking_instr_o
= Signal() # set when walking for TLB
167 self
.ptw_error_o
= Signal() # set when an error occurred
168 self
.enable_translation_i
= Signal() # CSRs indicate to enable SV48
169 self
.en_ld_st_translation_i
= Signal() # enable VM translation for ld/st
171 self
.lsu_is_store_i
= Signal() # translation triggered by store
172 # PTW memory interface
173 self
.req_port_i
= DCacheReqO()
174 self
.req_port_o
= DCacheReqI()
176 # to TLBs, update logic
177 self
.itlb_update_o
= TLBUpdate(asid_width
)
178 self
.dtlb_update_o
= TLBUpdate(asid_width
)
180 self
.update_vaddr_o
= Signal(48)
182 self
.asid_i
= Signal(self
.asid_width
)
185 self
.itlb_access_i
= Signal()
186 self
.itlb_hit_i
= Signal()
187 self
.itlb_vaddr_i
= Signal(64)
189 self
.dtlb_access_i
= Signal()
190 self
.dtlb_hit_i
= Signal()
191 self
.dtlb_vaddr_i
= Signal(64)
193 self
.satp_ppn_i
= Signal(44) # ppn from satp
194 self
.mxr_i
= Signal()
195 # Performance counters
196 self
.itlb_miss_o
= Signal()
197 self
.dtlb_miss_o
= Signal()
200 return [self
.ptw_active_o
, self
.walking_instr_o
, self
.ptw_error_o
,
203 self
.enable_translation_i
, self
.en_ld_st_translation_i
,
204 self
.lsu_is_store_i
, self
.req_port_i
, self
.req_port_o
,
207 self
.itlb_access_i
, self
.itlb_hit_i
, self
.itlb_vaddr_i
,
208 self
.dtlb_access_i
, self
.dtlb_hit_i
, self
.dtlb_vaddr_i
,
209 self
.satp_ppn_i
, self
.mxr_i
,
210 self
.itlb_miss_o
, self
.dtlb_miss_o
211 ] + self
.itlb_update_o
.ports() + self
.dtlb_update_o
.ports()
213 def elaborate(self
, platform
):
217 data_rvalid
= Signal()
218 data_rdata
= Signal(64)
220 # NOTE: pte decodes the incoming bit-field (data_rdata). data_rdata
221 # is spec'd in 64-bit binary-format: better to spec as Record?
223 m
.d
.comb
+= pte
.flatten().eq(data_rdata
)
225 # SV48 defines four levels of page tables
226 ptw_lvl
= Signal(2) # default=0=LVL1 on reset (see above)
231 m
.d
.comb
+= [ptw_lvl1
.eq(ptw_lvl
== LVL1
),
232 ptw_lvl2
.eq(ptw_lvl
== LVL2
),
233 ptw_lvl3
.eq(ptw_lvl
== LVL3
),
234 ptw_lvl4
.eq(ptw_lvl
== LVL4
)
237 # is this an instruction page table walk?
238 is_instr_ptw
= Signal()
239 global_mapping
= Signal()
243 tlb_update_asid
= Signal(self
.asid_width
)
244 # register VPN we need to walk, SV48 defines a 48 bit virtual addr
246 # 4 byte aligned physical pointer
247 ptw_pptr
= Signal(56)
249 end
= DCACHE_INDEX_WIDTH
+ DCACHE_TAG_WIDTH
252 self
.update_vaddr_o
.eq(vaddr
),
254 self
.walking_instr_o
.eq(is_instr_ptw
),
255 # directly output the correct physical address
256 self
.req_port_o
.address_index
.eq(ptw_pptr
[0:DCACHE_INDEX_WIDTH
]),
257 self
.req_port_o
.address_tag
.eq(ptw_pptr
[DCACHE_INDEX_WIDTH
:end
]),
258 # we are never going to kill this request
259 self
.req_port_o
.kill_req
.eq(0), # XXX assign comb?
260 # we are never going to write with the HPTW
261 self
.req_port_o
.data_wdata
.eq(Const(0, 64)), # XXX assign comb?
265 self
.itlb_update_o
.vpn
.eq(vaddr
[12:48]),
266 self
.dtlb_update_o
.vpn
.eq(vaddr
[12:48]),
267 # update the correct page table level
268 self
.itlb_update_o
.is_2M
.eq(ptw_lvl3
),
269 self
.itlb_update_o
.is_1G
.eq(ptw_lvl2
),
270 self
.itlb_update_o
.is_512G
.eq(ptw_lvl1
),
271 self
.dtlb_update_o
.is_2M
.eq(ptw_lvl3
),
272 self
.dtlb_update_o
.is_1G
.eq(ptw_lvl2
),
273 self
.dtlb_update_o
.is_512G
.eq(ptw_lvl1
),
275 # output the correct ASID
276 self
.itlb_update_o
.asid
.eq(tlb_update_asid
),
277 self
.dtlb_update_o
.asid
.eq(tlb_update_asid
),
278 # set the global mapping bit
279 self
.itlb_update_o
.content
.eq(pte
),
280 self
.itlb_update_o
.content
.g
.eq(global_mapping
),
281 self
.dtlb_update_o
.content
.eq(pte
),
282 self
.dtlb_update_o
.content
.g
.eq(global_mapping
),
284 self
.req_port_o
.tag_valid
.eq(tag_valid
),
288 # Page table walker #needs update
290 # A virtual address va is translated into a physical address pa as
292 # 1. Let a be sptbr.ppn × PAGESIZE, and let i = LEVELS-1. (For Sv48,
293 # PAGESIZE=2^12 and LEVELS=4.)
294 # 2. Let pte be the value of the PTE at address a+va.vpn[i]×PTESIZE.
295 # (For Sv32, PTESIZE=4.)
296 # 3. If pte.v = 0, or if pte.r = 0 and pte.w = 1, stop and raise an
298 # 4. Otherwise, the PTE is valid. If pte.r = 1 or pte.x = 1, go to
299 # step 5. Otherwise, this PTE is a pointer to the next level of
301 # Let i=i-1. If i < 0, stop and raise an access exception.
302 # Otherwise, let a = pte.ppn × PAGESIZE and go to step 2.
303 # 5. A leaf PTE has been found. Determine if the requested memory
304 # access is allowed by the pte.r, pte.w, and pte.x bits. If not,
305 # stop and raise an access exception. Otherwise, the translation is
306 # successful. Set pte.a to 1, and, if the memory access is a
307 # store, set pte.d to 1.
308 # The translated physical address is given as follows:
309 # - pa.pgoff = va.pgoff.
310 # - If i > 0, then this is a superpage translation and
311 # pa.ppn[i-1:0] = va.vpn[i-1:0].
312 # - pa.ppn[LEVELS-1:i] = pte.ppn[LEVELS-1:i].
313 # 6. If i > 0 and pa.ppn[i − 1 : 0] != 0, this is a misaligned
314 # superpage stop and raise a page-fault exception.
316 m
.d
.sync
+= tag_valid
.eq(0)
318 # default assignments
320 # PTW memory interface
321 self
.req_port_o
.data_req
.eq(0),
322 self
.req_port_o
.data_be
.eq(Const(0xFF, 8)),
323 self
.req_port_o
.data_size
.eq(Const(0b11, 2)),
324 self
.req_port_o
.data_we
.eq(0),
325 self
.ptw_error_o
.eq(0),
326 self
.itlb_update_o
.valid
.eq(0),
327 self
.dtlb_update_o
.valid
.eq(0),
329 self
.itlb_miss_o
.eq(0),
330 self
.dtlb_miss_o
.eq(0),
339 with m
.State("IDLE"):
340 self
.idle(m
, is_instr_ptw
, ptw_lvl
, global_mapping
,
341 ptw_pptr
, vaddr
, tlb_update_asid
)
343 with m
.State("WAIT_GRANT"):
344 self
.grant(m
, tag_valid
, data_rvalid
)
346 with m
.State("PTE_LOOKUP"):
347 # we wait for the valid signal
348 with m
.If(data_rvalid
):
349 self
.lookup(m
, pte
, ptw_lvl
, ptw_lvl1
, ptw_lvl2
, ptw_lvl3
, ptw_lvl4
,
350 data_rvalid
, global_mapping
,
351 is_instr_ptw
, ptw_pptr
)
353 # Propagate error to MMU/LSU
354 with m
.State("PROPAGATE_ERROR"):
356 m
.d
.comb
+= self
.ptw_error_o
.eq(1)
358 # wait for the rvalid before going back to IDLE
359 with m
.State("WAIT_RVALID"):
360 with m
.If(data_rvalid
):
363 m
.d
.sync
+= [data_rdata
.eq(self
.req_port_i
.data_rdata
),
364 data_rvalid
.eq(self
.req_port_i
.data_rvalid
)
369 def set_grant_state(self
, m
):
370 # should we have flushed before we got an rvalid,
371 # wait for it until going back to IDLE
372 with m
.If(self
.flush_i
):
373 with m
.If (self
.req_port_i
.data_gnt
):
374 m
.next
= "WAIT_RVALID"
378 m
.next
= "WAIT_GRANT"
380 def idle(self
, m
, is_instr_ptw
, ptw_lvl
, global_mapping
,
381 ptw_pptr
, vaddr
, tlb_update_asid
):
382 # by default we start with the top-most page table
383 m
.d
.sync
+= [is_instr_ptw
.eq(0),
385 global_mapping
.eq(0),
386 self
.ptw_active_o
.eq(0), # deactive (IDLE)
388 # work out itlb/dtlb miss
389 m
.d
.comb
+= self
.itlb_miss_o
.eq(self
.enable_translation_i
& \
390 self
.itlb_access_i
& \
393 m
.d
.comb
+= self
.dtlb_miss_o
.eq(self
.en_ld_st_translation_i
& \
394 self
.dtlb_access_i
& \
396 # we got an ITLB miss?
397 with m
.If(self
.itlb_miss_o
):
398 pptr
= Cat(Const(0, 3), self
.itlb_vaddr_i
[30:48],
400 m
.d
.sync
+= [ptw_pptr
.eq(pptr
),
402 vaddr
.eq(self
.itlb_vaddr_i
),
403 tlb_update_asid
.eq(self
.asid_i
),
405 self
.set_grant_state(m
)
407 # we got a DTLB miss?
408 with m
.Elif(self
.dtlb_miss_o
):
409 pptr
= Cat(Const(0, 3), self
.dtlb_vaddr_i
[30:48],
411 m
.d
.sync
+= [ptw_pptr
.eq(pptr
),
412 vaddr
.eq(self
.dtlb_vaddr_i
),
413 tlb_update_asid
.eq(self
.asid_i
),
415 self
.set_grant_state(m
)
417 def grant(self
, m
, tag_valid
, data_rvalid
):
418 # we've got a data WAIT_GRANT so tell the
419 # cache that the tag is valid
422 m
.d
.comb
+= self
.req_port_o
.data_req
.eq(1)
423 # wait for the WAIT_GRANT
424 with m
.If(self
.req_port_i
.data_gnt
):
425 # send the tag valid signal one cycle later
426 m
.d
.sync
+= tag_valid
.eq(1)
427 # should we have flushed before we got an rvalid,
428 # wait for it until going back to IDLE
429 with m
.If(self
.flush_i
):
430 with m
.If (~data_rvalid
):
431 m
.next
= "WAIT_RVALID"
435 m
.next
= "PTE_LOOKUP"
437 def lookup(self
, m
, pte
, ptw_lvl
, ptw_lvl1
, ptw_lvl2
, ptw_lvl3
, ptw_lvl4
,
438 data_rvalid
, global_mapping
,
439 is_instr_ptw
, ptw_pptr
):
441 pte_rx
= Signal(reset_less
=True)
442 pte_exe
= Signal(reset_less
=True)
443 pte_inv
= Signal(reset_less
=True)
444 pte_a
= Signal(reset_less
=True)
445 st_wd
= Signal(reset_less
=True)
446 m
.d
.comb
+= [pte_rx
.eq(pte
.r | pte
.x
),
447 pte_exe
.eq(~pte
.x | ~pte
.a
),
448 pte_inv
.eq(~pte
.v |
(~pte
.r
& pte
.w
)),
449 pte_a
.eq(pte
.a
& (pte
.r |
(pte
.x
& self
.mxr_i
))),
450 st_wd
.eq(self
.lsu_is_store_i
& (~pte
.w | ~pte
.d
))]
452 l1err
= Signal(reset_less
=True)
453 l2err
= Signal(reset_less
=True)
454 m
.d
.comb
+= [l3err
.eq((ptw_lvl3
) & pte
.ppn
[0:9] != Const(0,0)),
455 l2err
.eq((ptw_lvl2
) & pte
.ppn
[0:18] != Const(0, 18)),
456 l1err
.eq((ptw_lvl1
) & pte
.ppn
[0:27] != Const(0, 27))]
458 # check if the global mapping bit is set
460 m
.d
.sync
+= global_mapping
.eq(1)
467 # If pte.v = 0, or if pte.r = 0 and pte.w = 1,
468 # stop and raise a page-fault exception.
470 m
.next
= "PROPAGATE_ERROR"
477 # if pte.r = 1 or pte.x = 1 it is a valid PTE
478 with m
.Elif (pte_rx
):
479 # Valid translation found (either 1G, 2M or 4K)
480 with m
.If(is_instr_ptw
):
484 # If page not executable, we can directly raise error.
485 # This doesn't put a useless entry into the TLB.
486 # The same idea applies to the access flag since we let
487 # the access flag be managed by SW.
491 m
.d
.comb
+= self
.itlb_update_o
.valid
.eq(1)
497 # Check if the access flag has been set, otherwise
498 # throw page-fault and let software handle those bits.
499 # If page not readable (there are no write-only pages)
500 # directly raise an error. This doesn't put a useless
501 # entry into the TLB.
503 m
.d
.comb
+= self
.dtlb_update_o
.valid
.eq(1)
505 m
.next
= "PROPAGATE_ERROR"
506 # Request is a store: perform additional checks
507 # If the request was a store and the page not
508 # write-able, raise an error
509 # the same applies if the dirty flag is not set
511 m
.d
.comb
+= self
.dtlb_update_o
.valid
.eq(0)
512 m
.next
= "PROPAGATE_ERROR"
514 # check if the ppn is correctly aligned: Case (6)
515 with m
.If(l1err | l2err | l3err
):
516 m
.next
= "PROPAGATE_ERROR"
517 m
.d
.comb
+= [self
.dtlb_update_o
.valid
.eq(0),
518 self
.itlb_update_o
.valid
.eq(0)]
520 # this is a pointer to the next TLB level
522 # pointer to next level of page table
523 with m
.If (ptw_lvl1
):
524 # we are in the second level now
525 pptr
= Cat(Const(0, 3), self
.dtlb_vaddr_i
[30:39], pte
.ppn
)
526 m
.d
.sync
+= [ptw_pptr
.eq(pptr
),
530 # here we received a pointer to the third level
531 pptr
= Cat(Const(0, 3), self
.dtlb_vaddr_i
[21:30], pte
.ppn
)
532 m
.d
.sync
+= [ptw_pptr
.eq(pptr
),
535 with m
.If(ptw_lvl3
): #guess: shift page levels by one
536 # here we received a pointer to the fourth level
537 # the last one is near the page offset
538 pptr
= Cat(Const(0, 3), self
.dtlb_vaddr_i
[12:21], pte
.ppn
)
539 m
.d
.sync
+= [ptw_pptr
.eq(pptr
),
542 self
.set_grant_state(m
)
544 with m
.If (ptw_lvl4
):
545 # Should already be the last level
546 # page table => Error
547 m
.d
.sync
+= ptw_lvl
.eq(LVL4
)
548 m
.next
= "PROPAGATE_ERROR"
551 if __name__
== '__main__':
553 vl
= rtlil
.convert(ptw
, ports
=ptw
.ports())
554 with
open("test_ptw.il", "w") as f
: