# -*- mode:python -*-
# Copyright (c) 2007 MIPS Technologies, Inc.
+# Copyright (c) 2020 Barkhausen Institut
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-from m5.SimObject import SimObject
from m5.params import *
+from m5.proxy import *
from m5.objects.BaseTLB import BaseTLB
+from m5.objects.ClockedObject import ClockedObject
+
+class RiscvPagetableWalker(ClockedObject):
+ type = 'RiscvPagetableWalker'
+ cxx_class = 'RiscvISA::Walker'
+ cxx_header = 'arch/riscv/pagetable_walker.hh'
+ port = MasterPort("Port for the hardware table walker")
+ system = Param.System(Parent.any, "system object")
+ num_squash_per_cycle = Param.Unsigned(4,
+ "Number of outstanding walks that can be squashed per cycle")
class RiscvTLB(BaseTLB):
type = 'RiscvTLB'
cxx_class = 'RiscvISA::TLB'
cxx_header = 'arch/riscv/tlb.hh'
size = Param.Int(64, "TLB size")
+ walker = Param.RiscvPagetableWalker(\
+ RiscvPagetableWalker(), "page table walker")
# Copyright (c) 2013 ARM Limited
# Copyright (c) 2014 Sven Karlsson
+# Copyright (c) 2020 Barkhausen Institut
# All rights reserved
#
# The license below extends only to copyright in the software and shall
Source('locked_mem.cc')
Source('process.cc')
Source('pagetable.cc')
+ Source('pagetable_walker.cc')
Source('remote_gdb.cc')
Source('stacktrace.cc')
Source('tlb.cc')
SimObject('RiscvTLB.py')
DebugFlag('RiscvMisc')
- DebugFlag('RiscvTLB')
+ DebugFlag('TLBVerbose')
+ DebugFlag('PageTableWalker', \
+ "Page table walker state machine debugging")
# Add in files generated by the ISA description.
ISADesc('isa/main.isa')
/*
* Copyright (c) 2018 TU Dresden
+ * Copyright (c) 2020 Barkhausen Institut
* All rights reserved
*
* Redistribution and use in source and binary forms, with or without
warn_if(!bootloader->buildImage().write(system->physProxy),
"Could not load sections to memory.");
+
+ for (auto *tc: system->threadContexts) {
+ RiscvISA::Reset().invoke(tc);
+ tc->activate();
+ }
}
} // namespace RiscvISA
/*
* Copyright (c) 2002-2005 The Regents of The University of Michigan
* Copyright (c) 2007 MIPS Technologies, Inc.
+ * Copyright (c) 2020 Barkhausen Institut
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#include "arch/riscv/pagetable.hh"
+#include "arch/riscv/isa_traits.hh"
#include "sim/serialize.hh"
namespace RiscvISA
{
void
-PTE::serialize(CheckpointOut &cp) const
+TlbEntry::serialize(CheckpointOut &cp) const
{
- SERIALIZE_SCALAR(Mask);
- SERIALIZE_SCALAR(VPN);
+ SERIALIZE_SCALAR(paddr);
+ SERIALIZE_SCALAR(vaddr);
+ SERIALIZE_SCALAR(logBytes);
SERIALIZE_SCALAR(asid);
- SERIALIZE_SCALAR(G);
- SERIALIZE_SCALAR(PFN0);
- SERIALIZE_SCALAR(D0);
- SERIALIZE_SCALAR(V0);
- SERIALIZE_SCALAR(C0);
- SERIALIZE_SCALAR(PFN1);
- SERIALIZE_SCALAR(D1);
- SERIALIZE_SCALAR(V1);
- SERIALIZE_SCALAR(C1);
- SERIALIZE_SCALAR(AddrShiftAmount);
- SERIALIZE_SCALAR(OffsetMask);
+ SERIALIZE_SCALAR(pte);
+ SERIALIZE_SCALAR(lruSeq);
}
void
-PTE::unserialize(CheckpointIn &cp)
+TlbEntry::unserialize(CheckpointIn &cp)
{
- UNSERIALIZE_SCALAR(Mask);
- UNSERIALIZE_SCALAR(VPN);
+ UNSERIALIZE_SCALAR(paddr);
+ UNSERIALIZE_SCALAR(vaddr);
+ UNSERIALIZE_SCALAR(logBytes);
UNSERIALIZE_SCALAR(asid);
- UNSERIALIZE_SCALAR(G);
- UNSERIALIZE_SCALAR(PFN0);
- UNSERIALIZE_SCALAR(D0);
- UNSERIALIZE_SCALAR(V0);
- UNSERIALIZE_SCALAR(C0);
- UNSERIALIZE_SCALAR(PFN1);
- UNSERIALIZE_SCALAR(D1);
- UNSERIALIZE_SCALAR(V1);
- UNSERIALIZE_SCALAR(C1);
- UNSERIALIZE_SCALAR(AddrShiftAmount);
- UNSERIALIZE_SCALAR(OffsetMask);
+ UNSERIALIZE_SCALAR(pte);
+ UNSERIALIZE_SCALAR(lruSeq);
}
}
/*
* Copyright (c) 2002-2005 The Regents of The University of Michigan
* Copyright (c) 2007 MIPS Technologies, Inc.
+ * Copyright (c) 2020 Barkhausen Institut
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#define __ARCH_RISCV_PAGETABLE_H__
#include "base/logging.hh"
+#include "base/trie.hh"
#include "base/types.hh"
#include "sim/serialize.hh"
namespace RiscvISA {
-struct VAddr
-{
-};
+BitUnion64(SATP)
+ Bitfield<63, 60> mode;
+ Bitfield<59, 44> asid;
+ Bitfield<43, 0> ppn;
+EndBitUnion(SATP)
-// ITB/DTB page table entry
-struct PTE
+enum AddrXlateMode
{
- Addr Mask;
- Addr VPN;
- uint8_t asid;
-
- bool G;
-
- /* Contents of Entry Lo0 */
- Addr PFN0; // Physical Frame Number - Even
- bool D0; // Even entry Dirty Bit
- bool V0; // Even entry Valid Bit
- uint8_t C0; // Cache Coherency Bits - Even
-
- /* Contents of Entry Lo1 */
- Addr PFN1; // Physical Frame Number - Odd
- bool D1; // Odd entry Dirty Bit
- bool V1; // Odd entry Valid Bit
- uint8_t C1; // Cache Coherency Bits (3 bits)
-
- /*
- * The next few variables are put in as optimizations to reduce
- * TLB lookup overheads. For a given Mask, what is the address shift
- * amount, and what is the OffsetMask
- */
- int AddrShiftAmount;
- int OffsetMask;
-
- bool Valid() { return (V0 | V1); };
- void serialize(CheckpointOut &cp) const;
- void unserialize(CheckpointIn &cp);
+ BARE = 0,
+ SV39 = 8,
+ SV48 = 9,
};
-// WARN: This particular TLB entry is not necessarily conformed to RISC-V ISA
-struct TlbEntry
+// Sv39 paging
+const Addr VADDR_BITS = 39;
+const Addr LEVEL_BITS = 9;
+const Addr LEVEL_MASK = (1 << LEVEL_BITS) - 1;
+
+BitUnion64(PTESv39)
+ Bitfield<53, 10> ppn;
+ Bitfield<53, 28> ppn2;
+ Bitfield<27, 19> ppn1;
+ Bitfield<18, 10> ppn0;
+ Bitfield<7> d;
+ Bitfield<6> a;
+ Bitfield<5> g;
+ Bitfield<4> u;
+ Bitfield<3, 1> perm;
+ Bitfield<3> x;
+ Bitfield<2> w;
+ Bitfield<1> r;
+ Bitfield<0> v;
+EndBitUnion(PTESv39)
+
+struct TlbEntry;
+typedef Trie<Addr, TlbEntry> TlbEntryTrie;
+
+struct TlbEntry : public Serializable
{
- Addr _pageStart;
- TlbEntry() {}
- TlbEntry(Addr asn, Addr vaddr, Addr paddr,
- bool uncacheable, bool read_only)
- : _pageStart(paddr)
- {
- if (uncacheable || read_only)
- warn("RISC-V TlbEntry does not support uncacheable"
- " or read-only mappings\n");
- }
+ // The base of the physical page.
+ Addr paddr;
- Addr pageStart()
- {
- return _pageStart;
- }
+ // The beginning of the virtual page this entry maps.
+ Addr vaddr;
+ // The size of the page this represents, in address bits.
+ unsigned logBytes;
- void
- updateVaddr(Addr new_vaddr) {}
+ uint16_t asid;
- void serialize(CheckpointOut &cp) const
- {
- SERIALIZE_SCALAR(_pageStart);
- }
+ PTESv39 pte;
+
+ TlbEntryTrie::Handle trieHandle;
+
+ // A sequence number to keep track of LRU.
+ uint64_t lruSeq;
+
+ TlbEntry()
+ : paddr(0), vaddr(0), logBytes(0), pte(), lruSeq(0)
+ {}
- void unserialize(CheckpointIn &cp)
+ // Return the page size in bytes
+ Addr size() const
{
- UNSERIALIZE_SCALAR(_pageStart);
+ return (static_cast<Addr>(1) << logBytes);
}
+ void serialize(CheckpointOut &cp) const override;
+ void unserialize(CheckpointIn &cp) override;
};
};
--- /dev/null
+/*
+ * Copyright (c) 2012 ARM Limited
+ * Copyright (c) 2020 Barkhausen Institut
+ * All rights reserved.
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Copyright (c) 2007 The Hewlett-Packard Development Company
+ * All rights reserved.
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "arch/riscv/pagetable_walker.hh"
+
+#include <memory>
+
+#include "arch/riscv/faults.hh"
+#include "arch/riscv/pagetable.hh"
+#include "arch/riscv/tlb.hh"
+#include "base/bitfield.hh"
+#include "base/trie.hh"
+#include "cpu/base.hh"
+#include "cpu/thread_context.hh"
+#include "debug/PageTableWalker.hh"
+#include "mem/packet_access.hh"
+#include "mem/request.hh"
+
+namespace RiscvISA {
+
+// Begin a page-table walk for a timing or atomic translation request.
+// A new WalkerState is heap-allocated per walk; in atomic mode it is
+// freed here once the walk completes, while in timing mode it stays on
+// currStates and is freed later in recvTimingResp()/startWalkWrapper().
+Fault
+Walker::start(ThreadContext * _tc, BaseTLB::Translation *_translation,
+ const RequestPtr &_req, BaseTLB::Mode _mode)
+{
+ // TODO: in timing mode, instead of blocking when there are other
+ // outstanding requests, see if this request can be coalesced with
+ // another one (i.e. either coalesce or start walk)
+ WalkerState * newState = new WalkerState(this, _translation, _req);
+ newState->initState(_tc, _mode, sys->isTimingMode());
+ if (currStates.size()) {
+ // Another walk is in flight: queue this one. Only timing-mode
+ // walks may overlap, hence the assertion.
+ assert(newState->isTiming());
+ DPRINTF(PageTableWalker, "Walks in progress: %d\n", currStates.size());
+ currStates.push_back(newState);
+ return NoFault;
+ } else {
+ currStates.push_back(newState);
+ Fault fault = newState->startWalk();
+ if (!newState->isTiming()) {
+ // Atomic walk finished synchronously; reclaim the state now.
+ currStates.pop_front();
+ delete newState;
+ }
+ return fault;
+ }
+}
+
+Fault
+Walker::startFunctional(ThreadContext * _tc, Addr &addr, unsigned &logBytes,
+ BaseTLB::Mode _mode)
+{
+ funcState.initState(_tc, _mode);
+ return funcState.startFunctional(addr, logBytes);
+}
+
+bool
+Walker::WalkerPort::recvTimingResp(PacketPtr pkt)
+{
+ return walker->recvTimingResp(pkt);
+}
+
+// Handle a timing-mode memory response: route the packet back to the
+// walk that issued it, reap the walk if it has completed, and kick off
+// any queued walks.
+bool
+Walker::recvTimingResp(PacketPtr pkt)
+{
+ WalkerSenderState * senderState =
+ dynamic_cast<WalkerSenderState *>(pkt->popSenderState());
+ // Every request we issue carries a WalkerSenderState (see
+ // sendTiming()), so a response without one is a protocol violation.
+ assert(senderState);
+ WalkerState * senderWalk = senderState->senderWalk;
+ bool walkComplete = senderWalk->recvPacket(pkt);
+ delete senderState;
+ if (walkComplete) {
+ // Unlink the finished walk from the list of outstanding walks
+ // before freeing it.
+ std::list<WalkerState *>::iterator iter;
+ for (iter = currStates.begin(); iter != currStates.end(); iter++) {
+ WalkerState * walkerState = *(iter);
+ if (walkerState == senderWalk) {
+ iter = currStates.erase(iter);
+ break;
+ }
+ }
+ delete senderWalk;
+ // Since we block requests when another is outstanding, we
+ // need to check if there is a waiting request to be serviced
+ if (currStates.size() && !startWalkWrapperEvent.scheduled())
+ // delay sending any new requests until we are finished
+ // with the responses
+ schedule(startWalkWrapperEvent, clockEdge());
+ }
+ return true;
+}
+
+void
+Walker::WalkerPort::recvReqRetry()
+{
+ walker->recvReqRetry();
+}
+
+void
+Walker::recvReqRetry()
+{
+ std::list<WalkerState *>::iterator iter;
+ for (iter = currStates.begin(); iter != currStates.end(); iter++) {
+ WalkerState * walkerState = *(iter);
+ if (walkerState->isRetrying()) {
+ walkerState->retry();
+ }
+ }
+}
+
+bool Walker::sendTiming(WalkerState* sendingState, PacketPtr pkt)
+{
+ WalkerSenderState* walker_state = new WalkerSenderState(sendingState);
+ pkt->pushSenderState(walker_state);
+ if (port.sendTimingReq(pkt)) {
+ return true;
+ } else {
+ // undo the adding of the sender state and delete it, as we
+ // will do it again the next time we attempt to send it
+ pkt->popSenderState();
+ delete walker_state;
+ return false;
+ }
+
+}
+
+// Expose the walker's single memory-side port ("port") to the Python
+// configuration layer; defer any other name to the ClockedObject base.
+Port &
+Walker::getPort(const std::string &if_name, PortID idx)
+{
+ if (if_name == "port")
+ return port;
+ else
+ return ClockedObject::getPort(if_name, idx);
+}
+
+// Bind a fresh (or reused, e.g. funcState) WalkerState to a thread
+// context and access mode before a walk starts, and record whether the
+// walk runs in timing mode. Resets 'started' so a reused state can
+// pass the assert in startWalk()/startFunctional().
+void
+Walker::WalkerState::initState(ThreadContext * _tc,
+ BaseTLB::Mode _mode, bool _isTiming)
+{
+ // The state machine must be idle between walks.
+ assert(state == Ready);
+ started = false;
+ tc = _tc;
+ mode = _mode;
+ timing = _isTiming;
+}
+
+void
+Walker::startWalkWrapper()
+{
+ unsigned num_squashed = 0;
+ WalkerState *currState = currStates.front();
+ while ((num_squashed < numSquashable) && currState &&
+ currState->translation->squashed()) {
+ currStates.pop_front();
+ num_squashed++;
+
+ DPRINTF(PageTableWalker, "Squashing table walk for address %#x\n",
+ currState->req->getVaddr());
+
+ // finish the translation which will delete the translation object
+ currState->translation->finish(
+ std::make_shared<UnimpFault>("Squashed Inst"),
+ currState->req, currState->tc, currState->mode);
+
+ // delete the current request if there are no inflight packets.
+ // if there is something in flight, delete when the packets are
+ // received and inflight is zero.
+ if (currState->numInflight() == 0) {
+ delete currState;
+ } else {
+ currState->squash();
+ }
+
+ // check the next translation request, if it exists
+ if (currStates.size())
+ currState = currStates.front();
+ else
+ currState = NULL;
+ }
+ if (currState && !currState->wasStarted())
+ currState->startWalk();
+}
+
+Fault
+Walker::WalkerState::startWalk()
+{
+ Fault fault = NoFault;
+ assert(!started);
+ started = true;
+ setupWalk(req->getVaddr());
+ if (timing) {
+ nextState = state;
+ state = Waiting;
+ timingFault = NoFault;
+ sendPackets();
+ } else {
+ do {
+ walker->port.sendAtomic(read);
+ PacketPtr write = NULL;
+ fault = stepWalk(write);
+ assert(fault == NoFault || read == NULL);
+ state = nextState;
+ nextState = Ready;
+ if (write)
+ walker->port.sendAtomic(write);
+ } while (read);
+ state = Ready;
+ nextState = Waiting;
+ }
+ return fault;
+}
+
+Fault
+Walker::WalkerState::startFunctional(Addr &addr, unsigned &logBytes)
+{
+ Fault fault = NoFault;
+ assert(!started);
+ started = true;
+ setupWalk(addr);
+
+ do {
+ walker->port.sendFunctional(read);
+ // On a functional access (page table lookup), writes should
+ // not happen so this pointer is ignored after stepWalk
+ PacketPtr write = NULL;
+ fault = stepWalk(write);
+ assert(fault == NoFault || read == NULL);
+ state = nextState;
+ nextState = Ready;
+ } while (read);
+ logBytes = entry.logBytes;
+ addr = entry.paddr << PageShift;
+
+ return fault;
+}
+
+Fault
+Walker::WalkerState::stepWalk(PacketPtr &write)
+{
+ assert(state != Ready && state != Waiting);
+ Fault fault = NoFault;
+ write = NULL;
+ PTESv39 pte = read->getLE<uint64_t>();
+ Addr nextRead = 0;
+ bool doWrite = false;
+ bool doTLBInsert = false;
+ bool doEndWalk = false;
+
+ DPRINTF(PageTableWalker, "Got level%d PTE: %#x\n", level, pte);
+
+ // step 2: TODO check PMA and PMP
+
+ // step 3:
+ if (!pte.v || (!pte.r && pte.w)) {
+ doEndWalk = true;
+ DPRINTF(PageTableWalker, "PTE invalid, raising PF\n");
+ fault = pageFault(pte.v);
+ }
+ else {
+ // step 4:
+ if (pte.r || pte.x) {
+ // step 5: leaf PTE
+ doEndWalk = true;
+ fault = walker->tlb->checkPermissions(tc, entry.vaddr, mode, pte);
+
+ // step 6
+ if (fault == NoFault) {
+ if (level >= 1 && pte.ppn0 != 0) {
+ DPRINTF(PageTableWalker,
+ "PTE has misaligned PPN, raising PF\n");
+ fault = pageFault(true);
+ }
+ else if (level == 2 && pte.ppn1 != 0) {
+ DPRINTF(PageTableWalker,
+ "PTE has misaligned PPN, raising PF\n");
+ fault = pageFault(true);
+ }
+ }
+
+ if (fault == NoFault) {
+ // step 7
+ if (!pte.a) {
+ pte.a = 1;
+ doWrite = true;
+ }
+ if (!pte.d && mode == TLB::Write) {
+ pte.d = 1;
+ doWrite = true;
+ }
+ // TODO check if this violates a PMA or PMP
+
+ // step 8
+ entry.logBytes = PageShift + (level * LEVEL_BITS);
+ entry.paddr = pte.ppn;
+ entry.vaddr &= ~((1 << entry.logBytes) - 1);
+ entry.pte = pte;
+ // put it non-writable into the TLB to detect writes and redo
+ // the page table walk in order to update the dirty flag.
+ if (!pte.d && mode != TLB::Write)
+ entry.pte.w = 0;
+ doTLBInsert = true;
+ }
+ }
+ else {
+ level--;
+ if (level < 0) {
+ DPRINTF(PageTableWalker, "No leaf PTE found, raising PF\n");
+ doEndWalk = true;
+ fault = pageFault(true);
+ }
+ else {
+ Addr shift = (PageShift + LEVEL_BITS * level);
+ Addr idx = (entry.vaddr >> shift) & LEVEL_MASK;
+ nextRead = (pte.ppn << PageShift) + (idx * sizeof(pte));
+ nextState = Translate;
+ }
+ }
+ }
+
+ PacketPtr oldRead = read;
+ Request::Flags flags = oldRead->req->getFlags();
+
+ if (doEndWalk) {
+ // If we need to write, adjust the read packet to write the modified
+ // value back to memory.
+ if (!functional && doWrite) {
+ DPRINTF(PageTableWalker, "Writing level%d PTE to %#x: %#x\n",
+ level, oldRead->getAddr(), pte);
+ write = oldRead;
+ write->setLE<uint64_t>(pte);
+ write->cmd = MemCmd::WriteReq;
+ read = NULL;
+ } else {
+ write = NULL;
+ }
+
+ if (doTLBInsert) {
+ if (!functional)
+ walker->tlb->insert(entry.vaddr, entry);
+ else {
+ Addr offset = entry.vaddr & mask(entry.logBytes);
+ Addr paddr = entry.paddr << PageShift | offset;
+ DPRINTF(PageTableWalker, "Translated %#x -> %#x\n",
+ entry.vaddr, paddr);
+ }
+ }
+ endWalk();
+ }
+ else {
+ //If we didn't return, we're setting up another read.
+ RequestPtr request = std::make_shared<Request>(
+ nextRead, oldRead->getSize(), flags, walker->masterId);
+ read = new Packet(request, MemCmd::ReadReq);
+ read->allocate();
+
+ DPRINTF(PageTableWalker,
+ "Loading level%d PTE from %#x\n", level, nextRead);
+ }
+
+ return fault;
+}
+
+// Tear down the current walk: drop the outstanding read packet (if a
+// write was pending, stepWalk() already transferred ownership and set
+// 'read' accordingly) and park the state machine.
+void
+Walker::WalkerState::endWalk()
+{
+ nextState = Ready;
+ delete read;
+ read = NULL;
+}
+
+void
+Walker::WalkerState::setupWalk(Addr vaddr)
+{
+ vaddr &= ((static_cast<Addr>(1) << VADDR_BITS) - 1);
+
+ SATP satp = tc->readMiscReg(MISCREG_SATP);
+ assert(satp.mode == AddrXlateMode::SV39);
+
+ Addr shift = PageShift + LEVEL_BITS * 2;
+ Addr idx = (vaddr >> shift) & LEVEL_MASK;
+ Addr topAddr = (satp.ppn << PageShift) + (idx * sizeof(PTESv39));
+ level = 2;
+
+ DPRINTF(PageTableWalker, "Performing table walk for address %#x\n", vaddr);
+ DPRINTF(PageTableWalker, "Loading level%d PTE from %#x\n", level, topAddr);
+
+ state = Translate;
+ nextState = Ready;
+ entry.vaddr = vaddr;
+ entry.asid = satp.asid;
+
+ Request::Flags flags = Request::PHYSICAL;
+ RequestPtr request = std::make_shared<Request>(
+ topAddr, sizeof(PTESv39), flags, walker->masterId);
+
+ read = new Packet(request, MemCmd::ReadReq);
+ read->allocate();
+}
+
+bool
+Walker::WalkerState::recvPacket(PacketPtr pkt)
+{
+ assert(pkt->isResponse());
+ assert(inflight);
+ assert(state == Waiting);
+ inflight--;
+ if (squashed) {
+ // if we were squashed, return true once inflight is zero and
+ // this WalkerState will be freed there.
+ return (inflight == 0);
+ }
+ if (pkt->isRead()) {
+ // should not have a pending read if we also had one outstanding
+ assert(!read);
+
+ // @todo someone should pay for this
+ pkt->headerDelay = pkt->payloadDelay = 0;
+
+ state = nextState;
+ nextState = Ready;
+ PacketPtr write = NULL;
+ read = pkt;
+ timingFault = stepWalk(write);
+ state = Waiting;
+ assert(timingFault == NoFault || read == NULL);
+ if (write) {
+ writes.push_back(write);
+ }
+ sendPackets();
+ } else {
+ sendPackets();
+ }
+ if (inflight == 0 && read == NULL && writes.size() == 0) {
+ state = Ready;
+ nextState = Waiting;
+ if (timingFault == NoFault) {
+ /*
+ * Finish the translation. Now that we know the right entry is
+ * in the TLB, this should work with no memory accesses.
+ * There could be new faults unrelated to the table walk like
+ * permissions violations, so we'll need the return value as
+ * well.
+ */
+ bool delayedResponse;
+ Fault fault = walker->tlb->doTranslate(req, tc, NULL, mode,
+ delayedResponse);
+ assert(!delayedResponse);
+ // Let the CPU continue.
+ translation->finish(fault, req, tc, mode);
+ } else {
+ // There was a fault during the walk. Let the CPU know.
+ translation->finish(timingFault, req, tc, mode);
+ }
+ return true;
+ }
+
+ return false;
+}
+
+void
+Walker::WalkerState::sendPackets()
+{
+ //If we're already waiting for the port to become available, just return.
+ if (retrying)
+ return;
+
+ //Reads always have priority
+ if (read) {
+ PacketPtr pkt = read;
+ read = NULL;
+ inflight++;
+ if (!walker->sendTiming(this, pkt)) {
+ retrying = true;
+ read = pkt;
+ inflight--;
+ return;
+ }
+ }
+ //Send off as many of the writes as we can.
+ while (writes.size()) {
+ PacketPtr write = writes.back();
+ writes.pop_back();
+ inflight++;
+ if (!walker->sendTiming(this, write)) {
+ retrying = true;
+ writes.push_back(write);
+ inflight--;
+ return;
+ }
+ }
+}
+
+unsigned
+Walker::WalkerState::numInflight() const
+{
+ return inflight;
+}
+
+bool
+Walker::WalkerState::isRetrying()
+{
+ return retrying;
+}
+
+bool
+Walker::WalkerState::isTiming()
+{
+ return timing;
+}
+
+bool
+Walker::WalkerState::wasStarted()
+{
+ return started;
+}
+
+void
+Walker::WalkerState::squash()
+{
+ squashed = true;
+}
+
+void
+Walker::WalkerState::retry()
+{
+ retrying = false;
+ sendPackets();
+}
+
+// Build a page-fault for the address/mode of the current walk.
+// NOTE(review): the 'present' argument is currently unused — the TLB
+// constructs the fault from the faulting vaddr and access mode alone;
+// confirm whether it should influence the fault cause.
+Fault
+Walker::WalkerState::pageFault(bool present)
+{
+ DPRINTF(PageTableWalker, "Raising page fault.\n");
+ return walker->tlb->createPagefault(entry.vaddr, mode);
+}
+
+} /* end namespace RiscvISA */
+
+RiscvISA::Walker *
+RiscvPagetableWalkerParams::create()
+{
+ return new RiscvISA::Walker(this);
+}
--- /dev/null
+/*
+ * Copyright (c) 2007 The Hewlett-Packard Development Company
+ * Copyright (c) 2020 Barkhausen Institut
+ * All rights reserved.
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __ARCH_RISCV_TABLE_WALKER_HH__
+#define __ARCH_RISCV_TABLE_WALKER_HH__
+
+#include <vector>
+
+#include "arch/riscv/pagetable.hh"
+#include "arch/riscv/tlb.hh"
+#include "base/types.hh"
+#include "mem/packet.hh"
+#include "params/RiscvPagetableWalker.hh"
+#include "sim/clocked_object.hh"
+#include "sim/faults.hh"
+#include "sim/system.hh"
+
+class ThreadContext;
+
+namespace RiscvISA
+{
+ class Walker : public ClockedObject
+ {
+ protected:
+ // Port for accessing memory
+ class WalkerPort : public MasterPort
+ {
+ public:
+ WalkerPort(const std::string &_name, Walker * _walker) :
+ MasterPort(_name, _walker), walker(_walker)
+ {}
+
+ protected:
+ Walker *walker;
+
+ bool recvTimingResp(PacketPtr pkt);
+ void recvReqRetry();
+ };
+
+ friend class WalkerPort;
+ WalkerPort port;
+
+ // State to track each walk of the page table
+ class WalkerState
+ {
+ friend class Walker;
+ private:
+ enum State {
+ Ready,
+ Waiting,
+ Translate,
+ };
+
+ protected:
+ Walker *walker;
+ ThreadContext *tc;
+ RequestPtr req;
+ State state;
+ State nextState;
+ int level;
+ unsigned inflight;
+ TlbEntry entry;
+ PacketPtr read;
+ std::vector<PacketPtr> writes;
+ Fault timingFault;
+ TLB::Translation * translation;
+ BaseTLB::Mode mode;
+ bool functional;
+ bool timing;
+ bool retrying;
+ bool started;
+ bool squashed;
+ public:
+ // Construct an idle walk. 'read' is explicitly nulled: it is a raw
+ // PacketPtr member that recvPacket()/sendPackets() test and assert
+ // on, so it must never hold an indeterminate value.
+ WalkerState(Walker * _walker, BaseTLB::Translation *_translation,
+ const RequestPtr &_req, bool _isFunctional = false) :
+ walker(_walker), req(_req), state(Ready),
+ nextState(Ready), level(0), inflight(0),
+ read(NULL), translation(_translation),
+ functional(_isFunctional), timing(false),
+ retrying(false), started(false), squashed(false)
+ {
+ }
+ void initState(ThreadContext * _tc, BaseTLB::Mode _mode,
+ bool _isTiming = false);
+ Fault startWalk();
+ Fault startFunctional(Addr &addr, unsigned &logBytes);
+ bool recvPacket(PacketPtr pkt);
+ unsigned numInflight() const;
+ bool isRetrying();
+ bool wasStarted();
+ bool isTiming();
+ void retry();
+ void squash();
+ std::string name() const {return walker->name();}
+
+ private:
+ void setupWalk(Addr vaddr);
+ Fault stepWalk(PacketPtr &write);
+ void sendPackets();
+ void endWalk();
+ Fault pageFault(bool present);
+ };
+
+ friend class WalkerState;
+ // State for timing and atomic accesses (need multiple per walker in
+ // the case of multiple outstanding requests in timing mode)
+ std::list<WalkerState *> currStates;
+ // State for functional accesses (only need one of these per walker)
+ WalkerState funcState;
+
+ struct WalkerSenderState : public Packet::SenderState
+ {
+ WalkerState * senderWalk;
+ WalkerSenderState(WalkerState * _senderWalk) :
+ senderWalk(_senderWalk) {}
+ };
+
+ public:
+ // Kick off the state machine.
+ Fault start(ThreadContext * _tc, BaseTLB::Translation *translation,
+ const RequestPtr &req, BaseTLB::Mode mode);
+ Fault startFunctional(ThreadContext * _tc, Addr &addr,
+ unsigned &logBytes, BaseTLB::Mode mode);
+ Port &getPort(const std::string &if_name,
+ PortID idx=InvalidPortID) override;
+
+ protected:
+ // The TLB we're supposed to load.
+ TLB * tlb;
+ System * sys;
+ MasterID masterId;
+
+ // The number of outstanding walks that can be squashed per cycle.
+ unsigned numSquashable;
+
+ // Wrapper for checking for squashes before starting a translation.
+ void startWalkWrapper();
+
+ /**
+ * Event used to call startWalkWrapper.
+ **/
+ EventFunctionWrapper startWalkWrapperEvent;
+
+ // Functions for dealing with packets.
+ bool recvTimingResp(PacketPtr pkt);
+ void recvReqRetry();
+ bool sendTiming(WalkerState * sendingState, PacketPtr pkt);
+
+ public:
+
+ void setTLB(TLB * _tlb)
+ {
+ tlb = _tlb;
+ }
+
+ typedef RiscvPagetableWalkerParams Params;
+
+ const Params *
+ params() const
+ {
+ return static_cast<const Params *>(_params);
+ }
+
+ Walker(const Params *params) :
+ ClockedObject(params), port(name() + ".port", this),
+ funcState(this, NULL, NULL, true), tlb(NULL), sys(params->system),
+ masterId(sys->getMasterId(this)),
+ numSquashable(params->num_squash_per_cycle),
+ startWalkWrapperEvent([this]{ startWalkWrapper(); }, name())
+ {
+ }
+ };
+}
+
+#endif // __ARCH_RISCV_TABLE_WALKER_HH__
/*
* Copyright (c) 2001-2005 The Regents of The University of Michigan
* Copyright (c) 2007 MIPS Technologies, Inc.
+ * Copyright (c) 2020 Barkhausen Institut
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#include "arch/riscv/faults.hh"
#include "arch/riscv/fs_workload.hh"
#include "arch/riscv/pagetable.hh"
+#include "arch/riscv/pagetable_walker.hh"
#include "arch/riscv/pra_constants.hh"
#include "arch/riscv/utility.hh"
#include "base/inifile.hh"
#include "base/str.hh"
#include "base/trace.hh"
#include "cpu/thread_context.hh"
-#include "debug/RiscvTLB.hh"
#include "debug/TLB.hh"
+#include "debug/TLBVerbose.hh"
#include "mem/page_table.hh"
#include "params/RiscvTLB.hh"
#include "sim/full_system.hh"
// RISC-V TLB
//
-TLB::TLB(const Params *p)
- : BaseTLB(p), size(p->size), nlu(0)
+// Combine ASID and virtual page address into a single trie key. The
+// 16-bit ASID lives above bit 48, out of the VPN's range.
+static Addr
+buildKey(Addr vpn, uint16_t asid)
{
- table = new PTE[size];
- memset(table, 0, sizeof(PTE[size]));
- smallPages = 0;
+ return (static_cast<Addr>(asid) << 48) | vpn;
}
-TLB::~TLB()
+// Build an empty TLB: every entry starts on the free list with no trie
+// handle; the TLB registers itself with its page-table walker.
+TLB::TLB(const Params *p)
+ : BaseTLB(p), size(p->size), tlb(size), lruSeq(0)
{
- if (table)
- delete [] table;
+ for (size_t x = 0; x < size; x++) {
+ tlb[x].trieHandle = NULL;
+ freeList.push_back(&tlb[x]);
+ }
+
+ walker = p->walker;
+ walker->setTLB(this);
}
-// look up an entry in the TLB
-RiscvISA::PTE *
-TLB::lookup(Addr vpn, uint8_t asn) const
+// Accessor used by translateFunctional() to borrow this TLB's walker.
+Walker *
+TLB::getWalker()
{
- // assume not found...
- PTE *retval = nullptr;
- PageTable::const_iterator i = lookupTable.find(vpn);
- if (i != lookupTable.end()) {
- while (i->first == vpn) {
- int index = i->second;
- PTE *pte = &table[index];
-
- /* 1KB TLB Lookup code - from MIPS ARM Volume III - Rev. 2.50 */
- Addr Mask = pte->Mask;
- Addr InvMask = ~Mask;
- Addr VPN = pte->VPN;
- if (((vpn & InvMask) == (VPN & InvMask)) &&
- (pte->G || (asn == pte->asid))) {
- // We have a VPN + ASID Match
- retval = pte;
- break;
- }
- ++i;
- }
- }
-
- DPRINTF(TLB, "lookup %#x, asn %#x -> %s ppn %#x\n", vpn, (int)asn,
- retval ? "hit" : "miss", retval ? retval->PFN1 : 0);
- return retval;
+ return walker;
}
-RiscvISA::PTE*
-TLB::getEntry(unsigned Index) const
+// Evict the least-recently-used entry to make room for a new one.
+void
+TLB::evictLRU()
{
- // Make sure that Index is valid
- assert(Index<size);
- return &table[Index];
+ // Find the entry with the lowest (and hence least recently updated)
+ // sequence number.
+
+ size_t lru = 0;
+ for (size_t i = 1; i < size; i++) {
+ if (tlb[i].lruSeq < tlb[lru].lruSeq)
+ lru = i;
+ }
+
+ remove(lru);
}
-int
-TLB::probeEntry(Addr vpn, uint8_t asn) const
+// Look up (vpn, asid) in the trie. When `hidden` is false the access
+// bumps the entry's LRU sequence number and updates the hit/miss
+// statistics; hidden lookups (from insert/demap) leave both untouched.
+TlbEntry *
+TLB::lookup(Addr vpn, uint16_t asid, Mode mode, bool hidden)
{
- // assume not found...
- int Ind = -1;
- PageTable::const_iterator i = lookupTable.find(vpn);
- if (i != lookupTable.end()) {
- while (i->first == vpn) {
- int index = i->second;
- PTE *pte = &table[index];
-
- /* 1KB TLB Lookup code - from MIPS ARM Volume III - Rev. 2.50 */
- Addr Mask = pte->Mask;
- Addr InvMask = ~Mask;
- Addr VPN = pte->VPN;
- if (((vpn & InvMask) == (VPN & InvMask)) &&
- (pte->G || (asn == pte->asid))) {
- // We have a VPN + ASID Match
- Ind = index;
- break;
- }
- ++i;
+ TlbEntry *entry = trie.lookup(buildKey(vpn, asid));
+
+ if (!hidden) {
+ if (entry)
+ entry->lruSeq = nextSeq();
+
+ if (mode == Write)
+ write_accesses++;
+ else
+ read_accesses++;
+
+ if (!entry) {
+ if (mode == Write)
+ write_misses++;
+ else
+ read_misses++;
}
+ else {
+ if (mode == Write)
+ write_hits++;
+ else
+ read_hits++;
+ }
+
+ DPRINTF(TLBVerbose, "lookup(vpn=%#x, asid=%#x): %s ppn %#x\n",
+ vpn, asid, entry ? "hit" : "miss", entry ? entry->paddr : 0);
}
- DPRINTF(RiscvTLB,"VPN: %x, asid: %d, Result of TLBP: %d\n",vpn,asn,Ind);
- return Ind;
+
+ return entry;
}
-inline Fault
-TLB::checkCacheability(const RequestPtr &req)
+// Insert a walker-produced translation. If an entry for (vpn, asid)
+// already exists, only its PTE flags are refreshed; otherwise a free
+// entry is claimed (evicting the LRU entry when full) and added to the
+// trie, masked by the page size via logBytes.
+TlbEntry *
+TLB::insert(Addr vpn, const TlbEntry &entry)
{
- Addr VAddrUncacheable = 0xA0000000;
- // In MIPS, cacheability is controlled by certain bits of the virtual
- // address or by the TLB entry
- if ((req->getVaddr() & VAddrUncacheable) == VAddrUncacheable) {
- // mark request as uncacheable
- req->setFlags(Request::UNCACHEABLE | Request::STRICT_ORDER);
+ DPRINTF(TLB, "insert(vpn=%#x, asid=%#x): ppn=%#x pte=%#x size=%#x\n",
+ vpn, entry.asid, entry.paddr, entry.pte, entry.size());
+
+ // If somebody beat us to it, just use that existing entry.
+ TlbEntry *newEntry = lookup(vpn, entry.asid, Mode::Read, true);
+ if (newEntry) {
+ // update PTE flags (maybe we set the dirty/writable flag)
+ newEntry->pte = entry.pte;
+ assert(newEntry->vaddr == vpn);
+ return newEntry;
}
- return NoFault;
+
+ if (freeList.empty())
+ evictLRU();
+
+ newEntry = freeList.front();
+ freeList.pop_front();
+
+ Addr key = buildKey(vpn, entry.asid);
+ *newEntry = entry;
+ newEntry->lruSeq = nextSeq();
+ newEntry->vaddr = vpn;
+ newEntry->trieHandle =
+ trie.insert(key, TlbEntryTrie::MaxBits - entry.logBytes, newEntry);
+ return newEntry;
}
void
-TLB::insertAt(PTE &pte, unsigned Index, int _smallPages)
+TLB::demapPage(Addr vpn, uint64_t asid)
{
- smallPages = _smallPages;
- if (Index > size) {
- warn("Attempted to write at index (%d) beyond TLB size (%d)",
- Index, size);
- } else {
- // Update TLB
- DPRINTF(TLB, "TLB[%d]: %x %x %x %x\n",
- Index, pte.Mask << 11,
- ((pte.VPN << 11) | pte.asid),
- ((pte.PFN0 << 6) | (pte.C0 << 3) |
- (pte.D0 << 2) | (pte.V0 <<1) | pte.G),
- ((pte.PFN1 <<6) | (pte.C1 << 3) |
- (pte.D1 << 2) | (pte.V1 <<1) | pte.G));
- if (table[Index].V0 || table[Index].V1) {
- // Previous entry is valid
- PageTable::iterator i = lookupTable.find(table[Index].VPN);
- lookupTable.erase(i);
+ // SFENCE.VMA-style flush: a zero vpn or zero asid acts as a
+ // wildcard; both zero flushes the whole TLB.
+ asid &= 0xFFFF;
+
+ if (vpn == 0 && asid == 0)
+ flushAll();
+ else {
+ DPRINTF(TLB, "flush(vpn=%#x, asid=%#x)\n", vpn, asid);
+ if (vpn != 0 && asid != 0) {
+ TlbEntry *newEntry = lookup(vpn, asid, Mode::Read, true);
+ if (newEntry)
+ remove(newEntry - tlb.data());
+ }
+ else {
+ // One of the two is a wildcard: scan all valid entries and
+ // drop those matching the non-wildcard field.
+ for (size_t i = 0; i < size; i++) {
+ if (tlb[i].trieHandle) {
+ Addr mask = ~(tlb[i].size() - 1);
+ if ((vpn == 0 || (vpn & mask) == tlb[i].vaddr) &&
+ (asid == 0 || tlb[i].asid == asid))
+ remove(i);
+ }
+ }
}
- table[Index]=pte;
- // Update fast lookup table
- lookupTable.insert(make_pair(table[Index].VPN, Index));
}
}
-// insert a new TLB entry
void
-TLB::insert(Addr addr, PTE &pte)
+TLB::flushAll()
{
- fatal("TLB Insert not yet implemented\n");
+ DPRINTF(TLB, "flushAll()\n");
+ // Remove every entry that currently has a trie handle (is mapped).
+ for (size_t i = 0; i < size; i++) {
+ if (tlb[i].trieHandle)
+ remove(i);
+ }
}
void
-TLB::flushAll()
+TLB::remove(size_t idx)
{
- DPRINTF(TLB, "flushAll\n");
- memset(table, 0, sizeof(PTE[size]));
- lookupTable.clear();
- nlu = 0;
+ DPRINTF(TLB, "remove(vpn=%#x, asid=%#x): ppn=%#x pte=%#x size=%#x\n",
+ tlb[idx].vaddr, tlb[idx].asid, tlb[idx].paddr, tlb[idx].pte,
+ tlb[idx].size());
+
+ // Unlink from the trie and return the slot to the free list.
+ assert(tlb[idx].trieHandle);
+ trie.remove(tlb[idx].trieHandle);
+ tlb[idx].trieHandle = NULL;
+ freeList.push_back(&tlb[idx]);
}
-void
-TLB::serialize(CheckpointOut &cp) const
+// Check the R/W/X bits of the PTE against the access mode and the U
+// bit against the effective privilege mode (honoring mstatus.SUM for
+// S-mode accesses to user pages). Returns a page fault or NoFault.
+Fault
+TLB::checkPermissions(ThreadContext *tc, Addr vaddr, Mode mode, PTESv39 pte)
{
- SERIALIZE_SCALAR(size);
- SERIALIZE_SCALAR(nlu);
+ Fault fault = NoFault;
- for (int i = 0; i < size; i++) {
- ScopedCheckpointSection sec(cp, csprintf("PTE%d", i));
- table[i].serialize(cp);
+ if (mode == TLB::Read && !pte.r) {
+ DPRINTF(TLB, "PTE has no read perm, raising PF\n");
+ fault = createPagefault(vaddr, mode);
+ }
+ else if (mode == TLB::Write && !pte.w) {
+ DPRINTF(TLB, "PTE has no write perm, raising PF\n");
+ fault = createPagefault(vaddr, mode);
+ }
+ else if (mode == TLB::Execute && !pte.x) {
+ DPRINTF(TLB, "PTE has no exec perm, raising PF\n");
+ fault = createPagefault(vaddr, mode);
}
-}
-void
-TLB::unserialize(CheckpointIn &cp)
-{
- UNSERIALIZE_SCALAR(size);
- UNSERIALIZE_SCALAR(nlu);
-
- for (int i = 0; i < size; i++) {
- ScopedCheckpointSection sec(cp, csprintf("PTE%d", i));
- table[i].unserialize(cp);
- if (table[i].V0 || table[i].V1) {
- lookupTable.insert(make_pair(table[i].VPN, i));
+ if (fault == NoFault) {
+ // check pte.u
+ STATUS status = tc->readMiscReg(MISCREG_STATUS);
+ PrivilegeMode pmode = getMemPriv(tc, mode);
+ if (pmode == PrivilegeMode::PRV_U && !pte.u) {
+ DPRINTF(TLB, "PTE is not user accessible, raising PF\n");
+ fault = createPagefault(vaddr, mode);
+ }
+ else if (pmode == PrivilegeMode::PRV_S && pte.u && status.sum == 0) {
+ DPRINTF(TLB, "PTE is only user accessible, raising PF\n");
+ fault = createPagefault(vaddr, mode);
}
}
+
+ return fault;
}
-void
-TLB::regStats()
+// Build the page-fault matching the access mode (load/store/fetch).
+Fault
+TLB::createPagefault(Addr vaddr, Mode mode)
{
- BaseTLB::regStats();
-
- read_hits
- .name(name() + ".read_hits")
- .desc("DTB read hits")
- ;
-
- read_misses
- .name(name() + ".read_misses")
- .desc("DTB read misses")
- ;
-
-
- read_accesses
- .name(name() + ".read_accesses")
- .desc("DTB read accesses")
- ;
-
- write_hits
- .name(name() + ".write_hits")
- .desc("DTB write hits")
- ;
-
- write_misses
- .name(name() + ".write_misses")
- .desc("DTB write misses")
- ;
-
-
- write_accesses
- .name(name() + ".write_accesses")
- .desc("DTB write accesses")
- ;
-
- hits
- .name(name() + ".hits")
- .desc("DTB hits")
- ;
-
- misses
- .name(name() + ".misses")
- .desc("DTB misses")
- ;
-
- accesses
- .name(name() + ".accesses")
- .desc("DTB accesses")
- ;
-
- hits = read_hits + write_hits;
- misses = read_misses + write_misses;
- accesses = read_accesses + write_accesses;
+ ExceptionCode code;
+ if (mode == TLB::Read)
+ code = ExceptionCode::LOAD_PAGE;
+ else if (mode == TLB::Write)
+ code = ExceptionCode::STORE_PAGE;
+ else
+ code = ExceptionCode::INST_PAGE;
+ return std::make_shared<AddressFault>(vaddr, code);
}
Fault
-TLB::translateInst(const RequestPtr &req, ThreadContext *tc)
+// Core virtual->physical translation: consult the TLB, fall back to
+// the hardware page-table walker on a miss, check permissions, and set
+// the request's physical address. `delayed` is set when a timing-mode
+// walk is in flight and the caller must wait for the walker.
+TLB::doTranslate(const RequestPtr &req, ThreadContext *tc,
+ Translation *translation, Mode mode, bool &delayed)
{
- if (FullSystem) {
- /**
- * check if we simulate a bare metal system
- * if so, we have no tlb, phys addr == virt addr
- */
- auto *workload = dynamic_cast<FsWorkload *>(
- tc->getSystemPtr()->workload);
- if (workload->isBareMetal())
- req->setFlags(Request::PHYSICAL);
+ delayed = false;
- if (req->getFlags() & Request::PHYSICAL) {
- /**
- * we simply set the virtual address to physical address
- */
- req->setPaddr(req->getVaddr());
- return checkCacheability(req);
- } else {
- /**
- * as we currently support bare metal only, we throw a panic,
- * if it is not a bare metal system
- */
- panic("translateInst not implemented in RISC-V.\n");
+ Addr vaddr = req->getVaddr() & ((static_cast<Addr>(1) << VADDR_BITS) - 1);
+ SATP satp = tc->readMiscReg(MISCREG_SATP);
+
+ TlbEntry *e = lookup(vaddr, satp.asid, mode, false);
+ if (!e) {
+ Fault fault = walker->start(tc, translation, req, mode);
+ if (translation != nullptr || fault != NoFault) {
+ // This gets ignored in atomic mode.
+ delayed = true;
+ return fault;
}
- } else {
- Process * p = tc->getProcessPtr();
+ e = lookup(vaddr, satp.asid, mode, false);
+ assert(e != nullptr);
+ }
- Fault fault = p->pTable->translate(req);
+ Fault fault = checkPermissions(tc, vaddr, mode, e->pte);
+ if (fault != NoFault) {
+ // if we want to write and it isn't writable, do a page table walk
+ // again to update the dirty flag.
+ if (mode == TLB::Write && !e->pte.w) {
+ DPRINTF(TLB, "Dirty bit not set, repeating PT walk\n");
+ fault = walker->start(tc, translation, req, mode);
+ if (translation != nullptr || fault != NoFault) {
+ delayed = true;
+ return fault;
+ }
+ }
+ // NOTE(review): after a successful atomic-mode re-walk the stale
+ // entry's permissions are not re-checked here — this assumes the
+ // walker itself faults if the page is still not writable; verify.
if (fault != NoFault)
return fault;
-
- return NoFault;
}
+
+ Addr paddr = e->paddr << PageShift | (vaddr & mask(e->logBytes));
+ DPRINTF(TLBVerbose, "translate(vpn=%#x, asid=%#x): %#x\n",
+ vaddr, satp.asid, paddr);
+ req->setPaddr(paddr);
+
+ return NoFault;
+}
+
+// Determine the effective privilege level for a memory access: the
+// current mode, except that mstatus.MPRV redirects non-fetch accesses
+// to the privilege level stored in mstatus.MPP.
+PrivilegeMode
+TLB::getMemPriv(ThreadContext *tc, Mode mode)
+{
+ STATUS status = (STATUS)tc->readMiscReg(MISCREG_STATUS);
+ PrivilegeMode pmode = (PrivilegeMode)tc->readMiscReg(MISCREG_PRV);
+ if (mode != Mode::Execute && status.mprv == 1)
+ pmode = (PrivilegeMode)(RegVal)status.mpp;
+ return pmode;
+}
Fault
-TLB::translateData(const RequestPtr &req, ThreadContext *tc, bool write)
+// Full-system entry point shared by atomic and timing translation:
+// bypass translation in M-mode or with SATP in Bare mode, otherwise
+// defer to doTranslate(); finally reject "negative" physical
+// addresses (bit 63 set) with an access fault.
+TLB::translate(const RequestPtr &req, ThreadContext *tc,
+ Translation *translation, Mode mode, bool &delayed)
{
+ delayed = false;
+
if (FullSystem) {
- /**
- * check if we simulate a bare metal system
- * if so, we have no tlb, phys addr == virt addr
- */
- auto *workload = dynamic_cast<FsWorkload *>(
- tc->getSystemPtr()->workload);
- if (workload->isBareMetal())
+ PrivilegeMode pmode = getMemPriv(tc, mode);
+ SATP satp = tc->readMiscReg(MISCREG_SATP);
+ if (pmode == PrivilegeMode::PRV_M || satp.mode == AddrXlateMode::BARE)
req->setFlags(Request::PHYSICAL);
+ Fault fault;
if (req->getFlags() & Request::PHYSICAL) {
/**
* we simply set the virtual address to physical address
*/
req->setPaddr(req->getVaddr());
- return checkCacheability(req);
+ fault = NoFault;
} else {
- /**
- * as we currently support bare metal only, we throw a panic,
- * if it is not a bare metal system
- */
- panic("translateData not implemented in RISC-V.\n");
+ fault = doTranslate(req, tc, translation, mode, delayed);
}
+
+ // according to the RISC-V tests, negative physical addresses trigger
+ // an illegal address exception.
+ // TODO where is that written in the manual?
+ if (!delayed && fault == NoFault && bits(req->getPaddr(), 63)) {
+ ExceptionCode code;
+ if (mode == TLB::Read)
+ code = ExceptionCode::LOAD_ACCESS;
+ else if (mode == TLB::Write)
+ code = ExceptionCode::STORE_ACCESS;
+ else
+ code = ExceptionCode::INST_ACCESS;
+ fault = make_shared<AddressFault>(req->getVaddr(), code);
+ }
+
+ return fault;
} else {
// In the O3 CPU model, sometimes a memory access will be speculatively
// executed along a branch that will end up not being taken where the
Fault
TLB::translateAtomic(const RequestPtr &req, ThreadContext *tc, Mode mode)
{
- if (mode == Execute)
- return translateInst(req, tc);
- else
- return translateData(req, tc, mode == Write);
+ // Atomic mode cannot stall: passing a null Translation makes the
+ // walker complete synchronously, so `delayed` is ignored.
+ bool delayed;
+ return translate(req, tc, nullptr, mode, delayed);
}
void
TLB::translateTiming(const RequestPtr &req, ThreadContext *tc,
- Translation *translation, Mode mode)
+ Translation *translation, Mode mode)
{
+ bool delayed;
assert(translation);
- translation->finish(translateAtomic(req, tc, mode), req, tc, mode);
+ Fault fault = translate(req, tc, translation, mode, delayed);
+ // If a table walk is in flight, the walker will finish the
+ // translation later; otherwise complete it immediately.
+ if (!delayed)
+ translation->finish(fault, req, tc, mode);
+ else
+ translation->markDelayed();
}
Fault
TLB::translateFunctional(const RequestPtr &req, ThreadContext *tc, Mode mode)
{
- panic_if(FullSystem,
- "translateFunctional not implemented for full system.");
-
const Addr vaddr = req->getVaddr();
- Process *process = tc->getProcessPtr();
- const auto *pte = process->pTable->lookup(vaddr);
-
- if (!pte && mode != Execute) {
- // Check if we just need to grow the stack.
- if (process->fixupFault(vaddr)) {
- // If we did, lookup the entry for the new page.
- pte = process->pTable->lookup(vaddr);
+ Addr paddr = vaddr;
+
+ if (FullSystem) {
+ // Full system: walk the page table functionally (no timing, no
+ // TLB state changes) unless translation is disabled (M-mode or
+ // Bare SATP), in which case paddr == vaddr.
+ TLB *tlb = dynamic_cast<TLB *>(tc->getDTBPtr());
+
+ PrivilegeMode pmode = tlb->getMemPriv(tc, mode);
+ SATP satp = tc->readMiscReg(MISCREG_SATP);
+ if (pmode != PrivilegeMode::PRV_M &&
+ satp.mode != AddrXlateMode::BARE) {
+ Walker *walker = tlb->getWalker();
+ unsigned logBytes;
+ Fault fault = walker->startFunctional(
+ tc, paddr, logBytes, mode);
+ if (fault != NoFault)
+ return fault;
+
+ // Combine the walked page frame with the in-page offset.
+ Addr masked_addr = vaddr & mask(logBytes);
+ paddr |= masked_addr;
}
}
+ else {
+ Process *process = tc->getProcessPtr();
+ const auto *pte = process->pTable->lookup(vaddr);
+
+ if (!pte && mode != Execute) {
+ // Check if we just need to grow the stack.
+ if (process->fixupFault(vaddr)) {
+ // If we did, lookup the entry for the new page.
+ pte = process->pTable->lookup(vaddr);
+ }
+ }
- if (!pte)
- return std::make_shared<GenericPageTableFault>(req->getVaddr());
+ if (!pte)
+ return std::make_shared<GenericPageTableFault>(req->getVaddr());
- Addr paddr = pte->paddr | process->pTable->pageOffset(vaddr);
+ paddr = pte->paddr | process->pTable->pageOffset(vaddr);
+ }
DPRINTF(TLB, "Translated (functional) %#x -> %#x.\n", vaddr, paddr);
req->setPaddr(paddr);
return NoFault;
}
+// Checkpoint the TLB: the count of valid entries, the LRU clock, and
+// one "Entry%d" section per mapped entry.
+void
+TLB::serialize(CheckpointOut &cp) const
+{
+ // Only store the entries in use.
+ uint32_t _size = size - freeList.size();
+ SERIALIZE_SCALAR(_size);
+ SERIALIZE_SCALAR(lruSeq);
+
+ uint32_t _count = 0;
+ for (uint32_t x = 0; x < size; x++) {
+ if (tlb[x].trieHandle != NULL)
+ tlb[x].serializeSection(cp, csprintf("Entry%d", _count++));
+ }
+}
-RiscvISA::PTE &
-TLB::index(bool advance)
+// Restore checkpointed entries, rebuilding the lookup trie from each
+// entry's saved vaddr/asid/logBytes.
+void
+TLB::unserialize(CheckpointIn &cp)
+{
+ // Do not allow to restore with a smaller tlb.
+ uint32_t _size;
+ UNSERIALIZE_SCALAR(_size);
+ if (_size > size) {
+ fatal("TLB size less than the one in checkpoint!");
+ }
+
+ UNSERIALIZE_SCALAR(lruSeq);
+
+ for (uint32_t x = 0; x < _size; x++) {
+ TlbEntry *newEntry = freeList.front();
+ freeList.pop_front();
+
+ newEntry->unserializeSection(cp, csprintf("Entry%d", x));
+ Addr key = buildKey(newEntry->vaddr, newEntry->asid);
+ newEntry->trieHandle = trie.insert(key,
+ TlbEntryTrie::MaxBits - newEntry->logBytes, newEntry);
+ }
+}
+
+// Register the read/write hit, miss and access statistics; the
+// aggregate hits/misses/accesses are formulas over the per-mode pairs.
+void
+TLB::regStats()
{
- PTE *pte = &table[nlu];
+ BaseTLB::regStats();
+
+ read_hits
+ .name(name() + ".read_hits")
+ .desc("DTB read hits")
+ ;
+
+ read_misses
+ .name(name() + ".read_misses")
+ .desc("DTB read misses")
+ ;
+
+
+ read_accesses
+ .name(name() + ".read_accesses")
+ .desc("DTB read accesses")
+ ;
+
+ write_hits
+ .name(name() + ".write_hits")
+ .desc("DTB write hits")
+ ;
+
+ write_misses
+ .name(name() + ".write_misses")
+ .desc("DTB write misses")
+ ;
- if (advance)
- nextnlu();
- return *pte;
+ write_accesses
+ .name(name() + ".write_accesses")
+ .desc("DTB write accesses")
+ ;
+
+ hits
+ .name(name() + ".hits")
+ .desc("DTB hits")
+ ;
+
+ misses
+ .name(name() + ".misses")
+ .desc("DTB misses")
+ ;
+
+ accesses
+ .name(name() + ".accesses")
+ .desc("DTB accesses")
+ ;
+
+ hits = read_hits + write_hits;
+ misses = read_misses + write_misses;
+ accesses = read_accesses + write_accesses;
}
RiscvISA::TLB *
/*
* Copyright (c) 2001-2005 The Regents of The University of Michigan
* Copyright (c) 2007 MIPS Technologies, Inc.
+ * Copyright (c) 2020 Barkhausen Institut
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#ifndef __ARCH_RISCV_TLB_HH__
#define __ARCH_RISCV_TLB_HH__
-#include <map>
+#include <list>
#include "arch/generic/tlb.hh"
+#include "arch/riscv/isa.hh"
#include "arch/riscv/isa_traits.hh"
#include "arch/riscv/pagetable.hh"
#include "arch/riscv/utility.hh"
simply create an ITLB and DTLB that will point to the real TLB */
namespace RiscvISA {
+// Forward declaration; the walker header would create a cycle here.
+class Walker;
+
class TLB : public BaseTLB
{
- protected:
- typedef std::multimap<Addr, int> PageTable;
- PageTable lookupTable; // Quick lookup into page table
+ typedef std::list<TlbEntry *> EntryList;
- RiscvISA::PTE *table; // the Page Table
- int size; // TLB Size
- int nlu; // not last used entry (for replacement)
+ protected:
+ size_t size;
+ std::vector<TlbEntry> tlb; // our TLB
+ TlbEntryTrie trie; // for quick access
+ EntryList freeList; // free entries
+ uint64_t lruSeq; // global LRU clock; bumped on each lookup
- void nextnlu() { if (++nlu >= size) nlu = 0; }
- RiscvISA::PTE *lookup(Addr vpn, uint8_t asn) const;
+ Walker *walker;
mutable Stats::Scalar read_hits;
mutable Stats::Scalar read_misses;
typedef RiscvTLBParams Params;
TLB(const Params *p);
- int probeEntry(Addr vpn,uint8_t) const;
- RiscvISA::PTE *getEntry(unsigned) const;
- virtual ~TLB();
+ Walker *getWalker();
void takeOverFrom(BaseTLB *otlb) override {}
- int smallPages;
- int getsize() const { return size; }
-
- RiscvISA::PTE &index(bool advance = true);
- void insert(Addr vaddr, RiscvISA::PTE &pte);
- void insertAt(RiscvISA::PTE &pte, unsigned Index, int _smallPages);
+ TlbEntry *insert(Addr vpn, const TlbEntry &entry);
void flushAll() override;
- void demapPage(Addr vaddr, uint64_t asn) override
- {
- panic("demapPage unimplemented.\n");
- }
+ void demapPage(Addr vaddr, uint64_t asn) override;
- // static helper functions... really
- static bool validVirtualAddress(Addr vaddr);
+ Fault checkPermissions(ThreadContext *tc, Addr vaddr,
+ Mode mode, PTESv39 pte);
+ Fault createPagefault(Addr vaddr, Mode mode);
- static Fault checkCacheability(const RequestPtr &req);
+ PrivilegeMode getMemPriv(ThreadContext *tc, Mode mode);
// Checkpointing
void serialize(CheckpointOut &cp) const override;
void regStats() override;
- Fault translateAtomic(
- const RequestPtr &req, ThreadContext *tc, Mode mode) override;
- void translateTiming(
- const RequestPtr &req, ThreadContext *tc,
- Translation *translation, Mode mode) override;
- Fault translateFunctional(
- const RequestPtr &req, ThreadContext *tc, Mode mode) override;
- Fault finalizePhysical(
- const RequestPtr &req,
- ThreadContext *tc, Mode mode) const override;
+ Fault doTranslate(const RequestPtr &req, ThreadContext *tc,
+ Translation *translation, Mode mode, bool &delayed);
+
+ Fault translateAtomic(const RequestPtr &req,
+ ThreadContext *tc, Mode mode) override;
+ void translateTiming(const RequestPtr &req, ThreadContext *tc,
+ Translation *translation, Mode mode) override;
+ Fault translateFunctional(const RequestPtr &req,
+ ThreadContext *tc, Mode mode) override;
+ Fault finalizePhysical(const RequestPtr &req,
+ ThreadContext *tc, Mode mode) const override;
private:
- Fault translateInst(const RequestPtr &req, ThreadContext *tc);
- Fault translateData(const RequestPtr &req, ThreadContext *tc, bool write);
-};
+ uint64_t nextSeq() { return ++lruSeq; }
-}
+ TlbEntry *lookup(Addr vpn, uint16_t asid, Mode mode, bool hidden);
+ void evictLRU();
+ void remove(size_t idx);
+ Fault translate(const RequestPtr &req, ThreadContext *tc,
+ Translation *translation, Mode mode, bool &delayed);
+};
+
+}
#endif // __RISCV_MEMORY_HH__