accessDistance = p->accessDistance;
clock = p->clk_domain->clockPeriod();
- tlb.assign(size, GpuTlbEntry());
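+ // Pre-fill the TLB storage with default-constructed entries; the
+ // free/entry lists below track which ones are actually in use.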
+ tlb.assign(size, TlbEntry());
freeList.resize(numSets);
entryList.resize(numSets);
}
}
- GpuTlbEntry*
- GpuTLB::insert(Addr vpn, GpuTlbEntry &entry)
+ TlbEntry*
+ GpuTLB::insert(Addr vpn, TlbEntry &entry)
{
- GpuTlbEntry *newEntry = nullptr;
+ TlbEntry *newEntry = nullptr;
/**
* vpn holds the virtual page address
return entry;
}
- GpuTlbEntry*
+ TlbEntry*
GpuTLB::lookup(Addr va, bool update_lru)
{
int set = (va >> TheISA::PageShift) & setMask;
for (int i = 0; i < numSets; ++i) {
while (!entryList[i].empty()) {
- GpuTlbEntry *entry = entryList[i].front();
+ TlbEntry *entry = entryList[i].front();
entryList[i].pop_front();
freeList[i].push_back(entry);
}
if (m5Reg.paging) {
DPRINTF(GPUTLB, "Paging enabled.\n");
// Update the LRU stack on a hit.
- GpuTlbEntry *entry = lookup(vaddr, true);
+ TlbEntry *entry = lookup(vaddr, true);
if (entry)
tlb_hit = true;
if (m5Reg.paging) {
DPRINTF(GPUTLB, "Paging enabled.\n");
// The vaddr already has the segment base applied.
- GpuTlbEntry *entry = lookup(vaddr);
+ TlbEntry *entry = lookup(vaddr);
localNumTLBAccesses++;
if (!entry) {
DPRINTF(GPUTLB, "Mapping %#x to %#x\n",
alignedVaddr, pte->paddr);
- GpuTlbEntry gpuEntry(
- p->pTable->pid(), alignedVaddr,
- pte->paddr, true);
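+ // The two trailing bools are the base TlbEntry's uncacheable and
+ // global flags; GpuTlbEntry's constructor passed the same values
+ // (see the removed class below).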
+ TlbEntry gpuEntry(p->pid(), alignedVaddr,
+ pte->paddr, false, false);
entry = insert(alignedVaddr, gpuEntry);
}
if (success) {
lookup_outcome = TLB_HIT;
// Put the entry in SenderState
- GpuTlbEntry *entry = lookup(tmp_req->getVaddr(), false);
+ TlbEntry *entry = lookup(tmp_req->getVaddr(), false);
assert(entry);
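+ // Tag the copied entry with the owning process' PID rather than
+ // the hardcoded address-space number 0 used by GpuTlbEntry.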
+ auto p = sender_state->tc->getProcessPtr();
sender_state->tlbEntry =
- new GpuTlbEntry(0, entry->vaddr, entry->paddr, entry->valid);
+ new TlbEntry(p->pid(), entry->vaddr, entry->paddr,
+ false, false);
if (update_stats) {
// the reqCnt has an entry per level, so its size tells us
*/
void
GpuTLB::pagingProtectionChecks(ThreadContext *tc, PacketPtr pkt,
- GpuTlbEntry * tlb_entry, Mode mode)
+ TlbEntry * tlb_entry, Mode mode)
{
HandyM5Reg m5Reg = tc->readMiscRegNoEffect(MISCREG_M5_REG);
uint32_t flags = pkt->req->getFlags();
ThreadContext *tc = sender_state->tc;
Mode mode = sender_state->tlbMode;
- GpuTlbEntry *local_entry, *new_entry;
+ TlbEntry *local_entry, *new_entry;
if (tlb_outcome == TLB_HIT) {
DPRINTF(GPUTLB, "Translation Done - TLB Hit for addr %#x\n", vaddr);
pte->paddr);
sender_state->tlbEntry =
- new GpuTlbEntry(0, virtPageAddr, pte->paddr, true);
+ new TlbEntry(p->pid(), virtPageAddr, pte->paddr, false,
+ false);
} else {
- sender_state->tlbEntry =
- new GpuTlbEntry(0, 0, 0, false);
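+ // A null tlbEntry now signals a failed translation; no sentinel
+ // invalid entry is allocated.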
+ sender_state->tlbEntry = nullptr;
}
handleTranslationReturn(virtPageAddr, TLB_MISS, pkt);
Mode mode = sender_state->tlbMode;
Addr vaddr = pkt->req->getVaddr();
- GpuTlbEntry *local_entry, *new_entry;
+ TlbEntry *local_entry, *new_entry;
if (tlb_outcome == TLB_HIT) {
DPRINTF(GPUTLB, "Functional Translation Done - TLB hit for addr "
"while paddr was %#x.\n", local_entry->vaddr,
local_entry->paddr);
- // Do paging checks if it's a normal functional access. If it's for a
- // prefetch, then sometimes you can try to prefetch something that won't
- // pass protection. We don't actually want to fault becuase there is no
- // demand access to deem this a violation. Just put it in the TLB and
- // it will fault if indeed a future demand access touches it in
- // violation.
- if (!sender_state->prefetch && sender_state->tlbEntry->valid)
+ /**
+ * Do paging checks if it's a normal functional access. If it's for a
+ * prefetch, then sometimes you can try to prefetch something that
+ * won't pass protection. We don't actually want to fault because there
+ * is no demand access to deem this a violation. Just put it in the
+ * TLB and it will fault if indeed a future demand access touches it in
+ * violation.
+ *
+ * This feature could be used to explore security issues around
+ * speculative memory accesses.
+ */
+ if (!sender_state->prefetch && sender_state->tlbEntry)
pagingProtectionChecks(tc, pkt, local_entry, mode);
int page_size = local_entry->size();
pte->paddr);
sender_state->tlbEntry =
- new GpuTlbEntry(0, virt_page_addr,
- pte->paddr, true);
+ new TlbEntry(p->pid(), virt_page_addr,
+ pte->paddr, false, false);
} else {
// If this was a prefetch, then do the normal thing if it
// was a successful translation. Otherwise, send an empty
pte->paddr);
sender_state->tlbEntry =
- new GpuTlbEntry(0, virt_page_addr,
- pte->paddr, true);
+ new TlbEntry(p->pid(), virt_page_addr,
+ pte->paddr, false, false);
} else {
DPRINTF(GPUPrefetch, "Prefetch failed %#x\n",
alignedVaddr);
- sender_state->tlbEntry = new GpuTlbEntry();
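+ // As in the miss path above, a failed prefetch translation is
+ // reported with a null entry instead of an invalid GpuTlbEntry.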
+ sender_state->tlbEntry = nullptr;
return;
}
DPRINTF(GPUPrefetch, "Functional Hit for vaddr %#x\n",
tlb->lookup(pkt->req->getVaddr()));
- GpuTlbEntry *entry = tlb->lookup(pkt->req->getVaddr(),
+ TlbEntry *entry = tlb->lookup(pkt->req->getVaddr(),
update_stats);
assert(entry);
+ auto p = sender_state->tc->getProcessPtr();
sender_state->tlbEntry =
- new GpuTlbEntry(0, entry->vaddr, entry->paddr, entry->valid);
+ new TlbEntry(p->pid(), entry->vaddr, entry->paddr,
+ false, false);
}
// This is the function that would populate pkt->req with the paddr of
// the translation. But if no translation happens (i.e., prefetch fails)
namespace X86ISA
{
- class GpuTlbEntry : public TlbEntry
- {
- public:
- GpuTlbEntry(Addr asn, Addr _vaddr, Addr _paddr, bool _valid)
- : TlbEntry(asn, _vaddr, _paddr, false, false), valid(_valid) { }
-
- GpuTlbEntry() : TlbEntry(), valid(false) { }
-
- bool valid;
- };
-
class GpuTLB : public MemObject
{
protected:
friend class Walker;
- typedef std::list<GpuTlbEntry*> EntryList;
+ typedef std::list<TlbEntry*> EntryList;
uint32_t configAddress;
};
void dumpAll();
- GpuTlbEntry *lookup(Addr va, bool update_lru=true);
+ TlbEntry *lookup(Addr va, bool update_lru=true);
void setConfigAddress(uint32_t addr);
protected:
*/
bool accessDistance;
- std::vector<GpuTlbEntry> tlb;
+ std::vector<TlbEntry> tlb;
/*
* It's a per-set list. As long as we have not reached
Tick doMmuRegRead(ThreadContext *tc, Packet *pkt);
Tick doMmuRegWrite(ThreadContext *tc, Packet *pkt);
- GpuTlbEntry *insert(Addr vpn, GpuTlbEntry &entry);
+ TlbEntry *insert(Addr vpn, TlbEntry &entry);
// Checkpointing
virtual void serialize(CheckpointOut& cp) const;
void handleFuncTranslationReturn(PacketPtr pkt, tlbOutcome outcome);
void pagingProtectionChecks(ThreadContext *tc, PacketPtr pkt,
- GpuTlbEntry *tlb_entry, Mode mode);
+ TlbEntry *tlb_entry, Mode mode);
- void updatePhysAddresses(Addr virt_page_addr, GpuTlbEntry *tlb_entry,
+ void updatePhysAddresses(Addr virt_page_addr, TlbEntry *tlb_entry,
Addr phys_page_addr);
void issueTLBLookup(PacketPtr pkt);
* previous TLBs. Equivalent to the data cache concept of
* "data return."
*/
- GpuTlbEntry *tlbEntry;
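+ // Set to nullptr when the translation fails.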
+ TlbEntry *tlbEntry;
// Is this a TLB prefetch request?
bool prefetch;
// When was the req for this translation issued