pkt->senderState = new DTLBPort::SenderState(gpuDynInst, index);
// This is the senderState needed by the TLB hierarchy to function
- TheISA::GpuTLB::TranslationState *translation_state =
- new TheISA::GpuTLB::TranslationState(TLB_mode, shader->gpuTc, false,
+ X86ISA::GpuTLB::TranslationState *translation_state =
+ new X86ISA::GpuTLB::TranslationState(TLB_mode, shader->gpuTc, false,
pkt->senderState);
pkt->senderState = translation_state;
delete pkt->senderState;
// Because it's an atomic operation, we only need the TLB translation state
- pkt->senderState = new TheISA::GpuTLB::TranslationState(TLB_mode,
+ pkt->senderState = new X86ISA::GpuTLB::TranslationState(TLB_mode,
shader->gpuTc);
tlbPort[tlbPort_index].sendFunctional(pkt);
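These hunks only swap the namespace; the surrounding logic is gem5's usual sender-state chaining. In timing mode the newly allocated TranslationState wraps the port's existing senderState (the constructor's final argument) so the response path can pop it back off, while the atomic path builds a bare TranslationState because sendFunctional completes before the call returns. A minimal sketch of the push/pop idiom, using simplified stand-ins rather than gem5's real Packet and TranslationState types:

```cpp
#include <cassert>

// Simplified stand-ins for the sender-state chaining idiom; not the
// actual gem5 class layout from src/mem/packet.hh.
struct SenderState
{
    SenderState *saved = nullptr;   // previous state, restored on pop
    virtual ~SenderState() = default;
};

struct Packet
{
    SenderState *senderState = nullptr;

    // Push: wrap the current state inside a new one.
    void pushSenderState(SenderState *s)
    {
        s->saved = senderState;
        senderState = s;
    }

    // Pop: restore the wrapped state and hand back the top one.
    SenderState *popSenderState()
    {
        SenderState *top = senderState;
        senderState = top->saved;
        return top;
    }
};

int main()
{
    Packet pkt;
    pkt.pushSenderState(new SenderState);   // the DTLBPort's own state
    pkt.pushSenderState(new SenderState);   // the TranslationState on top

    // On the response path the TLB pops its TranslationState off,
    // exposing the port's SenderState underneath again.
    delete pkt.popSenderState();
    assert(pkt.senderState != nullptr);
    delete pkt.popSenderState();
    assert(pkt.senderState == nullptr);
}
```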
new_pkt->req->getPaddr());
// safe_cast the senderState
- TheISA::GpuTLB::TranslationState *sender_state =
- safe_cast<TheISA::GpuTLB::TranslationState*>(pkt->senderState);
+ X86ISA::GpuTLB::TranslationState *sender_state =
+ safe_cast<X86ISA::GpuTLB::TranslationState*>(pkt->senderState);
delete sender_state->tlbEntry;
delete new_pkt;
new ComputeUnit::ScalarDTLBPort::SenderState(gpuDynInst);
pkt->senderState =
- new TheISA::GpuTLB::TranslationState(tlb_mode, shader->gpuTc, false,
+ new X86ISA::GpuTLB::TranslationState(tlb_mode, shader->gpuTc, false,
pkt->senderState);
if (scalarDTLBPort.isStalled()) {
computeUnit->tlbCycles += curTick();
// pop off the TLB translation state
- TheISA::GpuTLB::TranslationState *translation_state =
- safe_cast<TheISA::GpuTLB::TranslationState*>(pkt->senderState);
+ X86ISA::GpuTLB::TranslationState *translation_state =
+ safe_cast<X86ISA::GpuTLB::TranslationState*>(pkt->senderState);
// no PageFaults are permitted for data accesses
if (!translation_state->tlbEntry) {
DPRINTF(GPUPrefetch, "CU[%d][%d][%d][%d]: %#x was last\n",
computeUnit->cu_id, simdId, wfSlotId, mp_index, last);
- int stride = last ? (roundDown(vaddr, TheISA::PageBytes) -
- roundDown(last, TheISA::PageBytes)) >> TheISA::PageShift
+ int stride = last ? (roundDown(vaddr, X86ISA::PageBytes) -
+ roundDown(last, X86ISA::PageBytes)) >> X86ISA::PageShift
: 0;
DPRINTF(GPUPrefetch, "Stride is %d\n", stride);
// Prefetch the next few pages atomically
for (int pf = 1; pf <= computeUnit->prefetchDepth; ++pf) {
DPRINTF(GPUPrefetch, "%d * %d: %#x\n", pf, stride,
- vaddr+stride*pf*TheISA::PageBytes);
+ vaddr + stride * pf * X86ISA::PageBytes);
if (!stride)
break;
RequestPtr prefetch_req = std::make_shared<Request>(
- vaddr + stride * pf * TheISA::PageBytes,
+ vaddr + stride * pf * X86ISA::PageBytes,
sizeof(uint8_t), 0,
computeUnit->requestorId(),
0, 0, nullptr);
// Because it's an atomic operation, we only need the TLB translation state
prefetch_pkt->senderState =
- new TheISA::GpuTLB::TranslationState(TLB_mode,
+ new X86ISA::GpuTLB::TranslationState(TLB_mode,
computeUnit->shader->gpuTc, true);
// Currently prefetches are zero-latency, hence the sendFunctional
sendFunctional(prefetch_pkt);
/* safe_cast the senderState */
- TheISA::GpuTLB::TranslationState *tlb_state =
- safe_cast<TheISA::GpuTLB::TranslationState*>(
+ X86ISA::GpuTLB::TranslationState *tlb_state =
+ safe_cast<X86ISA::GpuTLB::TranslationState*>(
prefetch_pkt->senderState);
{
assert(pkt->senderState);
- TheISA::GpuTLB::TranslationState *translation_state =
- safe_cast<TheISA::GpuTLB::TranslationState*>(pkt->senderState);
+ X86ISA::GpuTLB::TranslationState *translation_state =
+ safe_cast<X86ISA::GpuTLB::TranslationState*>(pkt->senderState);
// Page faults are not allowed
fatal_if(!translation_state->tlbEntry,
assert(pkt->senderState);
// pop off the TLB translation state
- TheISA::GpuTLB::TranslationState *translation_state
- = safe_cast<TheISA::GpuTLB::TranslationState*>(pkt->senderState);
+ X86ISA::GpuTLB::TranslationState *translation_state
+ = safe_cast<X86ISA::GpuTLB::TranslationState*>(pkt->senderState);
bool success = translation_state->tlbEntry != nullptr;
delete translation_state->tlbEntry;
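A pattern worth calling out across these response handlers: on a hit the TLB attaches a heap-allocated copy of the matching entry to translation_state->tlbEntry, a null pointer signals a failed translation (hence the fatal_if above), and the requester owns and must delete the copy. A pared-down, hypothetical rendering of that convention:

```cpp
#include <cassert>

// Hypothetical distillation of the ownership convention used above:
// the responder hands back an owned copy of the TLB entry, or nullptr
// when translation failed.
struct TlbEntry { /* paddr, vaddr, flags ... */ };

struct TranslationState
{
    TlbEntry *tlbEntry = nullptr;   // owned by whoever pops the state
};

bool
consumeTranslation(TranslationState *ts)
{
    bool success = ts->tlbEntry != nullptr;
    delete ts->tlbEntry;            // the requester frees the copy
    ts->tlbEntry = nullptr;
    return success;
}

int main()
{
    TranslationState ts;
    ts.tlbEntry = new TlbEntry;        // as filled in by the TLB on a hit
    assert(consumeTranslation(&ts));
    assert(!consumeTranslation(&ts));  // no entry this time: a miss
}
```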
void
ComputeUnit::updatePageDivergenceDist(Addr addr)
{
- Addr virt_page_addr = roundDown(addr, TheISA::PageBytes);
+ Addr virt_page_addr = roundDown(addr, X86ISA::PageBytes);
if (!pagesTouched.count(virt_page_addr))
pagesTouched[virt_page_addr] = 1;
* @warning: the set-associative version assumes you have a
* fixed page size of 4KB.
* If the page size is greater than 4KB (as defined in the
- * TheISA::PageBytes), then there are various issues w/ the current
+ * X86ISA::PageBytes), then there are various issues w/ the current
* implementation (you'd have the same 8KB page being replicated in
* different sets etc)
*/
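The caveat is easy to see with a concrete set-index function: if the index is derived from the virtual page number under the 4 KiB assumption, one larger page spans several 4 KiB frames that hash to different sets, so its translation gets replicated. An illustrative sketch (the geometry and index function are assumptions, not the simulator's actual code):

```cpp
#include <cassert>
#include <cstdint>

using Addr = std::uint64_t;

constexpr int PageShift = 12;   // assumes 4 KiB pages
constexpr int numSets   = 64;   // illustrative TLB geometry

// Hypothetical set-index function built on the 4 KiB assumption.
int setIndex(Addr vaddr) { return (vaddr >> PageShift) % numSets; }

int main()
{
    // Two addresses inside the *same* 8 KiB page...
    Addr lo = 0x2000;   // first 4 KiB half
    Addr hi = 0x3000;   // second 4 KiB half

    // ...land in different sets, so the entry would be replicated.
    assert(setIndex(lo) != setIndex(hi));
}
```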
assert(pkt->senderState);
Addr virt_page_addr = roundDown(pkt->req->getVaddr(),
- TheISA::PageBytes);
+ X86ISA::PageBytes);
TranslationState *sender_state =
safe_cast<TranslationState*>(pkt->senderState);
local_entry = new_entry;
if (allocationPolicy) {
- Addr virt_page_addr = roundDown(vaddr, TheISA::PageBytes);
+ Addr virt_page_addr = roundDown(vaddr, X86ISA::PageBytes);
DPRINTF(GPUTLB, "allocating entry w/ addr %#x\n",
virt_page_addr);
bool update_stats = !sender_state->prefetch;
Addr virt_page_addr = roundDown(pkt->req->getVaddr(),
- TheISA::PageBytes);
+ X86ISA::PageBytes);
if (update_stats)
tlb->updatePageFootprint(virt_page_addr);
GpuTLB::MemSidePort::recvTimingResp(PacketPtr pkt)
{
Addr virt_page_addr = roundDown(pkt->req->getVaddr(),
- TheISA::PageBytes);
+ X86ISA::PageBytes);
DPRINTF(GPUTLB, "MemSidePort recvTiming for virt_page_addr %#x\n",
virt_page_addr);
// Rule 1: Coalesce requests only if they
// fall within the same virtual page
Addr incoming_virt_page_addr = roundDown(incoming_pkt->req->getVaddr(),
- TheISA::PageBytes);
+ X86ISA::PageBytes);
Addr coalesced_virt_page_addr = roundDown(coalesced_pkt->req->getVaddr(),
- TheISA::PageBytes);
+ X86ISA::PageBytes);
if (incoming_virt_page_addr != coalesced_virt_page_addr)
return false;
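Rule 1 reduces to comparing page-aligned virtual addresses, so two requests coalesce only when their vaddrs fall inside the same 4 KiB page. A hypothetical distillation of the predicate:

```cpp
#include <cassert>
#include <cstdint>

using Addr = std::uint64_t;

constexpr Addr PageBytes = 4096;  // X86ISA::PageBytes

constexpr Addr roundDown(Addr val, Addr align) { return val & ~(align - 1); }

// Two requests may coalesce only if they share a virtual page.
bool samePage(Addr a, Addr b)
{
    return roundDown(a, PageBytes) == roundDown(b, PageBytes);
}

int main()
{
    assert(samePage(0x40001000, 0x40001f00));   // same 4 KiB page
    assert(!samePage(0x40001f00, 0x40002000));  // crosses a page boundary
}
```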
void
TLBCoalescer::updatePhysAddresses(PacketPtr pkt)
{
- Addr virt_page_addr = roundDown(pkt->req->getVaddr(), TheISA::PageBytes);
+ Addr virt_page_addr = roundDown(pkt->req->getVaddr(), X86ISA::PageBytes);
DPRINTF(GPUTLB, "Update phys. addr. for %d coalesced reqs for page %#x\n",
issuedTranslationsTable[virt_page_addr].size(), virt_page_addr);
// print a warning message. This is a temporary caveat of
// the current simulator where atomic and timing requests can
// coexist. FIXME remove this check/warning in the future.
- Addr virt_page_addr = roundDown(pkt->req->getVaddr(), TheISA::PageBytes);
+ Addr virt_page_addr = roundDown(pkt->req->getVaddr(), X86ISA::PageBytes);
int map_count = coalescer->issuedTranslationsTable.count(virt_page_addr);
if (map_count) {
// compute virtual page address for this request
Addr virt_page_addr = roundDown(first_packet->req->getVaddr(),
- TheISA::PageBytes);
+ X86ISA::PageBytes);
// is there another outstanding request for the same page addr?
int pending_reqs =