miscRegName[misc_reg]);
auto req = std::make_shared<Request>(
- 0, val, 0, flags, Request::funcMasterId,
+ val, 0, flags, Request::funcMasterId,
tc->pcState().pc(), tc->contextId());
fault = getDTBPtr(tc)->translateFunctional(
warn("Translating via %s in functional mode! Fix Me!\n",
miscRegName[misc_reg]);
- req->setVirt(0, val, 0, flags, Request::funcMasterId,
- tc->pcState().pc());
+ req->setVirt(val, 0, flags, Request::funcMasterId,
+ tc->pcState().pc());
req->setContext(tc->contextId());
fault = getDTBPtr(tc)->translateFunctional(req, tc, mode,
tranType);
fault(NoFault), complete(false), selfDelete(false)
{
req = std::make_shared<Request>();
- req->setVirt(0, s1Te.pAddr(s1Req->getVaddr()), s1Req->getSize(),
+ req->setVirt(s1Te.pAddr(s1Req->getVaddr()), s1Req->getSize(),
s1Req->getFlags(), s1Req->masterId(), 0);
}
// translate to physical address using the second stage MMU
auto req = std::make_shared<Request>();
- req->setVirt(0, descAddr, numBytes, flags | Request::PT_WALK, masterId, 0);
+ req->setVirt(descAddr, numBytes, flags | Request::PT_WALK, masterId, 0);
if (isFunctional) {
fault = stage2Tlb()->translateFunctional(req, tc, BaseTLB::Read);
} else {
void setVirt(Addr vaddr, int size, Request::Flags flags, int masterId)
{
numBytes = size;
- req->setVirt(0, vaddr, size, flags, masterId, 0);
+ req->setVirt(vaddr, size, flags, masterId, 0);
}
void translateTiming(ThreadContext *tc)
ArmFault::TranMethod tranMethod = long_desc_format ? ArmFault::LpaeTran
: ArmFault::VmsaTran;
- req->setAsid(asid);
-
- DPRINTF(TLBVerbose, "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
+ DPRINTF(TLBVerbose,
+ "CPSR is priv:%d UserMode:%d secure:%d S1S2NsTran:%d\n",
isPriv, flags & UserMode, isSecure, tranType & S1S2NsTran);
DPRINTF(TLB, "translateFs addr %#x, mode %d, st2 %d, scr %#x sctlr %#x "
const RequestPtr &req = memReq;
ArmISA::TLB* dtb = static_cast<TLB*>(thread->getDTBPtr());
- req->setVirt(0, addr, size, flags, thread->pcState().instAddr(),
+ req->setVirt(addr, size, flags, thread->pcState().instAddr(),
Request::funcMasterId);
// Translate to physical address
Fault fault;
// Set up a functional memory Request to pass to the TLB
// to get it to translate the vaddr to a paddr
- auto req = std::make_shared<Request>(0, addr, 64, 0x40, -1, 0, 0);
+ auto req = std::make_shared<Request>(addr, 64, 0x40, -1, 0, 0);
BaseTLB *tlb;
// Check the TLBs for a translation
*d = gpuDynInst->wavefront()->ldsChunk->
read<c0>(vaddr);
} else {
- RequestPtr req = std::make_shared<Request>(0,
+ RequestPtr req = std::make_shared<Request>(
vaddr, sizeof(c0), 0,
gpuDynInst->computeUnit()->masterId(),
0, gpuDynInst->wfDynId);
gpuDynInst->statusBitVector = VectorMask(1);
gpuDynInst->useContinuation = false;
// create request
- RequestPtr req = std::make_shared<Request>(0, 0, 0, 0,
+ RequestPtr req = std::make_shared<Request>(0, 0, 0,
gpuDynInst->computeUnit()->masterId(),
0, gpuDynInst->wfDynId);
req->setFlags(Request::ACQUIRE);
gpuDynInst->execContinuation = &GPUStaticInst::execSt;
gpuDynInst->useContinuation = true;
// create request
- RequestPtr req = std::make_shared<Request>(0, 0, 0, 0,
+ RequestPtr req = std::make_shared<Request>(0, 0, 0,
gpuDynInst->computeUnit()->masterId(),
0, gpuDynInst->wfDynId);
req->setFlags(Request::RELEASE);
*d);
} else {
RequestPtr req = std::make_shared<Request>(
- 0, vaddr, sizeof(c0), 0,
+ vaddr, sizeof(c0), 0,
gpuDynInst->computeUnit()->masterId(),
0, gpuDynInst->wfDynId);
gpuDynInst->useContinuation = true;
// create request
- RequestPtr req = std::make_shared<Request>(0, 0, 0, 0,
+ RequestPtr req = std::make_shared<Request>(0, 0, 0,
gpuDynInst->computeUnit()->masterId(),
0, gpuDynInst->wfDynId);
req->setFlags(Request::RELEASE);
}
} else {
RequestPtr req =
- std::make_shared<Request>(0, vaddr, sizeof(c0), 0,
+ std::make_shared<Request>(vaddr, sizeof(c0), 0,
gpuDynInst->computeUnit()->masterId(),
0, gpuDynInst->wfDynId,
gpuDynInst->makeAtomicOpFunctor<c0>(e,
// the acquire completes
gpuDynInst->useContinuation = false;
// create request
- RequestPtr req = std::make_shared<Request>(0, 0, 0, 0,
+ RequestPtr req = std::make_shared<Request>(0, 0, 0,
gpuDynInst->computeUnit()->masterId(),
0, gpuDynInst->wfDynId);
req->setFlags(Request::ACQUIRE);
if (secondAddr > addr)
size = secondAddr - addr;
- req->setVirt(0, addr, size, 0x0, dataMasterId(), tc->instAddr());
+ req->setVirt(addr, size, 0x0, dataMasterId(), tc->instAddr());
// translate to physical address
Fault fault = dtb->translateAtomic(req, tc, BaseTLB::Read);
/** The memory request flags (from translation). */
unsigned memReqFlags;
- /** data address space ID, for loads & stores. */
- short asid;
-
/** The size of the request */
unsigned effSize;
instFlags[MemAccPredicate] = val;
}
- /** Sets the ASID. */
- void setASID(short addr_space_id) { asid = addr_space_id; }
- short getASID() { return asid; }
-
/** Sets the thread id. */
void setTid(ThreadID tid) { threadNumber = tid; }
// Eventually make this a parameter.
threadNumber = 0;
- // Also make this a parameter, or perhaps get it from xc or cpu.
- asid = 0;
-
// Initialize the fault to be NoFault.
fault = NoFault;
size_left));
auto it_end = byte_enable.cbegin() + (size - size_left);
if (isAnyActiveElement(it_start, it_end)) {
- mem_req = std::make_shared<Request>(0, frag_addr, frag_size,
+ mem_req = std::make_shared<Request>(frag_addr, frag_size,
flags, masterId, thread->pcState().instAddr(),
tc->contextId());
mem_req->setByteEnable(std::vector<bool>(it_start, it_end));
}
} else {
- mem_req = std::make_shared<Request>(0, frag_addr, frag_size,
+ mem_req = std::make_shared<Request>(frag_addr, frag_size,
flags, masterId, thread->pcState().instAddr(),
tc->contextId());
}
if (!curMacroStaticInst) {
// set up memory request for instruction fetch
auto mem_req = std::make_shared<Request>(
- unverifiedInst->threadNumber, fetch_PC,
- sizeof(MachInst), 0, masterId, fetch_PC,
+ fetch_PC, sizeof(MachInst), 0, masterId, fetch_PC,
thread->contextId());
- mem_req->setVirt(0, fetch_PC, sizeof(MachInst),
+ mem_req->setVirt(fetch_PC, sizeof(MachInst),
Request::INST_FETCH, masterId,
thread->instAddr());
request_id, aligned_pc, thread.pc, line_offset, request_size);
request->request->setContext(cpu.threads[tid]->getTC()->contextId());
- request->request->setVirt(0 /* asid */,
+ request->request->setVirt(
aligned_pc, request_size, Request::INST_FETCH, cpu.instMasterId(),
/* I've no idea why we need the PC, but give it */
thread.pc.instAddr());
fragment->setContext(request->contextId());
if (byte_enable.empty()) {
- fragment->setVirt(0 /* asid */,
+ fragment->setVirt(
fragment_addr, fragment_size, request->getFlags(),
- request->masterId(),
- request->getPC());
+ request->masterId(), request->getPC());
} else {
// Set up byte-enable mask for the current fragment
auto it_start = byte_enable.begin() +
auto it_end = byte_enable.begin() +
(fragment_addr - base_addr) + fragment_size;
if (isAnyActiveElement(it_start, it_end)) {
- fragment->setVirt(0 /* asid */,
+ fragment->setVirt(
fragment_addr, fragment_size, request->getFlags(),
- request->masterId(),
- request->getPC());
+ request->masterId(), request->getPC());
fragment->setByteEnable(std::vector<bool>(it_start, it_end));
} else {
disabled_fragment = true;
int cid = cpu.threads[inst->id.threadId]->getTC()->contextId();
request->request->setContext(cid);
- request->request->setVirt(0 /* asid */,
+ request->request->setVirt(
addr, size, flags, cpu.dataMasterId(),
/* I've no idea why we need the PC, but give it */
inst->pc.instAddr(), std::move(amo_op));
// Set the appropriate read size and flags as well.
// Build request here.
RequestPtr mem_req = std::make_shared<Request>(
- tid, fetchBufferBlockPC, fetchBufferSize,
+ fetchBufferBlockPC, fetchBufferSize,
Request::INST_FETCH, cpu->instMasterId(), pc,
cpu->thread[tid]->contextId());
new DynInst(staticInst, curMacroop, thisPC, nextPC, seq, cpu);
instruction->setTid(tid);
- instruction->setASID(tid);
-
instruction->setThreadState(cpu->thread[tid]);
DPRINTF(Fetch, "[tid:%i] Instruction PC %#x (%d) created "
{
if (byte_enable.empty() ||
isAnyActiveElement(byte_enable.begin(), byte_enable.end())) {
- auto request = std::make_shared<Request>(_inst->getASID(),
+ auto request = std::make_shared<Request>(
addr, size, _flags, _inst->masterId(),
_inst->instAddr(), _inst->contextId(),
std::move(_amo_op));
* For a previously allocated Request objects.
*/
void
- setVirt(int asid, Addr vaddr, unsigned size, Request::Flags flags_,
+ setVirt(Addr vaddr, unsigned size, Request::Flags flags_,
MasterID mid, Addr pc)
{
- request()->setVirt(asid, vaddr, size, flags_, mid, pc);
+ request()->setVirt(vaddr, size, flags_, mid, pc);
}
void
Addr final_addr = addrBlockAlign(_addr + _size, cacheLineSize);
uint32_t size_so_far = 0;
- mainReq = std::make_shared<Request>(_inst->getASID(), base_addr,
+ mainReq = std::make_shared<Request>(base_addr,
_size, _flags, _inst->masterId(),
_inst->instAddr(), _inst->contextId());
if (!_byteEnable.empty()) {
// Assign fields for creating a request in case of a load/store
new_record->reqFlags = head_inst->memReqFlags;
new_record->virtAddr = head_inst->effAddr;
- new_record->asid = head_inst->asid;
new_record->physAddr = head_inst->physEffAddr;
// Currently the tracing does not support split requests.
new_record->size = head_inst->effSize;
dep_pkt.set_p_addr(temp_ptr->physAddr);
// If tracing of virtual addresses is enabled, set the optional
// field for it
- if (traceVirtAddr) {
+ if (traceVirtAddr)
dep_pkt.set_v_addr(temp_ptr->virtAddr);
- dep_pkt.set_asid(temp_ptr->asid);
- }
dep_pkt.set_size(temp_ptr->size);
}
dep_pkt.set_comp_delay(temp_ptr->compDelay);
Addr physAddr;
/* Request virtual address in case of a load/store instruction */
Addr virtAddr;
- /* Address space id in case of a load/store instruction */
- uint32_t asid;
/* Request size in case of a load/store instruction */
unsigned size;
/** Default Constructor */
auto it_start = byte_enable.begin() + (size - (frag_size + size_left));
auto it_end = byte_enable.begin() + (size - size_left);
if (isAnyActiveElement(it_start, it_end)) {
- req->setVirt(0, frag_addr, frag_size, flags, dataMasterId(),
+ req->setVirt(frag_addr, frag_size, flags, dataMasterId(),
inst_addr);
req->setByteEnable(std::vector<bool>(it_start, it_end));
} else {
predicate = false;
}
} else {
- req->setVirt(0, frag_addr, frag_size, flags, dataMasterId(),
+ req->setVirt(frag_addr, frag_size, flags, dataMasterId(),
inst_addr);
req->setByteEnable(std::vector<bool>());
}
dcache_latency = 0;
req->taskId(taskId());
- req->setVirt(0, addr, size, flags, dataMasterId(),
+ req->setVirt(addr, size, flags, dataMasterId(),
thread->pcState().instAddr(), std::move(amo_op));
// translate to physical address
// set up memory request for instruction fetch
DPRINTF(Fetch, "Fetch: Inst PC:%08p, Fetch PC:%08p\n", instAddr, fetchPC);
- req->setVirt(0, fetchPC, sizeof(MachInst), Request::INST_FETCH,
+ req->setVirt(fetchPC, sizeof(MachInst), Request::INST_FETCH,
instMasterId(), instAddr);
}
SimpleThread* thread = t_info.thread;
Fault fault;
- const int asid = 0;
const Addr pc = thread->instAddr();
unsigned block_size = cacheLineSize();
BaseTLB::Mode mode = BaseTLB::Read;
traceData->setMem(addr, size, flags);
RequestPtr req = std::make_shared<Request>(
- asid, addr, size, flags, dataMasterId(), pc,
- thread->contextId());
+ addr, size, flags, dataMasterId(), pc, thread->contextId());
if (!byte_enable.empty()) {
req->setByteEnable(byte_enable);
}
SimpleThread* thread = t_info.thread;
uint8_t *newData = new uint8_t[size];
- const int asid = 0;
const Addr pc = thread->instAddr();
unsigned block_size = cacheLineSize();
BaseTLB::Mode mode = BaseTLB::Write;
traceData->setMem(addr, size, flags);
RequestPtr req = std::make_shared<Request>(
- asid, addr, size, flags, dataMasterId(), pc,
- thread->contextId());
+ addr, size, flags, dataMasterId(), pc, thread->contextId());
if (!byte_enable.empty()) {
req->setByteEnable(byte_enable);
}
SimpleThread* thread = t_info.thread;
Fault fault;
- const int asid = 0;
const Addr pc = thread->instAddr();
unsigned block_size = cacheLineSize();
BaseTLB::Mode mode = BaseTLB::Write;
if (traceData)
traceData->setMem(addr, size, flags);
- RequestPtr req = make_shared<Request>(asid, addr, size, flags,
+ RequestPtr req = make_shared<Request>(addr, size, flags,
dataMasterId(), pc, thread->contextId(),
std::move(amo_op));
requestType = MemCmd::ReadReq;
flags.set(Request::INST_FETCH);
req = std::make_shared<Request>(
- 0, 0x0, access_size, flags, masterId, 0x0, 0);
+ 0x0, access_size, flags, masterId, 0x0, 0);
req->setPaddr(paddr);
} else { // if (injReqType == 2)
// generate packet for virtual network 2
req->setReqInstSeqNum(node_ptr->seqNum);
req->setPC(node_ptr->pc);
- // If virtual address is valid, set the asid and virtual address fields
+ // If virtual address is valid, set the virtual address field
// of the request.
if (node_ptr->virtAddr != 0) {
- req->setVirt(node_ptr->asid, node_ptr->virtAddr, node_ptr->size,
- node_ptr->flags, masterID, node_ptr->pc);
+ req->setVirt(node_ptr->virtAddr, node_ptr->size,
+ node_ptr->flags, masterID, node_ptr->pc);
req->setPaddr(node_ptr->physAddr);
req->setReqInstSeqNum(node_ptr->seqNum);
}
else
element->virtAddr = 0;
- if (pkt_msg.has_asid())
- element->asid = pkt_msg.asid();
- else
- element->asid = 0;
-
if (pkt_msg.has_size())
element->size = pkt_msg.size();
else
/** The virtual address for the request if any */
Addr virtAddr;
- /** The address space id which is set if the virtual address is set */
- uint32_t asid;
-
/** Size of request if any */
uint32_t size;
if (!req) {
req = std::make_shared<Request>(
- 0, 0, 0, 0, masterId(), 0, gpuDynInst->wfDynId);
+ 0, 0, 0, masterId(), 0, gpuDynInst->wfDynId);
}
req->setPaddr(0);
if (kernelLaunch) {
break;
RequestPtr prefetch_req = std::make_shared<Request>(
- 0, vaddr + stride * pf * TheISA::PageBytes,
+ vaddr + stride * pf * TheISA::PageBytes,
sizeof(uint8_t), 0,
computeUnit->masterId(),
0, 0, nullptr);
// set up virtual request
RequestPtr req = std::make_shared<Request>(
- 0, vaddr, size, Request::INST_FETCH,
+ vaddr, size, Request::INST_FETCH,
computeUnit->masterId(), 0, 0, nullptr);
PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
!gen.done(); gen.next()) {
RequestPtr req = std::make_shared<Request>(
- 0, gen.addr(), gen.size(), 0,
+ gen.addr(), gen.size(), 0,
cuList[0]->masterId(), 0, 0, nullptr);
doFunctionalAccess(req, cmd, data_buf, suppress_func_errors, cu_id);
QueuedPrefetcher::createPrefetchRequest(Addr addr, PrefetchInfo const &pfi,
PacketPtr pkt)
{
- RequestPtr translation_req = std::make_shared<Request>(pkt->req->getAsid(),
+ RequestPtr translation_req = std::make_shared<Request>(
addr, blkSize, pkt->req->getFlags(), masterId, pfi.getPC(),
pkt->req->contextId());
translation_req->setFlags(Request::PREFETCH);
VALID_SIZE = 0x00000001,
/** Whether or not paddr is valid (has been written yet). */
VALID_PADDR = 0x00000002,
- /** Whether or not the vaddr & asid are valid. */
+ /** Whether or not the vaddr is valid. */
VALID_VADDR = 0x00000004,
/** Whether or not the instruction sequence number is valid. */
VALID_INST_SEQ_NUM = 0x00000008,
*/
uint32_t _taskId = ContextSwitchTaskId::Unknown;
- union {
- struct {
- /**
- * The stream ID uniquely identifies a device behind the
- * SMMU/IOMMU Each transaction arriving at the SMMU/IOMMU is
- * associated with exactly one stream ID.
- */
- uint32_t _streamId;
-
- /**
- * The substream ID identifies an "execution context" within a
- * device behind an SMMU/IOMMU. It's intended to map 1-to-1 to
- * PCIe PASID (Process Address Space ID). The presence of a
- * substream ID is optional.
- */
- uint32_t _substreamId;
- };
-
- /** The address space ID. */
- uint64_t _asid = 0;
- };
+ /**
+ * The stream ID uniquely identifies a device behind the
+ * SMMU/IOMMU. Each transaction arriving at the SMMU/IOMMU is
+ * associated with exactly one stream ID.
+ */
+ uint32_t _streamId = 0;
+
+ /**
+ * The substream ID identifies an "execution context" within a
+ * device behind an SMMU/IOMMU. It's intended to map 1-to-1 to
+ * PCIe PASID (Process Address Space ID). The presence of a
+ * substream ID is optional.
+ */
+ uint32_t _substreamId = 0;
/** The virtual address of the request. */
Addr _vaddr = 0;
privateFlags.set(VALID_PADDR|VALID_SIZE);
}
- Request(uint64_t asid, Addr vaddr, unsigned size, Flags flags,
+ Request(Addr vaddr, unsigned size, Flags flags,
MasterID mid, Addr pc, ContextID cid,
AtomicOpFunctorPtr atomic_op=nullptr)
{
- setVirt(asid, vaddr, size, flags, mid, pc, std::move(atomic_op));
+ setVirt(vaddr, size, flags, mid, pc, std::move(atomic_op));
setContext(cid);
}
_memSpaceConfigFlags(other._memSpaceConfigFlags),
privateFlags(other.privateFlags),
_time(other._time),
- _taskId(other._taskId), _asid(other._asid), _vaddr(other._vaddr),
+ _taskId(other._taskId), _vaddr(other._vaddr),
_extraData(other._extraData), _contextId(other._contextId),
_pc(other._pc), _reqInstSeqNum(other._reqInstSeqNum),
_localAccessor(other._localAccessor),
* allocated Request object.
*/
void
- setVirt(uint64_t asid, Addr vaddr, unsigned size, Flags flags,
- MasterID mid, Addr pc, AtomicOpFunctorPtr amo_op=nullptr)
+ setVirt(Addr vaddr, unsigned size, Flags flags, MasterID mid, Addr pc,
+ AtomicOpFunctorPtr amo_op=nullptr)
{
- _asid = asid;
_vaddr = vaddr;
_size = size;
_masterId = mid;
_taskId = id;
}
- /** Accessor function for asid.*/
- uint64_t
- getAsid() const
- {
- assert(privateFlags.isSet(VALID_VADDR));
- return _asid;
- }
-
- /** Accessor function for asid.*/
- void
- setAsid(uint64_t asid)
- {
- _asid = asid;
- }
-
/** Accessor function for architecture-specific flags.*/
ArchFlagsType
getArchFlags() const