}
auto req = new Request(ev->getAddr(), ev->getSize(), flags, 0);
- req->setThreadContext(ev->getGroupId(), 0);
+ req->setContext(ev->getGroupId());
auto pkt = new Packet(req, cmdO);
pkt->allocate();
// with unexpected atomic snoop requests.
warn("Translating via MISCREG(%d) in functional mode! Fix Me!\n", misc_reg);
Request req(0, val, 1, flags, Request::funcMasterId,
- tc->pcState().pc(), tc->contextId(),
- tc->threadId());
+ tc->pcState().pc(), tc->contextId());
fault = tc->getDTBPtr()->translateFunctional(&req, tc, mode, tranType);
TTBCR ttbcr = readMiscRegNoEffect(MISCREG_TTBCR);
HCR hcr = readMiscRegNoEffect(MISCREG_HCR);
warn("Translating via MISCREG(%d) in functional mode! Fix Me!\n", misc_reg);
req->setVirt(0, val, 1, flags, Request::funcMasterId,
tc->pcState().pc());
- req->setThreadContext(tc->contextId(), tc->threadId());
+ req->setContext(tc->contextId());
fault = tc->getDTBPtr()->translateFunctional(req, tc, mode,
tranType);
Fault fault;
// Set up a functional memory Request to pass to the TLB
// to get it to translate the vaddr to a paddr
- Request req(0, addr, 64, 0x40, -1, 0, 0, 0);
+ Request req(0, addr, 64, 0x40, -1, 0, 0);
ArmISA::TLB *tlb;
// Check the TLBs for a translation
} else {
Request *req = new Request(0, vaddr, sizeof(c0), 0,
gpuDynInst->computeUnit()->masterId(),
- 0, gpuDynInst->wfDynId, i);
+ 0, gpuDynInst->wfDynId);
gpuDynInst->setRequestFlags(req);
PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
// create request
Request *req = new Request(0, 0, 0, 0,
gpuDynInst->computeUnit()->masterId(),
- 0, gpuDynInst->wfDynId, -1);
+ 0, gpuDynInst->wfDynId);
req->setFlags(Request::ACQUIRE);
gpuDynInst->computeUnit()->injectGlobalMemFence(gpuDynInst, false, req);
}
// create request
Request *req = new Request(0, 0, 0, 0,
gpuDynInst->computeUnit()->masterId(),
- 0, gpuDynInst->wfDynId, -1);
+ 0, gpuDynInst->wfDynId);
req->setFlags(Request::RELEASE);
gpuDynInst->computeUnit()->injectGlobalMemFence(gpuDynInst, false, req);
Request *req =
new Request(0, vaddr, sizeof(c0), 0,
gpuDynInst->computeUnit()->masterId(),
- 0, gpuDynInst->wfDynId, i);
+ 0, gpuDynInst->wfDynId);
gpuDynInst->setRequestFlags(req);
PacketPtr pkt = new Packet(req, MemCmd::WriteReq);
// create request
Request *req = new Request(0, 0, 0, 0,
gpuDynInst->computeUnit()->masterId(),
- 0, gpuDynInst->wfDynId, -1);
+ 0, gpuDynInst->wfDynId);
req->setFlags(Request::RELEASE);
gpuDynInst->computeUnit()->injectGlobalMemFence(gpuDynInst, false, req);
Request *req =
new Request(0, vaddr, sizeof(c0), 0,
gpuDynInst->computeUnit()->masterId(),
- 0, gpuDynInst->wfDynId, i,
+ 0, gpuDynInst->wfDynId,
gpuDynInst->makeAtomicOpFunctor<c0>(e,
f, this->opType));
// create request
Request *req = new Request(0, 0, 0, 0,
gpuDynInst->computeUnit()->masterId(),
- 0, gpuDynInst->wfDynId, -1);
+ 0, gpuDynInst->wfDynId);
req->setFlags(Request::ACQUIRE);
gpuDynInst->computeUnit()->injectGlobalMemFence(gpuDynInst, false, req);
}
{
xc->setMiscReg(MISCREG_LLADDR, req->getPaddr() & ~0xf);
xc->setMiscReg(MISCREG_LLFLAG, true);
- DPRINTF(LLSC, "[tid:%i]: Load-Link Flag Set & Load-Link"
+ DPRINTF(LLSC, "[cid:%i]: Load-Link Flag Set & Load-Link"
" Address set to %x.\n",
- req->threadId(), req->getPaddr() & ~0xf);
+ req->contextId(), req->getPaddr() & ~0xf);
}
template <class XC>
}
if (!lock_flag){
- DPRINTF(LLSC, "[tid:%i]: Lock Flag Set, "
+ DPRINTF(LLSC, "[cid:%i]: Lock Flag Set, "
"Store Conditional Failed.\n",
- req->threadId());
+ req->contextId());
} else if ((req->getPaddr() & ~0xf) != lock_addr) {
- DPRINTF(LLSC, "[tid:%i]: Load-Link Address Mismatch, "
+ DPRINTF(LLSC, "[cid:%i]: Load-Link Address Mismatch, "
"Store Conditional Failed.\n",
- req->threadId());
+ req->contextId());
}
// store conditional failed already, so don't issue it to mem
return false;
/// Get the number of thread contexts available
unsigned numContexts() { return threadContexts.size(); }
+    /// Convert ContextID to ThreadID
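+    /// (assumes this CPU's thread contexts were assigned contiguous,
+    ///  ascending context IDs, starting at threadContexts[0])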
+ ThreadID contextToThread(ContextID cid)
+ { return static_cast<ThreadID>(cid - threadContexts[0]->contextId()); }
+
public:
typedef BaseCPUParams Params;
const Params *params() const
sreqHigh = savedSreqHigh;
} else {
req = new Request(asid, addr, size, flags, masterId(), this->pc.instAddr(),
- thread->contextId(), threadNumber);
+ thread->contextId());
req->taskId(cpu->taskId());
sreqHigh = savedSreqHigh;
} else {
req = new Request(asid, addr, size, flags, masterId(), this->pc.instAddr(),
- thread->contextId(), threadNumber);
+ thread->contextId());
req->taskId(cpu->taskId());
-    // Need to account for multiple accesses like the Atomic and TimingSimple
+    // Need to account for multiple accesses, as the Atomic and
+    // TimingSimple CPUs do
while (1) {
memReq = new Request(0, addr, size, flags, masterId,
- thread->pcState().instAddr(), tc->contextId(), 0);
+ thread->pcState().instAddr(), tc->contextId());
// translate to physical address
fault = dtb->translateFunctional(memReq, tc, BaseTLB::Read);
-    // Need to account for a multiple access like Atomic and Timing CPUs
+    // Need to account for multiple accesses, as the Atomic and
+    // Timing CPUs do
while (1) {
memReq = new Request(0, addr, size, flags, masterId,
- thread->pcState().instAddr(), tc->contextId(), 0);
+ thread->pcState().instAddr(), tc->contextId());
// translate to physical address
fault = dtb->translateFunctional(memReq, tc, BaseTLB::Write);
sizeof(MachInst),
0,
masterId,
- fetch_PC, thread->contextId(),
- unverifiedInst->threadNumber);
+ fetch_PC, thread->contextId());
memReq->setVirt(0, fetch_PC, sizeof(MachInst),
Request::INST_FETCH, masterId, thread->instAddr());
syncThreadContext();
Request mmio_req(paddr, size, Request::UNCACHEABLE, dataMasterId());
- mmio_req.setThreadContext(tc->contextId(), 0);
+ mmio_req.setContext(tc->contextId());
// Some architectures do need to massage physical addresses a bit
// before they are inserted into the memory system. This enables
// APIC accesses on x86 and m5ops where supported through a MMIO
Request io_req(pAddr, kvm_run.io.size, Request::UNCACHEABLE,
dataMasterId());
- io_req.setThreadContext(tc->contextId(), 0);
+ io_req.setContext(tc->contextId());
const MemCmd cmd(isWrite ? MemCmd::WriteReq : MemCmd::ReadReq);
// Temporarily lock and migrate to the event queue of the
"%s addr: 0x%x pc: %s line_offset: %d request_size: %d\n",
request_id, aligned_pc, pc, line_offset, request_size);
- request->request.setThreadContext(cpu.threads[0]->getTC()->contextId(),
- /* thread id */ 0);
+ request->request.setContext(cpu.threads[0]->getTC()->contextId());
request->request.setVirt(0 /* asid */,
aligned_pc, request_size, Request::INST_FETCH, cpu.instMasterId(),
/* I've no idea why we need the PC, but give it */
Request *fragment = new Request();
- fragment->setThreadContext(request.contextId(), /* thread id */ 0);
+ fragment->setContext(request.contextId());
fragment->setVirt(0 /* asid */,
fragment_addr, fragment_size, request.getFlags(),
request.masterId(),
if (request->request.isMmappedIpr()) {
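+        /* Requests no longer carry a thread ID, so recover the
+         * CPU-local thread index from the global context ID. */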
ThreadContext *thread =
- cpu.getContext(request->request.threadId());
+ cpu.getContext(cpu.contextToThread(
+ request->request.contextId()));
if (request->isLoad) {
DPRINTF(MinorMem, "IPR read inst: %s\n", *(request->inst));
inst->traceData->setMem(addr, size, flags);
int cid = cpu.threads[inst->id.threadId]->getTC()->contextId();
- request->request.setThreadContext(cid, /* thread id */ 0);
+ request->request.setContext(cid);
request->request.setVirt(0 /* asid */,
addr, size, flags, cpu.dataMasterId(),
/* I've no idea why we need the PC, but give it */
void
DefaultFetch<Impl>::processCacheCompletion(PacketPtr pkt)
{
- ThreadID tid = pkt->req->threadId();
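+    // The request now carries only a context ID; map it back to a
+    // CPU-local thread ID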
+ ThreadID tid = cpu->contextToThread(pkt->req->contextId());
DPRINTF(Fetch, "[tid:%u] Waking up from cache miss.\n", tid);
assert(!cpu->switchedOut());
RequestPtr mem_req =
new Request(tid, fetchBufferBlockPC, fetchBufferSize,
Request::INST_FETCH, cpu->instMasterId(), pc,
- cpu->thread[tid]->contextId(), tid);
+ cpu->thread[tid]->contextId());
mem_req->taskId(cpu->taskId());
void
DefaultFetch<Impl>::finishTranslation(const Fault &fault, RequestPtr mem_req)
{
- ThreadID tid = mem_req->threadId();
+ ThreadID tid = cpu->contextToThread(mem_req->contextId());
Addr fetchBufferBlockPC = mem_req->getVaddr();
assert(!cpu->switchedOut());
LSQ<Impl>::read(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
int load_idx)
{
- ThreadID tid = req->threadId();
+ ThreadID tid = cpu->contextToThread(req->contextId());
return thread[tid].read(req, sreqLow, sreqHigh, load_idx);
}
LSQ<Impl>::write(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
uint8_t *data, int store_idx)
{
- ThreadID tid = req->threadId();
+ ThreadID tid = cpu->contextToThread(req->contextId());
return thread[tid].write(req, sreqLow, sreqHigh, data, store_idx);
}
DPRINTF(LSQ, "Got error packet back for address: %#X\n",
pkt->getAddr());
- thread[pkt->req->threadId()].completeDataAccess(pkt);
+ thread[cpu->contextToThread(pkt->req->contextId())]
+ .completeDataAccess(pkt);
if (pkt->isInvalidate()) {
// This response also contains an invalidate; e.g. this can be the case
BaseSimpleCPU::init();
int cid = threadContexts[0]->contextId();
- ifetch_req.setThreadContext(cid, 0);
- data_read_req.setThreadContext(cid, 0);
- data_write_req.setThreadContext(cid, 0);
+ ifetch_req.setContext(cid);
+ data_read_req.setContext(cid);
+ data_write_req.setContext(cid);
}
AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
if (numThreads > 1) {
ContextID cid = threadContexts[curThread]->contextId();
- ifetch_req.setThreadContext(cid, curThread);
- data_read_req.setThreadContext(cid, curThread);
- data_write_req.setThreadContext(cid, curThread);
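+        // Retarget the reused per-CPU request objects at the new
+        // thread's context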
+ ifetch_req.setContext(cid);
+ data_read_req.setContext(cid);
+ data_write_req.setContext(cid);
}
SimpleExecContext& t_info = *threadInfo[curThread];
Fault fault;
const int asid = 0;
- const ThreadID tid = curThread;
const Addr pc = thread->instAddr();
unsigned block_size = cacheLineSize();
BaseTLB::Mode mode = BaseTLB::Read;
if (traceData)
traceData->setMem(addr, size, flags);
- RequestPtr req = new Request(asid, addr, size,
- flags, dataMasterId(), pc,
- thread->contextId(), tid);
+ RequestPtr req = new Request(asid, addr, size, flags, dataMasterId(), pc,
+ thread->contextId());
req->taskId(taskId());
uint8_t *newData = new uint8_t[size];
const int asid = 0;
- const ThreadID tid = curThread;
const Addr pc = thread->instAddr();
unsigned block_size = cacheLineSize();
BaseTLB::Mode mode = BaseTLB::Write;
if (traceData)
traceData->setMem(addr, size, flags);
- RequestPtr req = new Request(asid, addr, size,
- flags, dataMasterId(), pc,
- thread->contextId(), tid);
+ RequestPtr req = new Request(asid, addr, size, flags, dataMasterId(), pc,
+ thread->contextId());
req->taskId(taskId());
_status = BaseSimpleCPU::Running;
Request *ifetch_req = new Request();
ifetch_req->taskId(taskId());
- ifetch_req->setThreadContext(thread->contextId(), curThread);
+ ifetch_req->setContext(thread->contextId());
setupFetchRequest(ifetch_req);
DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr());
thread->itb->translateTiming(ifetch_req, thread->getTC(),
bool do_functional = (random_mt.random(0, 100) < percentFunctional) &&
!uncacheable;
Request *req = new Request(paddr, 1, flags, masterId);
- req->setThreadContext(id, 0);
+ req->setContext(id);
outstandingAddrs.insert(paddr);
// generate packet for virtual network 1
requestType = MemCmd::ReadReq;
flags.set(Request::INST_FETCH);
- req = new Request(0, 0x0, access_size, flags, masterId, 0x0, 0, 0);
+ req = new Request(0, 0x0, access_size, flags, masterId, 0x0, 0);
req->setPaddr(paddr);
} else { // if (randomReqType == 2)
// generate packet for virtual network 2
req = new Request(paddr, access_size, flags, masterId);
}
- req->setThreadContext(id,0);
+ req->setContext(id);
//No need to do functional simulation
//We just do timing simulation of the network
// Prefetches are assumed to be 0 sized
Request *req = new Request(m_address, 0, flags,
m_tester_ptr->masterId(), curTick(), m_pc);
- req->setThreadContext(index, 0);
+ req->setContext(index);
PacketPtr pkt = new Packet(req, cmd);
// despite the oddity of the 0 size (questionable if this should
Request *req = new Request(writeAddr, 1, flags, m_tester_ptr->masterId(),
curTick(), m_pc);
- req->setThreadContext(index, 0);
+ req->setContext(index);
Packet::Command cmd;
// 1 out of 8 chance, issue an atomic rather than a write
Request *req = new Request(m_address, CHECK_SIZE, flags,
m_tester_ptr->masterId(), curTick(), m_pc);
- req->setThreadContext(index, 0);
+ req->setContext(index);
PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
uint8_t *dataArray = new uint8_t[CHECK_SIZE];
pkt->dataDynamic(dataArray);
// Create a request and the packet containing request
Request* req = new Request(node_ptr->physAddr, node_ptr->size,
node_ptr->flags, masterID, node_ptr->seqNum,
- ContextID(0), ThreadID(0));
+ ContextID(0));
req->setPC(node_ptr->pc);
// If virtual address is valid, set the asid and virtual address fields
// of the request.
req->setPC(pc);
-    // If this is not done it triggers assert in L1 cache for invalid contextId
+    // If this is not done, the L1 cache asserts on an invalid contextId
- req->setThreadContext(ContextID(0), ThreadID(0));
+ req->setContext(ContextID(0));
// Embed it in a packet
PacketPtr pkt = new Packet(req, cmd);
Request* req)
{
if (!req) {
- req = new Request(0, 0, 0, 0, masterId(), 0, gpuDynInst->wfDynId, -1);
+ req = new Request(0, 0, 0, 0, masterId(), 0, gpuDynInst->wfDynId);
}
req->setPaddr(0);
if (kernelLaunch) {
ndr->addrToNotify = (volatile bool*)curTask.addrToNotify;
ndr->numDispLeft = (volatile uint32_t*)curTask.numDispLeft;
ndr->dispatchId = nextId;
- ndr->curTid = pkt->req->threadId();
+ ndr->curCid = pkt->req->contextId();
DPRINTF(GPUDisp, "launching kernel %d\n",nextId);
execIds.push(nextId);
++nextId;
while (ndRangeMap[execId].wg_disp_rem) {
//update the thread context
- shader->updateThreadContext(ndRangeMap[execId].curTid);
+ shader->updateContext(ndRangeMap[execId].curCid);
// attempt to dispatch_workgroup
if (!shader->dispatch_workgroups(&ndRangeMap[execId])) {
volatile bool *addrToNotify;
volatile uint32_t *numDispLeft;
int dispatchId;
- int curTid; // Current thread id
+ int curCid; // Current context id
};
#endif // __NDRANGE_HH__
}
void
-Shader::updateThreadContext(int tid) {
- // thread context of the thread which dispatched work
+Shader::updateContext(int cid) {
+    // context of the thread which dispatched the work
assert(cpuPointer);
- gpuTc = cpuPointer->getContext(tid);
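+    // Note: getContext() expects a CPU-local thread index; this relies
+    // on the dispatching CPU's context IDs lining up with its thread
+    // indices.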
+ gpuTc = cpuPointer->getContext(cid);
assert(gpuTc);
}
bool dispatch_workgroups(NDRange *ndr);
Addr mmap(int length);
void functionalTLBAccess(PacketPtr pkt, int cu_id, BaseTLB::Mode mode);
- void updateThreadContext(int tid);
+ void updateContext(int cid);
void hostWakeUp(BaseCPU *cpu);
};
pf_pkt->allocate();
if (pkt->req->hasContextId()) {
- pf_req->setThreadContext(pkt->req->contextId(),
- pkt->req->threadId());
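+        // only the triggering access's context ID is propagated to the
+        // prefetch; Request no longer carries a thread ID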
+ pf_req->setContext(pkt->req->contextId());
}
if (tagPrefetch && pkt->req->hasPC()) {
VALID_PC = 0x00000010,
/** Whether or not the context ID is valid. */
VALID_CONTEXT_ID = 0x00000020,
- VALID_THREAD_ID = 0x00000040,
/** Whether or not the sc result is valid. */
VALID_EXTRA_DATA = 0x00000080,
/**
* These flags are *not* cleared when a Request object is reused
* (assigned a new address).
*/
- STICKY_PRIVATE_FLAGS = VALID_CONTEXT_ID | VALID_THREAD_ID
+ STICKY_PRIVATE_FLAGS = VALID_CONTEXT_ID
};
private:
* store conditional or the compare value for a CAS. */
uint64_t _extraData;
- /** The context ID (for statistics, typically). */
+ /** The context ID (for statistics, locks, and wakeups). */
ContextID _contextId;
- /** The thread ID (id within this CPU) */
- ThreadID _threadId;
/** program counter of initiating access; for tracing/debugging */
Addr _pc;
Request()
: _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
_taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
- _extraData(0), _contextId(0), _threadId(0), _pc(0),
+ _extraData(0), _contextId(0), _pc(0),
_reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
accessDelta(0), depth(0)
{}
Request(Addr paddr, unsigned size, Flags flags, MasterID mid,
- InstSeqNum seq_num, ContextID cid, ThreadID tid)
+ InstSeqNum seq_num, ContextID cid)
: _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
_taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
- _extraData(0), _contextId(0), _threadId(0), _pc(0),
+ _extraData(0), _contextId(0), _pc(0),
_reqInstSeqNum(seq_num), atomicOpFunctor(nullptr), translateDelta(0),
accessDelta(0), depth(0)
{
setPhys(paddr, size, flags, mid, curTick());
- setThreadContext(cid, tid);
+ setContext(cid);
privateFlags.set(VALID_INST_SEQ_NUM);
}
Request(Addr paddr, unsigned size, Flags flags, MasterID mid)
: _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
_taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
- _extraData(0), _contextId(0), _threadId(0), _pc(0),
+ _extraData(0), _contextId(0), _pc(0),
_reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
accessDelta(0), depth(0)
{
Request(Addr paddr, unsigned size, Flags flags, MasterID mid, Tick time)
: _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
_taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
- _extraData(0), _contextId(0), _threadId(0), _pc(0),
+ _extraData(0), _contextId(0), _pc(0),
_reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
accessDelta(0), depth(0)
{
Addr pc)
: _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
_taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
- _extraData(0), _contextId(0), _threadId(0), _pc(pc),
+ _extraData(0), _contextId(0), _pc(pc),
_reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
accessDelta(0), depth(0)
{
}
Request(int asid, Addr vaddr, unsigned size, Flags flags, MasterID mid,
- Addr pc, ContextID cid, ThreadID tid)
+ Addr pc, ContextID cid)
: _paddr(0), _size(0), _masterId(invldMasterId), _time(0),
_taskId(ContextSwitchTaskId::Unknown), _asid(0), _vaddr(0),
- _extraData(0), _contextId(0), _threadId(0), _pc(0),
+ _extraData(0), _contextId(0), _pc(0),
_reqInstSeqNum(0), atomicOpFunctor(nullptr), translateDelta(0),
accessDelta(0), depth(0)
{
setVirt(asid, vaddr, size, flags, mid, pc);
- setThreadContext(cid, tid);
+ setContext(cid);
}
- Request(int asid, Addr vaddr, int size, Flags flags, MasterID mid, Addr pc,
- int cid, ThreadID tid, AtomicOpFunctor *atomic_op)
+ Request(int asid, Addr vaddr, unsigned size, Flags flags, MasterID mid,
+ Addr pc, ContextID cid, AtomicOpFunctor *atomic_op)
: atomicOpFunctor(atomic_op)
{
setVirt(asid, vaddr, size, flags, mid, pc);
- setThreadContext(cid, tid);
+ setContext(cid);
}
~Request()
}
/**
- * Set up CPU and thread numbers.
+     * Set up the context ID.
*/
void
- setThreadContext(ContextID context_id, ThreadID tid)
+ setContext(ContextID context_id)
{
_contextId = context_id;
- _threadId = tid;
- privateFlags.set(VALID_CONTEXT_ID|VALID_THREAD_ID);
+ privateFlags.set(VALID_CONTEXT_ID);
}
/**
return _contextId;
}
- /** Accessor function for thread ID. */
- ThreadID
- threadId() const
- {
- assert(privateFlags.isSet(VALID_THREAD_ID));
- return _threadId;
- }
-
void
setPC(Addr pc)
{