schedule(mem_req_event, curTick() + req_tick_latency);
} else {
- assert(gpuDynInst->isEndOfKernel());
+ // kernel end release must be enabled
+ assert(shader->impl_kern_end_rel);
+ assert(gpuDynInst->isEndOfKernel());
- req->setCacheCoherenceFlags(Request::RELEASE);
- req->setReqInstSeqNum(gpuDynInst->seqNum());
- req->setFlags(Request::KERNEL);
- pkt = new Packet(req, MemCmd::MemSyncReq);
- pkt->pushSenderState(
- new ComputeUnit::DataPort::SenderState(gpuDynInst, 0, nullptr));
+ req->setCacheCoherenceFlags(Request::WB_L2);
+ req->setReqInstSeqNum(gpuDynInst->seqNum());
+ req->setFlags(Request::KERNEL);
+ pkt = new Packet(req, MemCmd::MemSyncReq);
+ pkt->pushSenderState(
+ new ComputeUnit::DataPort::SenderState(gpuDynInst, 0, nullptr));
- EventFunctionWrapper *mem_req_event =
- memPort[0]->createMemReqEvent(pkt);
+ EventFunctionWrapper *mem_req_event =
+ memPort[0]->createMemReqEvent(pkt);
- DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x scheduling "
- "a release\n", cu_id, gpuDynInst->simdId,
- gpuDynInst->wfSlotId, 0, pkt->req->getPaddr());
+ DPRINTF(GPUPort, "CU%d: WF[%d][%d]: index %d, addr %#x scheduling "
+ "a release\n", cu_id, gpuDynInst->simdId,
+ gpuDynInst->wfSlotId, 0, pkt->req->getPaddr());
- schedule(mem_req_event, curTick() + req_tick_latency);
+ schedule(mem_req_event, curTick() + req_tick_latency);
}
} else {
gpuDynInst->setRequestFlags(req);
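// Editor's sketch (not part of the patch): standalone illustration of the hunk
// above. At kernel end the CU now builds a MemSyncReq tagged with the WB_L2
// cache-coherence flag (plus KERNEL) instead of the old RELEASE flag, so the
// memory side can key off the writeback bit. All names below are local to this
// sketch, not gem5 API.
#include <cstdint>
#include <iostream>

namespace sketch {

enum : std::uint32_t {
    INV_L1 = 0x00000001,   // kernel-launch acquire: invalidate L1
    WB_L2  = 0x00000020,   // kernel-end release: write back L2
};

// A memory-side handler would recognize the kernel-end sync request by the
// writeback bit on a kernel-scoped request.
bool isKernelEndWriteback(std::uint32_t coherenceFlags, bool isKernelReq)
{
    return isKernelReq && (coherenceFlags & WB_L2) != 0;
}

} // namespace sketch

int main()
{
    std::cout << sketch::isKernelEndWriteback(sketch::WB_L2, true) << '\n';  // 1
    std::cout << sketch::isKernelEndWriteback(sketch::INV_L1, true) << '\n'; // 0
}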
// flush has never been started, performed only once at kernel end
assert(_dispatcher.getOutstandingWbs(kernId) == 0);
- // iterate all cus, managed by the shader, to perform flush.
- for (int i_cu = 0; i_cu < n_cu; ++i_cu) {
- _dispatcher.updateWbCounter(kernId, +1);
- cuList[i_cu]->doFlush(gpuDynInst);
- }
+ // only the first CU managed by the shader performs the flush operation,
+ // assuming the L2 cache is shared by all CUs in the shader
+ int i_cu = 0;
+ _dispatcher.updateWbCounter(kernId, +1);
+ cuList[i_cu]->doFlush(gpuDynInst);
}
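// Editor's sketch (not part of the patch): a minimal model of the
// writeback-counter handshake implied above, assuming the dispatcher
// decrements the counter when the flush response returns. With a shared L2,
// only the first CU issues doFlush(), so the counter goes 0 -> 1 exactly once
// rather than once per CU. Names are local to this sketch, not gem5 API.
#include <cassert>

struct WbCounterModel {
    int outstanding = 0;                    // cf. getOutstandingWbs(kernId)
    void issueFlush() { ++outstanding; }    // cf. updateWbCounter(kernId, +1)
    void flushDone()  { --outstanding; }    // assumed -1 on the response path
    bool kernelMayRetire() const { return outstanding == 0; }
};

int main()
{
    WbCounterModel wb;
    assert(wb.kernelMayRetire());   // flush not started yet
    wb.issueFlush();                // single flush for the shared L2
    assert(!wb.kernelMayRetire());
    wb.flushDone();                 // L2 writeback acknowledged
    assert(wb.kernelMayRetire());
}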
* See the AMD GCN3 ISA Architecture Manual for more
* details.
*
+ * INV_L1: L1 cache invalidation
+ * WB_L2: L2 cache writeback
+ *
* SLC: System Level Coherent. Accesses are forced to miss in
* the L2 cache and are coherent with system memory.
*
* between atomic return/no-return operations.
*/
enum : CacheCoherenceFlagsType {
+ /** mem_sync_op flags */
+ INV_L1 = 0x00000001,
+ WB_L2 = 0x00000020,
/** user-policy flags */
SLC_BIT = 0x00000080,
GLC_BIT = 0x00000100,
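// Editor's sketch (not part of the patch): standalone check that the new
// mem_sync_op bits stay disjoint from the existing user-policy bits; the
// constants simply mirror the values above.
#include <cstdint>

namespace flag_check {
constexpr std::uint32_t INV_L1  = 0x00000001;
constexpr std::uint32_t WB_L2   = 0x00000020;
constexpr std::uint32_t SLC_BIT = 0x00000080;
constexpr std::uint32_t GLC_BIT = 0x00000100;

static_assert((INV_L1 & WB_L2) == 0, "sync-op bits must be distinct");
static_assert(((INV_L1 | WB_L2) & (SLC_BIT | GLC_BIT)) == 0,
              "sync-op bits must not overlap user-policy bits");
} // namespace flag_check

int main() {}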