/*
- * Copyright (c) 2011,2013 ARM Limited
+ * Copyright (c) 2011,2013,2017 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
// If the request is a store that carries no data (a cache block zero or a
// cache maintenance operation), there is no data to check against. We need
// something to compare to, so use a const set of zeros.
- if (flags & Request::CACHE_BLOCK_ZERO) {
+ if (flags & Request::STORE_NO_DATA) {
assert(!data);
assert(sizeof(zero_data) <= fullSize);
data = zero_data;
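
Context for the hunk above: a request flagged STORE_NO_DATA has no payload for the checker to compare, so a constant zero block stands in for the data pointer. A minimal standalone sketch of that substitution; the names (zero_block, effectiveData) are illustrative, not the checker's identifiers:

    // Standalone sketch, not gem5 code: substitute a constant zero block
    // when a store-path request carries no data of its own.
    #include <cstddef>
    #include <cstdint>

    static const std::size_t kMaxBlockSize = 64;
    static const uint8_t zero_block[kMaxBlockSize] = {};

    // Return the buffer to compare against: the store's own data when it
    // carries any, otherwise the constant zero block.
    const uint8_t *
    effectiveData(const uint8_t *data, bool store_no_data)
    {
        return store_no_data ? zero_block : data;
    }
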
/*
- * Copyright (c) 2013-2014 ARM Limited
+ * Copyright (c) 2013-2014,2017 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
while (ret == NoAddrRangeCoverage && i != slots.rend()) {
LSQRequestPtr slot = *i;
+ /* Cache maintenance instructions go down via the store path
+ * but they carry no data and they shouldn't be considered for
+ * forwarding */
if (slot->packet &&
- slot->inst->id.threadId == request->inst->id.threadId) {
+ slot->inst->id.threadId == request->inst->id.threadId &&
+ !slot->packet->req->isCacheMaintenance()) {
AddrRangeCoverage coverage = slot->containsAddrRangeOf(request);
if (coverage != NoAddrRangeCoverage) {
/* request_data becomes the property of a ...DataRequest (see below)
* and destroyed by its destructor */
request_data = new uint8_t[size];
- if (flags & Request::CACHE_BLOCK_ZERO) {
+ if (flags & Request::STORE_NO_DATA) {
/* For cache zeroing and other no-data requests, just use zeroed data */
std::memset(request_data, 0, size);
} else {
if (sender_state)
ret->pushSenderState(sender_state);
- if (isLoad)
+ if (isLoad) {
ret->allocate();
- else
+ } else if (!request.isCacheMaintenance()) {
+ // CMOs are treated as stores but they carry no data, so nothing is
+ // attached here. All other stores hand their data to the packet.
ret->dataDynamic(data);
+ }
return ret;
}
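
The hunk above changes the packet-building path so only data-carrying requests get a buffer attached. A standalone sketch of that three-way split, under assumed names (PacketSketch and attachData are illustrative, not gem5's types): loads reserve space for the response, ordinary stores hand ownership of their data to the packet, and cache maintenance packets stay data-less.

    // Standalone sketch, assumed types and names.
    #include <cstddef>
    #include <cstdint>
    #include <memory>
    #include <vector>

    struct PacketSketch {
        std::vector<uint8_t> response;        // space for load response data
        std::unique_ptr<uint8_t[]> storeData; // store data owned by the packet

        void allocate(std::size_t n) { response.resize(n); }
        void dataDynamic(uint8_t *d) { storeData.reset(d); }
    };

    void
    attachData(PacketSketch &pkt, bool is_load, bool is_cache_maintenance,
               uint8_t *data, std::size_t size)
    {
        if (is_load) {
            pkt.allocate(size);        // loads need room for the reply
        } else if (!is_cache_maintenance) {
            pkt.dataDynamic(data);     // packet takes ownership of the data
        }
        // Cache maintenance: neither branch runs; the packet carries no data.
    }
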
/*
- * Copyright (c) 2012-2014 ARM Limited
+ * Copyright (c) 2012-2014,2017 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
store_size = storeQueue[store_idx].size;
- if (store_size == 0)
- continue;
- else if (storeQueue[store_idx].inst->strictlyOrdered())
+ if (!store_size || storeQueue[store_idx].inst->strictlyOrdered() ||
+ (storeQueue[store_idx].req &&
+ storeQueue[store_idx].req->isCacheMaintenance())) {
+ // Cache maintenance instructions go down via the store
+ // path but they carry no data and they shouldn't be
+ // considered for forwarding
continue;
+ }
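
Both this O3 hunk and the minor-CPU hunk earlier exclude the same class of store-queue entries from store-to-load forwarding: entries with no buffered data, strictly ordered stores, and cache maintenance operations. A condensed standalone sketch of that predicate, with assumed field names:

    // Standalone sketch with assumed field names: when a store-queue entry
    // is skipped during the forwarding search, mirroring the check above.
    struct SQEntrySketch {
        unsigned size;           // bytes of store data buffered (0 if none)
        bool strictlyOrdered;    // strongly-ordered / device store
        bool hasReq;             // a memory request has been attached
        bool isCacheMaintenance; // clean/invalidate on the store path
    };

    // True when the entry has no bytes a younger load could consume.
    bool
    skipForForwarding(const SQEntrySketch &e)
    {
        return e.size == 0 || e.strictlyOrdered ||
            (e.hasReq && e.isCacheMaintenance);
    }
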
assert(storeQueue[store_idx].inst->effAddrValid());
storeQueue[store_idx].sreqHigh = sreqHigh;
unsigned size = req->getSize();
storeQueue[store_idx].size = size;
- storeQueue[store_idx].isAllZeros = req->getFlags() & Request::CACHE_BLOCK_ZERO;
- assert(size <= sizeof(storeQueue[store_idx].data) ||
- (req->getFlags() & Request::CACHE_BLOCK_ZERO));
+ bool store_no_data = req->getFlags() & Request::STORE_NO_DATA;
+ storeQueue[store_idx].isAllZeros = store_no_data;
+ assert(size <= sizeof(storeQueue[store_idx].data) || store_no_data);
// Split stores can only occur in ISAs with unaligned memory accesses. If
// a store request has been split, sreqLow and sreqHigh will be non-null.
storeQueue[store_idx].isSplit = true;
}
- if (!(req->getFlags() & Request::CACHE_BLOCK_ZERO))
+ if (!(req->getFlags() & Request::CACHE_BLOCK_ZERO) &&
+ !req->isCacheMaintenance())
memcpy(storeQueue[store_idx].data, data, size);
// This function only writes the data to the store queue, so no fault
if (data == NULL) {
assert(size <= 64);
- assert(flags & Request::CACHE_BLOCK_ZERO);
+ assert(flags & Request::STORE_NO_DATA);
// This must be a cache block zeroing or cache maintenance request
data = zero_array;
}
// Now do the access.
if (fault == NoFault) {
- MemCmd cmd = MemCmd::WriteReq; // default
bool do_access = true; // flag to suppress cache access
if (req->isLLSC()) {
- cmd = MemCmd::StoreCondReq;
do_access = TheISA::handleLockedWrite(thread, req, dcachePort.cacheBlockMask);
} else if (req->isSwap()) {
- cmd = MemCmd::SwapReq;
if (req->isCondSwap()) {
assert(res);
req->setExtraData(*res);
}
if (do_access && !req->getFlags().isSet(Request::NO_ACCESS)) {
- Packet pkt = Packet(req, cmd);
+ Packet pkt(req, Packet::makeWriteCmd(req));
pkt.dataStatic(data);
if (req->isMmappedIpr()) {
BaseTLB::Mode mode = BaseTLB::Write;
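
The hunk above drops the hand-rolled MemCmd selection and asks Packet::makeWriteCmd(req) for the command, so one helper owns the mapping from request flags to write-type commands, including whatever the maintenance requests need. A standalone sketch of that kind of mapping; WriteCmdSketch and ReqSketch are illustrative, only the LLSC/swap/plain-write cases are taken from the removed code, and the cache-maintenance branch is an assumption about the new behaviour:

    // Standalone sketch, not Packet::makeWriteCmd itself.
    enum class WriteCmdSketch { WriteReq, StoreCondReq, SwapReq, CacheMaintReq };

    struct ReqSketch {
        bool llsc = false;             // load-linked / store-conditional
        bool swap = false;             // atomic swap
        bool cacheMaintenance = false; // clean and/or invalidate
    };

    WriteCmdSketch
    writeCmdFor(const ReqSketch &req)
    {
        if (req.llsc)
            return WriteCmdSketch::StoreCondReq;
        if (req.swap)
            return WriteCmdSketch::SwapReq;
        if (req.cacheMaintenance)
            return WriteCmdSketch::CacheMaintReq; // stand-in for clean/invalidate
        return WriteCmdSketch::WriteReq;          // ordinary store
    }
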
if (data == NULL) {
- assert(flags & Request::CACHE_BLOCK_ZERO);
+ assert(flags & Request::STORE_NO_DATA);
// This must be a cache block zeroing or cache maintenance request
memset(newData, 0, size);
} else {
*/
STICKY_FLAGS = INST_FETCH
};
+ static const FlagsType STORE_NO_DATA = CACHE_BLOCK_ZERO |
+ CLEAN | INVALIDATE;
/** Master Ids that are statically allocated
* @{*/
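
The new STORE_NO_DATA flag groups every request that travels the store path without carrying payload bytes: cache-block zeroing plus the clean/invalidate maintenance requests. A standalone sketch of building and testing such a composite bitmask; the bit positions are placeholders, not gem5's real flag encodings:

    // Standalone sketch: composite flag built from individual request flags.
    #include <cassert>
    #include <cstdint>

    using FlagsType = uint64_t;

    const FlagsType CACHE_BLOCK_ZERO = 1ULL << 0; // zero an entire cache block
    const FlagsType CLEAN            = 1ULL << 1; // cache clean maintenance
    const FlagsType INVALIDATE       = 1ULL << 2; // cache invalidate maintenance

    // Any request with one of these bits set takes the store path but has
    // no data to write, check or forward.
    const FlagsType STORE_NO_DATA = CACHE_BLOCK_ZERO | CLEAN | INVALIDATE;

    int main()
    {
        FlagsType clean_req   = CLEAN;
        FlagsType zero_req    = CACHE_BLOCK_ZERO;
        FlagsType plain_store = 0;

        assert(clean_req & STORE_NO_DATA);
        assert(zero_req & STORE_NO_DATA);
        assert(!(plain_store & STORE_NO_DATA));
        return 0;
    }
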