void writeCallback(Addr, DataBlock, bool, MachineType);
void writeCallback(Addr, DataBlock, bool, MachineType,
Cycles, Cycles, Cycles);
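+ // Write callback variant that prevents coalescing of pending requests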
+ void writeUniqueCallback(Addr, DataBlock);
// ll/sc support
void writeCallbackScFail(Addr, DataBlock);
const bool externalHit, const MachineType mach,
const Cycles initialRequestTime,
const Cycles forwardRequestTime,
- const Cycles firstResponseTime)
+ const Cycles firstResponseTime,
+ const bool noCoales)
{
//
// Free the whole list, as we assume we have had exclusive access
// to this line
int aliased_loads = 0;
while (!seq_req_list.empty()) {
SequencerRequest &seq_req = seq_req_list.front();
+
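+ // ruby_request is true only for the first entry, i.e. the request that
+ // was actually serviced by the protocol; later entries were coalesced
+ // onto it while it was outstanding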
+ if (noCoales && !ruby_request) {
+ // Do not process follow-up (coalesced) requests here
+ // (e.g. if the full line is not present);
+ // reissue them to the cache hierarchy instead
+ issueRequest(seq_req.pkt, seq_req.m_second_type);
+ break;
+ }
+
if (ruby_request) {
assert(seq_req.m_type != RubyRequestType_LD);
assert(seq_req.m_type != RubyRequestType_Load_Linked);
const MachineType mach = MachineType_NUM,
const Cycles initialRequestTime = Cycles(0),
const Cycles forwardRequestTime = Cycles(0),
- const Cycles firstResponseTime = Cycles(0));
+ const Cycles firstResponseTime = Cycles(0),
+ const bool noCoales = false);
+
+ // Write callback that prevents coalescing
+ void writeUniqueCallback(Addr address, DataBlock& data)
+ {
+ writeCallback(address, data, true, MachineType_NUM, Cycles(0),
+ Cycles(0), Cycles(0), true);
+ }
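+
+ // The arguments forwarded above mark the write as an external hit and
+ // set noCoales, so only the first pending request on the line completes
+ // here and any coalesced follow-ups are reissued to the cache hierarchy.
+ // Illustrative use from a protocol action (sketch; names such as
+ // cache_entry.DataBlk are assumptions):
+ //
+ //   sequencer.writeUniqueCallback(address, cache_entry.DataBlk);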
void readCallback(Addr address,
DataBlock& data,