: Sequencer * sequencer;
CacheMemory * L1Icache;
CacheMemory * L1Dcache;
- Cycles request_latency := 2;
+ Cycles request_latency := 1;
+ Cycles response_latency := 1;
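+ // (Assumption: the fixed message latencies are kept at a single cycle here
+ // because the cache access time is now charged separately, via the
+ // getTagLatency()-based helpers added below.)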
Cycles use_timeout_latency := 50;
bool send_evictions;
return State:I;
}
+ // L1 hit latency: charge requests on the mandatory queue the tag lookup
+ // latency of the cache they access (I-cache for fetches, D-cache otherwise)
+ Cycles mandatoryQueueLatency(RubyRequestType type) {
+ if (type == RubyRequestType:IFETCH) {
+ return L1Icache.getTagLatency();
+ } else {
+ return L1Dcache.getTagLatency();
+ }
+ }
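+ // (mandatoryQueueLatency() overrides the controller's default fixed
+ // mandatory-queue latency; the Sequencer is assumed to query this hook when
+ // it enqueues CPU requests, so the tag latency models the L1 hit time.)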
+
+ // Latency for responses that fetch data from cache
+ Cycles cacheResponseLatency() {
+ if (L1Dcache.getTagLatency() > response_latency) {
+ return L1Dcache.getTagLatency();
+ } else {
+ return response_latency;
+ }
+ }
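+ // (Assumption: getTagLatency() is declared on the external CacheMemory
+ // structure, e.g. in RubySlicc_Types.sm:
+ //     structure (CacheMemory, external = "yes") {
+ //       Cycles getTagLatency();
+ //       Cycles getDataLatency();
+ //       ...
+ //     }
+ // and reports the tagAccessLatency configured on the RubyCache python
+ // object, so responses carrying cache data never cost less than a tag lookup.)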
+
void setState(TBE tbe, Entry cache_entry, Addr addr, State state) {
assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);
peek(requestNetwork_in, RequestMsg) {
assert(is_valid(cache_entry));
if (in_msg.RequestorMachine == MachineType:L2Cache) {
- enqueue(responseNetwork_out, ResponseMsg, request_latency) {
+ enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
DPRINTF(RubySlicc, "Sending data to L2: %#x\n", in_msg.addr);
}
else {
- enqueue(responseNetwork_out, ResponseMsg, request_latency) {
+ enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
}
action(e_sendDataToL2, "ee", desc="Send data from cache to requestor") {
- enqueue(responseNetwork_out, ResponseMsg, request_latency) {
+ enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
assert(is_valid(cache_entry));
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA;
peek(requestNetwork_in, RequestMsg) {
assert(is_valid(cache_entry));
if (in_msg.RequestorMachine == MachineType:L2Cache) {
- enqueue(responseNetwork_out, ResponseMsg, request_latency) {
+ enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
DPRINTF(RubySlicc, "Sending exclusive data to L2\n");
}
else {
- enqueue(responseNetwork_out, ResponseMsg, request_latency) {
+ enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
action(f_sendAck, "f", desc="Send ack from cache to requestor") {
peek(requestNetwork_in, RequestMsg) {
if (in_msg.RequestorMachine == MachineType:L1Cache) {
- enqueue(responseNetwork_out, ResponseMsg, request_latency) {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
}
}
else {
- enqueue(responseNetwork_out, ResponseMsg, request_latency) {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
}
action(g_sendUnblock, "g", desc="Send unblock to memory") {
- enqueue(responseNetwork_out, ResponseMsg, request_latency) {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:UNBLOCK;
out_msg.Sender := machineID;
}
action(gg_sendUnblockExclusive, "\g", desc="Send unblock exclusive to memory") {
- enqueue(responseNetwork_out, ResponseMsg, request_latency) {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
out_msg.Sender := machineID;
action(ub_dmaUnblockL2Cache, "ub", desc="Send dma ack to l2 cache") {
peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, request_latency) {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DMA_ACK;
out_msg.Sender := machineID;
assert(is_valid(tbe));
if (in_msg.RequestorMachine == MachineType:L1Cache ||
in_msg.RequestorMachine == MachineType:DMA) {
- enqueue(responseNetwork_out, ResponseMsg, request_latency) {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
}
}
else {
- enqueue(responseNetwork_out, ResponseMsg, request_latency) {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
peek(requestNetwork_in, RequestMsg) {
assert(is_valid(tbe));
if (in_msg.RequestorMachine == MachineType:L1Cache) {
- enqueue(responseNetwork_out, ResponseMsg, request_latency) {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
}
}
else {
- enqueue(responseNetwork_out, ResponseMsg, request_latency) {
+ enqueue(responseNetwork_out, ResponseMsg, response_latency) {
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
machine(MachineType:L2Cache, "Token protocol")
: CacheMemory * L2cache;
- Cycles response_latency := 2;
- Cycles request_latency := 2;
+ Cycles response_latency := 1;
+ Cycles request_latency := 1;
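+ // (As in the L1 controller, the fixed latencies appear to drop to one cycle
+ // because the L2 tag access time is now charged via cacheResponseLatency()
+ // below.)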
// L2 BANK QUEUES
// From local bank of L2 cache TO the network
MachineID mapAddressToMachine(Addr addr, MachineType mtype);
void wakeUpAllBuffers(Addr a);
+ // Latency for responses that fetch data from cache
+ Cycles cacheResponseLatency() {
+ if (L2cache.getTagLatency() > response_latency) {
+ return L2cache.getTagLatency();
+ } else {
+ return response_latency;
+ }
+ }
+
Entry getCacheEntry(Addr address), return_by_pointer="yes" {
return static_cast(Entry, "pointer", L2cache[address]);
}
action(d_sendDataToL1GETS, "d", desc="Send data directly to L1 requestor") {
assert(is_valid(cache_entry));
peek(L1requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
action(d_sendDataToL1GETX, "\d", desc="Send data and a token from TBE to L1 requestor") {
assert(is_valid(cache_entry));
peek(L1requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
assert(is_valid(tbe));
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
action(dd_sendDataToFwdGETX, "dd", desc="send data") {
assert(is_valid(cache_entry));
peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
action(dd_sendDataToFwdGETS, "\dd", desc="send data") {
assert(is_valid(cache_entry));
peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
action(dd_sendExclusiveDataToFwdGETS, "\d\d", desc="send data") {
assert(is_valid(cache_entry));
peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, response_latency) {
+ enqueue(responseNetwork_out, ResponseMsg, cacheResponseLatency()) {
out_msg.addr := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;