TBETable TBEs, template="<L0Cache_TBE>", constructor="m_number_of_TBEs";
+ Tick clockEdge();
+ Cycles ticksToCycles(Tick t);
void set_cache_entry(AbstractCacheEntry a);
void unset_cache_entry();
void set_tbe(TBE a);
// Messages for this L0 cache from the L1 cache
in_port(messgeBuffer_in, CoherenceMsg, bufferFromL1, rank = 1) {
- if (messgeBuffer_in.isReady()) {
+ if (messgeBuffer_in.isReady(clockEdge())) {
peek(messgeBuffer_in, CoherenceMsg, block_on="addr") {
assert(in_msg.Dest == machineID);
// Mandatory Queue between Node's CPU and its L0 caches
in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank = 0) {
- if (mandatoryQueue_in.isReady()) {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetches to blocks in D-cache
}
action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
- mandatoryQueue_in.dequeue();
+ mandatoryQueue_in.dequeue(clockEdge());
}
action(l_popRequestQueue, "l",
desc="Pop incoming request queue and profile the delay within this virtual network") {
- profileMsgDelay(2, messgeBuffer_in.dequeue());
+ Tick delay := messgeBuffer_in.dequeue(clockEdge());
+ profileMsgDelay(2, ticksToCycles(delay));
}
action(o_popIncomingResponseQueue, "o",
desc="Pop Incoming Response queue and profile the delay within this virtual network") {
- profileMsgDelay(1, messgeBuffer_in.dequeue());
+ Tick delay := messgeBuffer_in.dequeue(clockEdge());
+ profileMsgDelay(1, ticksToCycles(delay));
}
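The same pattern recurs throughout this patch: isReady(), dequeue() and recycle() now take the caller's current tick, and dequeue() returns the time the head message spent queued as a Tick, which the action converts back into cycles before calling profileMsgDelay(). A minimal sketch of that conversion under an assumed fixed clock period (the controllers use the ticksToCycles() prototype declared above; its exact rounding is not reproduced here):

#include <cstdint>

using Tick = uint64_t;
using Cycles = uint64_t;

// Illustration only: a queueing delay measured in ticks, divided by the
// controller's clock period, gives the delay in that controller's cycles.
Cycles delayInCycles(Tick delay_ticks, Tick clock_period)
{
    return delay_ticks / clock_period;
}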
action(s_deallocateTBE, "s", desc="Deallocate TBE") {
int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
+ Tick clockEdge();
+ Cycles ticksToCycles(Tick t);
void set_cache_entry(AbstractCacheEntry a);
void unset_cache_entry();
void set_tbe(TBE a);
// Response From the L2 Cache to this L1 cache
in_port(responseNetwork_in, ResponseMsg, responseFromL2, rank = 3) {
- if (responseNetwork_in.isReady()) {
+ if (responseNetwork_in.isReady(clockEdge())) {
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Destination.isElement(machineID));
// Request to this L1 cache from the shared L2
in_port(requestNetwork_in, RequestMsg, requestFromL2, rank = 2) {
- if(requestNetwork_in.isReady()) {
+ if(requestNetwork_in.isReady(clockEdge())) {
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
// Requests to this L1 cache from the L0 cache.
in_port(messageBufferFromL0_in, CoherenceMsg, bufferFromL0, rank = 0) {
- if (messageBufferFromL0_in.isReady()) {
+ if (messageBufferFromL0_in.isReady(clockEdge())) {
peek(messageBufferFromL0_in, CoherenceMsg) {
Entry cache_entry := getCacheEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
}
action(k_popL0RequestQueue, "k", desc="Pop mandatory queue.") {
- messageBufferFromL0_in.dequeue();
+ messageBufferFromL0_in.dequeue(clockEdge());
}
action(l_popL2RequestQueue, "l",
desc="Pop incoming request queue and profile the delay within this virtual network") {
- profileMsgDelay(2, requestNetwork_in.dequeue());
+ Tick delay := requestNetwork_in.dequeue(clockEdge());
+ profileMsgDelay(2, ticksToCycles(delay));
}
action(o_popL2ResponseQueue, "o",
desc="Pop Incoming Response queue and profile the delay within this virtual network") {
- profileMsgDelay(1, responseNetwork_in.dequeue());
+ Tick delay := responseNetwork_in.dequeue(clockEdge());
+ profileMsgDelay(1, ticksToCycles(delay));
}
action(s_deallocateTBE, "s", desc="Deallocate TBE") {
int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
+ Tick clockEdge();
+ Cycles ticksToCycles(Tick t);
void set_cache_entry(AbstractCacheEntry a);
void unset_cache_entry();
void set_tbe(TBE a);
// searches of all entries in the queue, not just the head msg. All
// msgs in the structure can be invalidated if a demand miss matches.
in_port(optionalQueue_in, RubyRequest, optionalQueue, desc="...", rank = 3) {
- if (optionalQueue_in.isReady()) {
+ if (optionalQueue_in.isReady(clockEdge())) {
peek(optionalQueue_in, RubyRequest) {
// Instruction Prefetch
if (in_msg.Type == RubyRequestType:IFETCH) {
// Response L1 Network - response msg to this L1 cache
in_port(responseL1Network_in, ResponseMsg, responseToL1Cache, rank = 2) {
- if (responseL1Network_in.isReady()) {
+ if (responseL1Network_in.isReady(clockEdge())) {
peek(responseL1Network_in, ResponseMsg, block_on="addr") {
assert(in_msg.Destination.isElement(machineID));
// Request InterChip network - request from this L1 cache to the shared L2
in_port(requestL1Network_in, RequestMsg, requestToL1Cache, rank = 1) {
- if(requestL1Network_in.isReady()) {
+ if(requestL1Network_in.isReady(clockEdge())) {
peek(requestL1Network_in, RequestMsg, block_on="addr") {
assert(in_msg.Destination.isElement(machineID));
// Mandatory Queue between Node's CPU and its L1 caches
in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank = 0) {
- if (mandatoryQueue_in.isReady()) {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetches to blocks in D-cache
}
action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
- mandatoryQueue_in.dequeue();
+ mandatoryQueue_in.dequeue(clockEdge());
}
action(l_popRequestQueue, "l",
desc="Pop incoming request queue and profile the delay within this virtual network") {
- profileMsgDelay(2, requestL1Network_in.dequeue());
+ Tick delay := requestL1Network_in.dequeue(clockEdge());
+ profileMsgDelay(2, ticksToCycles(delay));
}
action(o_popIncomingResponseQueue, "o",
desc="Pop Incoming Response queue and profile the delay within this virtual network") {
- profileMsgDelay(1, responseL1Network_in.dequeue());
+ Tick delay := responseL1Network_in.dequeue(clockEdge());
+ profileMsgDelay(1, ticksToCycles(delay));
}
action(s_deallocateTBE, "s", desc="Deallocate TBE") {
}
action(pq_popPrefetchQueue, "\pq", desc="Pop the prefetch request queue") {
- optionalQueue_in.dequeue();
+ optionalQueue_in.dequeue(clockEdge());
}
action(mp_markPrefetched, "mp", desc="Write data from response queue to cache") {
TBETable TBEs, template="<L2Cache_TBE>", constructor="m_number_of_TBEs";
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
+ Cycles ticksToCycles(Tick t);
+
void set_cache_entry(AbstractCacheEntry a);
void unset_cache_entry();
void set_tbe(TBE a);
in_port(L1unblockNetwork_in, ResponseMsg, unblockToL2Cache, rank = 2) {
- if(L1unblockNetwork_in.isReady()) {
+ if(L1unblockNetwork_in.isReady(clockEdge())) {
peek(L1unblockNetwork_in, ResponseMsg) {
Entry cache_entry := getCacheEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
// Response L2 Network - response msg to this particular L2 bank
in_port(responseL2Network_in, ResponseMsg, responseToL2Cache, rank = 1) {
- if (responseL2Network_in.isReady()) {
+ if (responseL2Network_in.isReady(clockEdge())) {
peek(responseL2Network_in, ResponseMsg) {
// test whether it's from a local L1 or an off chip source
assert(in_msg.Destination.isElement(machineID));
// L1 Request
in_port(L1RequestL2Network_in, RequestMsg, L1RequestToL2Cache, rank = 0) {
- if(L1RequestL2Network_in.isReady()) {
+ if(L1RequestL2Network_in.isReady(clockEdge())) {
peek(L1RequestL2Network_in, RequestMsg) {
Entry cache_entry := getCacheEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
}
action(jj_popL1RequestQueue, "\j", desc="Pop incoming L1 request queue") {
- profileMsgDelay(0, L1RequestL2Network_in.dequeue());
+ Tick delay := L1RequestL2Network_in.dequeue(clockEdge());
+ profileMsgDelay(0, ticksToCycles(delay));
}
action(k_popUnblockQueue, "k", desc="Pop incoming unblock queue") {
- profileMsgDelay(0, L1unblockNetwork_in.dequeue());
+ Tick delay := L1unblockNetwork_in.dequeue(clockEdge());
+ profileMsgDelay(0, ticksToCycles(delay));
}
action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue") {
- profileMsgDelay(1, responseL2Network_in.dequeue());
+ Tick delay := responseL2Network_in.dequeue(clockEdge());
+ profileMsgDelay(1, ticksToCycles(delay));
}
action(m_writeDataToCache, "m", desc="Write data from response queue to cache") {
}
action(zn_recycleResponseNetwork, "zn", desc="recycle memory request") {
- responseL2Network_in.recycle();
+ responseL2Network_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
// ** OBJECTS **
TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
void set_tbe(TBE tbe);
void unset_tbe();
void wakeUpBuffers(Addr a);
// ** IN_PORTS **
in_port(requestNetwork_in, RequestMsg, requestToDir, rank = 0) {
- if (requestNetwork_in.isReady()) {
+ if (requestNetwork_in.isReady(clockEdge())) {
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
if (isGETRequest(in_msg.Type)) {
}
in_port(responseNetwork_in, ResponseMsg, responseToDir, rank = 1) {
- if (responseNetwork_in.isReady()) {
+ if (responseNetwork_in.isReady(clockEdge())) {
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
// off-chip memory request/response is done
in_port(memQueue_in, MemoryMsg, responseFromMemory, rank = 2) {
- if (memQueue_in.isReady()) {
+ if (memQueue_in.isReady(clockEdge())) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
}
action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
- requestNetwork_in.dequeue();
+ requestNetwork_in.dequeue(clockEdge());
}
action(k_popIncomingResponseQueue, "k", desc="Pop incoming request queue") {
- responseNetwork_in.dequeue();
+ responseNetwork_in.dequeue(clockEdge());
}
action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
- memQueue_in.dequeue();
+ memQueue_in.dequeue(clockEdge());
}
action(kd_wakeUpDependents, "kd", desc="wake-up dependents") {
}
action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
- requestNetwork_in.dequeue();
+ requestNetwork_in.dequeue(clockEdge());
}
action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
}
action(zz_recycleDMAQueue, "zz", desc="recycle DMA queue") {
- requestNetwork_in.recycle();
+ requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
}
State cur_state;
+ Tick clockEdge();
State getState(Addr addr) {
return cur_state;
out_port(requestToDir_out, RequestMsg, requestToDir, desc="...");
in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
- if (dmaRequestQueue_in.isReady()) {
+ if (dmaRequestQueue_in.isReady(clockEdge())) {
peek(dmaRequestQueue_in, SequencerMsg) {
if (in_msg.Type == SequencerRequestType:LD ) {
trigger(Event:ReadRequest, in_msg.LineAddress);
}
in_port(dmaResponseQueue_in, ResponseMsg, responseFromDir, desc="...") {
- if (dmaResponseQueue_in.isReady()) {
+ if (dmaResponseQueue_in.isReady(clockEdge())) {
peek( dmaResponseQueue_in, ResponseMsg) {
if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Ack, makeLineAddress(in_msg.addr));
}
action(p_popRequestQueue, "p", desc="Pop request queue") {
- dmaRequestQueue_in.dequeue();
+ dmaRequestQueue_in.dequeue(clockEdge());
}
action(p_popResponseQueue, "\p", desc="Pop request queue") {
- dmaResponseQueue_in.dequeue();
+ dmaResponseQueue_in.dequeue(clockEdge());
}
transition(READY, ReadRequest, BUSY_RD) {
TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";
// PROTOTYPES
+ Tick clockEdge();
+ Cycles ticksToCycles(Tick t);
void set_cache_entry(AbstractCacheEntry a);
void unset_cache_entry();
void set_tbe(TBE b);
out_port(responseNetwork_out, ResponseMsg, responseFromCache);
in_port(forwardRequestNetwork_in, RequestMsg, forwardToCache) {
- if (forwardRequestNetwork_in.isReady()) {
+ if (forwardRequestNetwork_in.isReady(clockEdge())) {
peek(forwardRequestNetwork_in, RequestMsg, block_on="addr") {
Entry cache_entry := getCacheEntry(in_msg.addr);
}
in_port(responseNetwork_in, ResponseMsg, responseToCache) {
- if (responseNetwork_in.isReady()) {
+ if (responseNetwork_in.isReady(clockEdge())) {
peek(responseNetwork_in, ResponseMsg, block_on="addr") {
Entry cache_entry := getCacheEntry(in_msg.addr);
// Mandatory Queue
in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
- if (mandatoryQueue_in.isReady()) {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
Entry cache_entry := getCacheEntry(in_msg.LineAddress);
}
action(m_popMandatoryQueue, "m", desc="Pop the mandatory request queue") {
- mandatoryQueue_in.dequeue();
+ mandatoryQueue_in.dequeue(clockEdge());
}
action(n_popResponseQueue, "n", desc="Pop the response queue") {
- profileMsgDelay(1, responseNetwork_in.dequeue());
+ Tick delay := responseNetwork_in.dequeue(clockEdge());
+ profileMsgDelay(1, ticksToCycles(delay));
}
action(o_popForwardedRequestQueue, "o", desc="Pop the forwarded request queue") {
- profileMsgDelay(2, forwardRequestNetwork_in.dequeue());
+ Tick delay := forwardRequestNetwork_in.dequeue(clockEdge());
+ profileMsgDelay(2, ticksToCycles(delay));
}
action(p_profileMiss, "pi", desc="Profile cache miss") {
// ** OBJECTS **
TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
+ Tick clockEdge();
+ Cycles ticksToCycles(Tick t);
+ Tick cyclesToTicks(Cycles c);
void set_tbe(TBE b);
void unset_tbe();
// ** IN_PORTS **
in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
- if (dmaRequestQueue_in.isReady()) {
+ if (dmaRequestQueue_in.isReady(clockEdge())) {
peek(dmaRequestQueue_in, DMARequestMsg) {
TBE tbe := TBEs[in_msg.LineAddress];
if (in_msg.Type == DMARequestType:READ) {
}
in_port(requestQueue_in, RequestMsg, requestToDir) {
- if (requestQueue_in.isReady()) {
+ if (requestQueue_in.isReady(clockEdge())) {
peek(requestQueue_in, RequestMsg) {
TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceRequestType:GETS) {
//added by SS
// off-chip memory request/response is done
in_port(memQueue_in, MemoryMsg, responseFromMemory) {
- if (memQueue_in.isReady()) {
+ if (memQueue_in.isReady(clockEdge())) {
peek(memQueue_in, MemoryMsg) {
TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
}
action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
- requestQueue_in.dequeue();
+ requestQueue_in.dequeue(clockEdge());
}
action(p_popIncomingDMARequestQueue, "p", desc="Pop incoming DMA queue") {
- dmaRequestQueue_in.dequeue();
+ dmaRequestQueue_in.dequeue(clockEdge());
}
action(v_allocateTBE, "v", desc="Allocate TBE") {
}
action(z_recycleRequestQueue, "z", desc="recycle request queue") {
- requestQueue_in.recycle();
+ requestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(y_recycleDMARequestQueue, "y", desc="recycle dma request queue") {
- dmaRequestQueue_in.recycle();
+ dmaRequestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
}
action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
- memQueue_in.dequeue();
+ memQueue_in.dequeue(clockEdge());
}
// TRANSITIONS
State cur_state;
+ Tick clockEdge();
+ Cycles ticksToCycles(Tick t);
+
State getState(Addr addr) {
return cur_state;
}
out_port(requestToDir_out, DMARequestMsg, requestToDir, desc="...");
in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
- if (dmaRequestQueue_in.isReady()) {
+ if (dmaRequestQueue_in.isReady(clockEdge())) {
peek(dmaRequestQueue_in, SequencerMsg) {
if (in_msg.Type == SequencerRequestType:LD ) {
trigger(Event:ReadRequest, in_msg.LineAddress);
}
in_port(dmaResponseQueue_in, DMAResponseMsg, responseFromDir, desc="...") {
- if (dmaResponseQueue_in.isReady()) {
+ if (dmaResponseQueue_in.isReady(clockEdge())) {
peek( dmaResponseQueue_in, DMAResponseMsg) {
if (in_msg.Type == DMAResponseType:ACK) {
trigger(Event:Ack, in_msg.LineAddress);
}
action(p_popRequestQueue, "p", desc="Pop request queue") {
- dmaRequestQueue_in.dequeue();
+ dmaRequestQueue_in.dequeue(clockEdge());
}
action(p_popResponseQueue, "\p", desc="Pop request queue") {
- dmaResponseQueue_in.dequeue();
+ dmaResponseQueue_in.dequeue(clockEdge());
}
transition(READY, ReadRequest, BUSY_RD) {
bool isPresent(Addr);
}
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
void set_cache_entry(AbstractCacheEntry b);
void unset_cache_entry();
void set_tbe(TBE b);
// Use Timer
in_port(useTimerTable_in, Addr, useTimerTable) {
- if (useTimerTable_in.isReady()) {
- trigger(Event:Use_Timeout, useTimerTable.readyAddress(),
- getCacheEntry(useTimerTable.readyAddress()),
- TBEs[useTimerTable.readyAddress()]);
+ if (useTimerTable_in.isReady(clockEdge())) {
+ Addr readyAddress := useTimerTable.nextAddress();
+ trigger(Event:Use_Timeout, readyAddress, getCacheEntry(readyAddress),
+ TBEs.lookup(readyAddress));
}
}
// Trigger Queue
in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
- if (triggerQueue_in.isReady()) {
+ if (triggerQueue_in.isReady(clockEdge())) {
peek(triggerQueue_in, TriggerMsg) {
if (in_msg.Type == TriggerType:ALL_ACKS) {
trigger(Event:All_acks, in_msg.addr,
// Request Network
in_port(requestNetwork_in, RequestMsg, requestToL1Cache) {
- if (requestNetwork_in.isReady()) {
+ if (requestNetwork_in.isReady(clockEdge())) {
peek(requestNetwork_in, RequestMsg, block_on="addr") {
assert(in_msg.Destination.isElement(machineID));
DPRINTF(RubySlicc, "L1 received: %s\n", in_msg.Type);
// Response Network
in_port(responseToL1Cache_in, ResponseMsg, responseToL1Cache) {
- if (responseToL1Cache_in.isReady()) {
+ if (responseToL1Cache_in.isReady(clockEdge())) {
peek(responseToL1Cache_in, ResponseMsg, block_on="addr") {
if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Ack, in_msg.addr,
// Nothing from the unblock network
// Mandatory Queue between Node's CPU and its L1 caches
in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
- if (mandatoryQueue_in.isReady()) {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetches to blocks in D-cache
}
action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
- triggerQueue_in.dequeue();
+ triggerQueue_in.dequeue(clockEdge());
}
action(jj_unsetUseTimer, "\jj", desc="Unset use timer.") {
}
action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
- mandatoryQueue_in.dequeue();
+ mandatoryQueue_in.dequeue(clockEdge());
}
action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
- requestNetwork_in.dequeue();
+ requestNetwork_in.dequeue(clockEdge());
}
action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
}
action(n_popResponseQueue, "n", desc="Pop response queue") {
- responseToL1Cache_in.dequeue();
+ responseToL1Cache_in.dequeue(clockEdge());
}
action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
}
action(o_scheduleUseTimeout, "oo", desc="Schedule a use timeout.") {
- useTimerTable.set(address, use_timeout_latency);
+ useTimerTable.set(address,
+ clockEdge() + cyclesToTicks(use_timeout_latency));
}
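The wakeup time handed to the timer table is now an absolute tick: the controller adds its latency, converted to ticks, onto the current clock edge instead of passing a relative cycle count. A small worked example, assuming a 500-tick clock period:

#include <cassert>
#include <cstdint>

using Tick = uint64_t;
using Cycles = uint64_t;

int main()
{
    const Tick clock_period = 500;      // assumed ticks per cycle
    const Tick clock_edge = 20000;      // controller's current clockEdge()
    const Cycles use_timeout_latency = Cycles(50);

    // Absolute wakeup tick, mirroring clockEdge() + cyclesToTicks(latency).
    Tick wakeup = clock_edge + use_timeout_latency * clock_period;
    assert(wakeup == 45000);
    return 0;
}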
action(ub_dmaUnblockL2Cache, "ub", desc="Send dma ack to l2 cache") {
}
action(z_recycleRequestQueue, "z", desc="Send the head of the mandatory queue to the back of the queue.") {
- requestNetwork_in.recycle();
+ requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(zz_recycleMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
- mandatoryQueue_in.recycle();
+ mandatoryQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
//*****************************************************
TBETable TBEs, template="<L2Cache_TBE>", constructor="m_number_of_TBEs";
PerfectCacheMemory localDirectory, template="<L2Cache_DirEntry>";
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
void set_cache_entry(AbstractCacheEntry b);
void unset_cache_entry();
void set_tbe(TBE b);
// Trigger Queue
in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
- if (triggerQueue_in.isReady()) {
+ if (triggerQueue_in.isReady(clockEdge())) {
peek(triggerQueue_in, TriggerMsg) {
if (in_msg.Type == TriggerType:ALL_ACKS) {
trigger(Event:All_Acks, in_msg.addr,
// Request Network
in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
- if (requestNetwork_in.isReady()) {
+ if (requestNetwork_in.isReady(clockEdge())) {
peek(requestNetwork_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
if (in_msg.Requestor == machineID) {
}
in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache) {
- if (L1requestNetwork_in.isReady()) {
+ if (L1requestNetwork_in.isReady(clockEdge())) {
peek(L1requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceRequestType:GETX) {
// Response Network
in_port(responseNetwork_in, ResponseMsg, responseToL2Cache) {
- if (responseNetwork_in.isReady()) {
+ if (responseNetwork_in.isReady(clockEdge())) {
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceResponseType:ACK) {
}
action(m_popRequestQueue, "m", desc="Pop request queue.") {
- requestNetwork_in.dequeue();
+ requestNetwork_in.dequeue(clockEdge());
}
action(m_decrementNumberOfMessagesInt, "\m", desc="Decrement the number of messages for which we're waiting") {
}
action(n_popResponseQueue, "n", desc="Pop response queue") {
- responseNetwork_in.dequeue();
+ responseNetwork_in.dequeue(clockEdge());
}
action(n_popTriggerQueue, "\n", desc="Pop trigger queue.") {
- triggerQueue_in.dequeue();
+ triggerQueue_in.dequeue(clockEdge());
}
action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
- L1requestNetwork_in.dequeue();
+ L1requestNetwork_in.dequeue(clockEdge());
}
peek(L1requestNetwork_in, RequestMsg) {
APPEND_TRANSITION_COMMENT(in_msg.Requestor);
}
- L1requestNetwork_in.recycle();
+ L1requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(zz_recycleRequestQueue, "\zz", desc="Send the head of the mandatory queue to the back of the queue.") {
peek(requestNetwork_in, RequestMsg) {
APPEND_TRANSITION_COMMENT(in_msg.Requestor);
}
- requestNetwork_in.recycle();
+ requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(zz_recycleResponseQueue, "\z\z", desc="Send the head of the mandatory queue to the back of the queue.") {
peek(responseNetwork_in, ResponseMsg) {
APPEND_TRANSITION_COMMENT(in_msg.Sender);
}
- responseNetwork_in.recycle();
+ responseNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(da_sendDmaAckUnblock, "da", desc="Send dma ack to global directory") {
// ** OBJECTS **
TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
void set_tbe(TBE b);
void unset_tbe();
// ** IN_PORTS **
in_port(unblockNetwork_in, ResponseMsg, responseToDir) {
- if (unblockNetwork_in.isReady()) {
+ if (unblockNetwork_in.isReady(clockEdge())) {
peek(unblockNetwork_in, ResponseMsg) {
if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
if (getDirectoryEntry(in_msg.addr).WaitingUnblocks == 1) {
}
in_port(requestQueue_in, RequestMsg, requestToDir) {
- if (requestQueue_in.isReady()) {
+ if (requestQueue_in.isReady(clockEdge())) {
peek(requestQueue_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:GETS, in_msg.addr, TBEs[in_msg.addr]);
// off-chip memory request/response is done
in_port(memQueue_in, MemoryMsg, responseFromMemory) {
- if (memQueue_in.isReady()) {
+ if (memQueue_in.isReady(clockEdge())) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
}
action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
- requestQueue_in.dequeue();
+ requestQueue_in.dequeue(clockEdge());
}
action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
- unblockNetwork_in.dequeue();
+ unblockNetwork_in.dequeue(clockEdge());
}
action(m_addUnlockerToSharers, "m", desc="Add the unlocker to the sharer list") {
}
action(q_popMemQueue, "q", desc="Pop off-chip request queue") {
- memQueue_in.dequeue();
+ memQueue_in.dequeue(clockEdge());
}
action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
}
action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
- requestQueue_in.recycle();
+ requestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(a_sendDMAAck, "\a", desc="Send DMA Ack that write completed, along with Inv Ack count") {
TBETable TBEs, template="<DMA_TBE>", constructor="m_number_of_TBEs";
State cur_state;
+ Tick clockEdge();
void set_tbe(TBE b);
void unset_tbe();
out_port(triggerQueue_out, TriggerMsg, triggerQueue, desc="...");
in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
- if (dmaRequestQueue_in.isReady()) {
+ if (dmaRequestQueue_in.isReady(clockEdge())) {
peek(dmaRequestQueue_in, SequencerMsg) {
if (in_msg.Type == SequencerRequestType:LD ) {
trigger(Event:ReadRequest, in_msg.LineAddress,
}
in_port(dmaResponseQueue_in, ResponseMsg, responseFromDir, desc="...") {
- if (dmaResponseQueue_in.isReady()) {
+ if (dmaResponseQueue_in.isReady(clockEdge())) {
peek( dmaResponseQueue_in, ResponseMsg) {
if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
trigger(Event:DMA_Ack, makeLineAddress(in_msg.addr),
// Trigger Queue
in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
- if (triggerQueue_in.isReady()) {
+ if (triggerQueue_in.isReady(clockEdge())) {
peek(triggerQueue_in, TriggerMsg) {
if (in_msg.Type == TriggerType:ALL_ACKS) {
trigger(Event:All_Acks, in_msg.addr, TBEs[in_msg.addr]);
}
action(p_popRequestQueue, "p", desc="Pop request queue") {
- dmaRequestQueue_in.dequeue();
+ dmaRequestQueue_in.dequeue(clockEdge());
}
action(p_popResponseQueue, "\p", desc="Pop request queue") {
- dmaResponseQueue_in.dequeue();
+ dmaResponseQueue_in.dequeue(clockEdge());
}
action(p_popTriggerQueue, "pp", desc="Pop trigger queue") {
- triggerQueue_in.dequeue();
+ triggerQueue_in.dequeue(clockEdge());
}
action(t_updateTBEData, "t", desc="Update TBE Data") {
int countReadStarvingForAddress(Addr);
}
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
void set_cache_entry(AbstractCacheEntry b);
void unset_cache_entry();
void set_tbe(TBE b);
// Use Timer
in_port(useTimerTable_in, Addr, useTimerTable, rank=5) {
- if (useTimerTable_in.isReady()) {
- TBE tbe := L1_TBEs[useTimerTable.readyAddress()];
-
- if (persistentTable.isLocked(useTimerTable.readyAddress()) &&
- (persistentTable.findSmallest(useTimerTable.readyAddress()) != machineID)) {
- if (persistentTable.typeOfSmallest(useTimerTable.readyAddress()) == AccessType:Write) {
- trigger(Event:Use_TimeoutStarverX, useTimerTable.readyAddress(),
- getCacheEntry(useTimerTable.readyAddress()), tbe);
+ if (useTimerTable_in.isReady(clockEdge())) {
+ Addr readyAddress := useTimerTable.nextAddress();
+ TBE tbe := L1_TBEs.lookup(readyAddress);
+
+ if (persistentTable.isLocked(readyAddress) &&
+ (persistentTable.findSmallest(readyAddress) != machineID)) {
+ if (persistentTable.typeOfSmallest(readyAddress) == AccessType:Write) {
+ trigger(Event:Use_TimeoutStarverX, readyAddress,
+ getCacheEntry(readyAddress), tbe);
} else {
- trigger(Event:Use_TimeoutStarverS, useTimerTable.readyAddress(),
- getCacheEntry(useTimerTable.readyAddress()), tbe);
+ trigger(Event:Use_TimeoutStarverS, readyAddress,
+ getCacheEntry(readyAddress), tbe);
}
} else {
if (no_mig_atomic && IsAtomic(tbe)) {
- trigger(Event:Use_TimeoutNoStarvers_NoMig, useTimerTable.readyAddress(),
- getCacheEntry(useTimerTable.readyAddress()), tbe);
+ trigger(Event:Use_TimeoutNoStarvers_NoMig, readyAddress,
+ getCacheEntry(readyAddress), tbe);
} else {
- trigger(Event:Use_TimeoutNoStarvers, useTimerTable.readyAddress(),
- getCacheEntry(useTimerTable.readyAddress()), tbe);
+ trigger(Event:Use_TimeoutNoStarvers, readyAddress,
+ getCacheEntry(readyAddress), tbe);
}
}
}
// Reissue Timer
in_port(reissueTimerTable_in, Addr, reissueTimerTable, rank=4) {
- if (reissueTimerTable_in.isReady()) {
- trigger(Event:Request_Timeout, reissueTimerTable.readyAddress(),
- getCacheEntry(reissueTimerTable.readyAddress()),
- L1_TBEs[reissueTimerTable.readyAddress()]);
+ Tick current_time := clockEdge();
+ if (reissueTimerTable_in.isReady(current_time)) {
+ Addr addr := reissueTimerTable.nextAddress();
+ trigger(Event:Request_Timeout, addr, getCacheEntry(addr),
+ L1_TBEs.lookup(addr));
}
}
// Persistent Network
in_port(persistentNetwork_in, PersistentMsg, persistentToL1Cache, rank=3) {
- if (persistentNetwork_in.isReady()) {
+ if (persistentNetwork_in.isReady(clockEdge())) {
peek(persistentNetwork_in, PersistentMsg, block_on="addr") {
assert(in_msg.Destination.isElement(machineID));
// Response Network
in_port(responseNetwork_in, ResponseMsg, responseToL1Cache, rank=2) {
- if (responseNetwork_in.isReady()) {
+ if (responseNetwork_in.isReady(clockEdge())) {
peek(responseNetwork_in, ResponseMsg, block_on="addr") {
assert(in_msg.Destination.isElement(machineID));
// Request Network
in_port(requestNetwork_in, RequestMsg, requestToL1Cache) {
- if (requestNetwork_in.isReady()) {
+ if (requestNetwork_in.isReady(clockEdge())) {
peek(requestNetwork_in, RequestMsg, block_on="addr") {
assert(in_msg.Destination.isElement(machineID));
// Mandatory Queue
in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank=0) {
- if (mandatoryQueue_in.isReady()) {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetches to blocks in D-cache
// IssueCount.
// Set a wakeup timer
- reissueTimerTable.set(address, reissue_wakeup_latency);
+ reissueTimerTable.set(
+ address, clockEdge() + cyclesToTicks(reissue_wakeup_latency));
}
} else {
// Set a wakeup timer
if (dynamic_timeout_enabled) {
- reissueTimerTable.set(address, (5 * averageLatencyEstimate()) / 4);
+ reissueTimerTable.set(
+ address, clockEdge() + cyclesToTicks(averageLatencyEstimate()));
} else {
- reissueTimerTable.set(address, fixed_timeout_latency);
+ reissueTimerTable.set(
+ address, clockEdge() + cyclesToTicks(fixed_timeout_latency));
}
}
// IssueCount.
// Set a wakeup timer
- reissueTimerTable.set(address, reissue_wakeup_latency);
+ reissueTimerTable.set(
+ address, clockEdge() + cyclesToTicks(reissue_wakeup_latency));
}
} else {
// Set a wakeup timer
if (dynamic_timeout_enabled) {
- reissueTimerTable.set(address, (5 * averageLatencyEstimate()) / 4);
+ reissueTimerTable.set(
+ address, clockEdge() + cyclesToTicks(averageLatencyEstimate()));
} else {
- reissueTimerTable.set(address, fixed_timeout_latency);
+ reissueTimerTable.set(
+ address, clockEdge() + cyclesToTicks(fixed_timeout_latency));
}
}
}
}
action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
- mandatoryQueue_in.dequeue();
+ mandatoryQueue_in.dequeue(clockEdge());
}
action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
- persistentNetwork_in.dequeue();
+ persistentNetwork_in.dequeue(clockEdge());
}
action(m_popRequestQueue, "m", desc="Pop request queue.") {
- requestNetwork_in.dequeue();
+ requestNetwork_in.dequeue(clockEdge());
}
action(n_popResponseQueue, "n", desc="Pop response queue") {
- responseNetwork_in.dequeue();
+ responseNetwork_in.dequeue(clockEdge());
}
action(o_scheduleUseTimeout, "o", desc="Schedule a use timeout.") {
- useTimerTable.set(address, use_timeout_latency);
+ useTimerTable.set(
+ address, clockEdge() + cyclesToTicks(use_timeout_latency));
}
action(p_informL2AboutTokenLoss, "p", desc="Inform L2 about loss of all tokens") {
PersistentTable persistentTable;
PerfectCacheMemory localDirectory, template="<L2Cache_DirEntry>";
+ Tick clockEdge();
void set_cache_entry(AbstractCacheEntry b);
void unset_cache_entry();
// Persistent Network
in_port(persistentNetwork_in, PersistentMsg, persistentToL2Cache) {
- if (persistentNetwork_in.isReady()) {
+ if (persistentNetwork_in.isReady(clockEdge())) {
peek(persistentNetwork_in, PersistentMsg) {
assert(in_msg.Destination.isElement(machineID));
// Request Network
in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
- if (requestNetwork_in.isReady()) {
+ if (requestNetwork_in.isReady(clockEdge())) {
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
}
in_port(L1requestNetwork_in, RequestMsg, L1RequestToL2Cache) {
- if (L1requestNetwork_in.isReady()) {
+ if (L1requestNetwork_in.isReady(clockEdge())) {
peek(L1requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
// Response Network
in_port(responseNetwork_in, ResponseMsg, responseToL2Cache) {
- if (responseNetwork_in.isReady()) {
+ if (responseNetwork_in.isReady(clockEdge())) {
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
}
action(l_popPersistentQueue, "l", desc="Pop persistent queue.") {
- persistentNetwork_in.dequeue();
+ persistentNetwork_in.dequeue(clockEdge());
}
action(m_popRequestQueue, "m", desc="Pop request queue.") {
- requestNetwork_in.dequeue();
+ requestNetwork_in.dequeue(clockEdge());
}
action(n_popResponseQueue, "n", desc="Pop response queue") {
- responseNetwork_in.dequeue();
+ responseNetwork_in.dequeue(clockEdge());
}
action(o_popL1RequestQueue, "o", desc="Pop L1 request queue.") {
- L1requestNetwork_in.dequeue();
+ L1requestNetwork_in.dequeue(clockEdge());
}
bool starving, default="false";
int l2_select_low_bit, default="RubySystem::getBlockSizeBits()";
+ Tick clockEdge();
+ Tick cyclesToTicks(Cycles c);
void set_tbe(TBE b);
void unset_tbe();
// ** IN_PORTS **
// off-chip memory request/response is done
in_port(memQueue_in, MemoryMsg, responseFromMemory) {
- if (memQueue_in.isReady()) {
+ if (memQueue_in.isReady(clockEdge())) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
// Reissue Timer
in_port(reissueTimerTable_in, Addr, reissueTimerTable) {
- if (reissueTimerTable_in.isReady()) {
- trigger(Event:Request_Timeout, reissueTimerTable.readyAddress(),
- TBEs[reissueTimerTable.readyAddress()]);
+ Tick current_time := clockEdge();
+ if (reissueTimerTable_in.isReady(current_time)) {
+ Addr addr := reissueTimerTable.nextAddress();
+ trigger(Event:Request_Timeout, addr, TBEs.lookup(addr));
}
}
in_port(responseNetwork_in, ResponseMsg, responseToDir) {
- if (responseNetwork_in.isReady()) {
+ if (responseNetwork_in.isReady(clockEdge())) {
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Destination.isElement(machineID));
if (getDirectoryEntry(in_msg.addr).Tokens + in_msg.Tokens == max_tokens()) {
}
in_port(persistentNetwork_in, PersistentMsg, persistentToDir) {
- if (persistentNetwork_in.isReady()) {
+ if (persistentNetwork_in.isReady(clockEdge())) {
peek(persistentNetwork_in, PersistentMsg) {
assert(in_msg.Destination.isElement(machineID));
}
in_port(requestNetwork_in, RequestMsg, requestToDir) {
- if (requestNetwork_in.isReady()) {
+ if (requestNetwork_in.isReady(clockEdge())) {
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceRequestType:GETS) {
}
in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
- if (dmaRequestQueue_in.isReady()) {
+ if (dmaRequestQueue_in.isReady(clockEdge())) {
peek(dmaRequestQueue_in, DMARequestMsg) {
if (in_msg.Type == DMARequestType:READ) {
trigger(Event:DMA_READ, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
// IssueCount.
// Set a wakeup timer
- reissueTimerTable.set(address, reissue_wakeup_latency);
+ reissueTimerTable.set(
+ address, clockEdge() + cyclesToTicks(reissue_wakeup_latency));
}
}
// IssueCount.
// Set a wakeup timer
- reissueTimerTable.set(address, reissue_wakeup_latency);
+ reissueTimerTable.set(
+ address, clockEdge() + cyclesToTicks(reissue_wakeup_latency));
}
}
}
action(j_popIncomingRequestQueue, "j", desc="Pop incoming request queue") {
- requestNetwork_in.dequeue();
+ requestNetwork_in.dequeue(clockEdge());
}
action(z_recycleRequest, "z", desc="Recycle the request queue") {
- requestNetwork_in.recycle();
+ requestNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(k_popIncomingResponseQueue, "k", desc="Pop incoming response queue") {
- responseNetwork_in.dequeue();
+ responseNetwork_in.dequeue(clockEdge());
}
action(kz_recycleResponse, "kz", desc="Recycle incoming response queue") {
- responseNetwork_in.recycle();
+ responseNetwork_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(l_popIncomingPersistentQueue, "l", desc="Pop incoming persistent queue") {
- persistentNetwork_in.dequeue();
+ persistentNetwork_in.dequeue(clockEdge());
}
action(p_popDmaRequestQueue, "pd", desc="pop dma request queue") {
- dmaRequestQueue_in.dequeue();
+ dmaRequestQueue_in.dequeue(clockEdge());
}
action(y_recycleDmaRequestQueue, "y", desc="recycle dma request queue") {
- dmaRequestQueue_in.recycle();
+ dmaRequestQueue_in.recycle(clockEdge(), cyclesToTicks(recycle_latency));
}
action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
- memQueue_in.dequeue();
+ memQueue_in.dequeue(clockEdge());
}
action(r_bounceResponse, "r", desc="Bounce response to starving processor") {
//
if (reissueTimerTable.isSet(address)) {
reissueTimerTable.unset(address);
- reissueTimerTable.set(address, fixed_timeout_latency);
+ reissueTimerTable.set(
+ address, clockEdge() + cyclesToTicks(fixed_timeout_latency));
}
}
//
// currently only support a fixed timeout latency
//
- reissueTimerTable.set(address, fixed_timeout_latency);
+ reissueTimerTable.set(
+ address, clockEdge() + cyclesToTicks(fixed_timeout_latency));
}
action(ut_unsetReissueTimer, "ut", desc="Unset reissue timer.") {
State cur_state;
+ Tick clockEdge();
+
State getState(Addr addr) {
return cur_state;
}
out_port(reqToDirectory_out, DMARequestMsg, reqToDirectory, desc="...");
in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
- if (dmaRequestQueue_in.isReady()) {
+ if (dmaRequestQueue_in.isReady(clockEdge())) {
peek(dmaRequestQueue_in, SequencerMsg) {
if (in_msg.Type == SequencerRequestType:LD ) {
trigger(Event:ReadRequest, in_msg.LineAddress);
}
in_port(dmaResponseQueue_in, DMAResponseMsg, responseFromDir, desc="...") {
- if (dmaResponseQueue_in.isReady()) {
+ if (dmaResponseQueue_in.isReady(clockEdge())) {
peek( dmaResponseQueue_in, DMAResponseMsg) {
if (in_msg.Type == DMAResponseType:ACK) {
trigger(Event:Ack, in_msg.LineAddress);
}
action(p_popRequestQueue, "p", desc="Pop request queue") {
- dmaRequestQueue_in.dequeue();
+ dmaRequestQueue_in.dequeue(clockEdge());
}
action(p_popResponseQueue, "\p", desc="Pop request queue") {
- dmaResponseQueue_in.dequeue();
+ dmaResponseQueue_in.dequeue(clockEdge());
}
transition(READY, ReadRequest, BUSY_RD) {
TBETable TBEs, template="<L1Cache_TBE>", constructor="m_number_of_TBEs";
+ Tick clockEdge();
void set_cache_entry(AbstractCacheEntry b);
void unset_cache_entry();
void set_tbe(TBE b);
// Trigger Queue
in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=3) {
- if (triggerQueue_in.isReady()) {
+ if (triggerQueue_in.isReady(clockEdge())) {
peek(triggerQueue_in, TriggerMsg) {
Entry cache_entry := getCacheEntry(in_msg.addr);
// Response Network
in_port(responseToCache_in, ResponseMsg, responseToCache, rank=2) {
- if (responseToCache_in.isReady()) {
+ if (responseToCache_in.isReady(clockEdge())) {
peek(responseToCache_in, ResponseMsg, block_on="addr") {
Entry cache_entry := getCacheEntry(in_msg.addr);
// Forward Network
in_port(forwardToCache_in, RequestMsg, forwardToCache, rank=1) {
- if (forwardToCache_in.isReady()) {
+ if (forwardToCache_in.isReady(clockEdge())) {
peek(forwardToCache_in, RequestMsg, block_on="addr") {
Entry cache_entry := getCacheEntry(in_msg.addr);
// Mandatory Queue
in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...", rank=0) {
- if (mandatoryQueue_in.isReady()) {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetches to blocks in D-cache
}
action(j_popTriggerQueue, "j", desc="Pop trigger queue.") {
- triggerQueue_in.dequeue();
+ triggerQueue_in.dequeue(clockEdge());
}
action(k_popMandatoryQueue, "k", desc="Pop mandatory queue.") {
- mandatoryQueue_in.dequeue();
+ mandatoryQueue_in.dequeue(clockEdge());
}
action(l_popForwardQueue, "l", desc="Pop forwarded request queue.") {
- forwardToCache_in.dequeue();
+ forwardToCache_in.dequeue(clockEdge());
}
action(hp_copyFromTBEToL2, "li", desc="Copy data from TBE to L2 cache entry.") {
}
action(n_popResponseQueue, "n", desc="Pop response queue") {
- responseToCache_in.dequeue();
+ responseToCache_in.dequeue(clockEdge());
}
action(ll_L2toL1Transfer, "ll", desc="") {
bool isPresent(Addr);
}
+ Tick clockEdge();
void set_cache_entry(AbstractCacheEntry b);
void unset_cache_entry();
void set_tbe(TBE a);
// Trigger Queue
in_port(triggerQueue_in, TriggerMsg, triggerQueue, rank=5) {
- if (triggerQueue_in.isReady()) {
+ if (triggerQueue_in.isReady(clockEdge())) {
peek(triggerQueue_in, TriggerMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
}
in_port(unblockNetwork_in, ResponseMsg, unblockToDir, rank=4) {
- if (unblockNetwork_in.isReady()) {
+ if (unblockNetwork_in.isReady(clockEdge())) {
peek(unblockNetwork_in, ResponseMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
// Response Network
in_port(responseToDir_in, ResponseMsg, responseToDir, rank=3) {
- if (responseToDir_in.isReady()) {
+ if (responseToDir_in.isReady(clockEdge())) {
peek(responseToDir_in, ResponseMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
// off-chip memory request/response is done
in_port(memQueue_in, MemoryMsg, responseFromMemory, rank=2) {
- if (memQueue_in.isReady()) {
+ if (memQueue_in.isReady(clockEdge())) {
peek(memQueue_in, MemoryMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
}
in_port(requestQueue_in, RequestMsg, requestToDir, rank=1) {
- if (requestQueue_in.isReady()) {
+ if (requestQueue_in.isReady(clockEdge())) {
peek(requestQueue_in, RequestMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
TBE tbe := TBEs[in_msg.addr];
}
in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir, rank=0) {
- if (dmaRequestQueue_in.isReady()) {
+ if (dmaRequestQueue_in.isReady(clockEdge())) {
peek(dmaRequestQueue_in, DMARequestMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.LineAddress);
TBE tbe := TBEs[in_msg.LineAddress];
}
action(n_popResponseQueue, "n", desc="Pop response queue") {
- responseToDir_in.dequeue();
+ responseToDir_in.dequeue(clockEdge());
}
action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
}
action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
- requestQueue_in.dequeue();
+ requestQueue_in.dequeue(clockEdge());
}
action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
peek(unblockNetwork_in, ResponseMsg) {
APPEND_TRANSITION_COMMENT(in_msg.Sender);
}
- unblockNetwork_in.dequeue();
+ unblockNetwork_in.dequeue(clockEdge());
}
action(k_wakeUpDependents, "k", desc="wake-up dependents") {
}
action(l_popMemQueue, "q", desc="Pop off-chip request queue") {
- memQueue_in.dequeue();
+ memQueue_in.dequeue(clockEdge());
}
action(g_popTriggerQueue, "g", desc="Pop trigger queue") {
- triggerQueue_in.dequeue();
+ triggerQueue_in.dequeue(clockEdge());
}
action(p_popDmaRequestQueue, "pd", desc="pop dma request queue") {
- dmaRequestQueue_in.dequeue();
+ dmaRequestQueue_in.dequeue(clockEdge());
}
action(zd_stallAndWaitDMARequest, "zd", desc="Stall and wait the dma request queue") {
State cur_state;
+ Tick clockEdge();
+
State getState(Addr addr) {
return cur_state;
}
out_port(requestToDir_out, DMARequestMsg, requestToDir, desc="...");
in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
- if (dmaRequestQueue_in.isReady()) {
+ if (dmaRequestQueue_in.isReady(clockEdge())) {
peek(dmaRequestQueue_in, SequencerMsg) {
if (in_msg.Type == SequencerRequestType:LD ) {
trigger(Event:ReadRequest, in_msg.LineAddress);
}
in_port(dmaResponseQueue_in, DMAResponseMsg, responseFromDir, desc="...") {
- if (dmaResponseQueue_in.isReady()) {
+ if (dmaResponseQueue_in.isReady(clockEdge())) {
peek( dmaResponseQueue_in, DMAResponseMsg) {
if (in_msg.Type == DMAResponseType:ACK) {
trigger(Event:Ack, in_msg.LineAddress);
}
action(p_popRequestQueue, "p", desc="Pop request queue") {
- dmaRequestQueue_in.dequeue();
+ dmaRequestQueue_in.dequeue(clockEdge());
}
action(p_popResponseQueue, "\p", desc="Pop request queue") {
- dmaResponseQueue_in.dequeue();
+ dmaResponseQueue_in.dequeue(clockEdge());
}
transition(READY, ReadRequest, BUSY_RD) {
}
// FUNCTIONS
+ Tick clockEdge();
// cpu/testers/networktest/networktest.cc generates packets of the type
// ReadReq, INST_FETCH, and WriteReq.
// Mandatory Queue
in_port(mandatoryQueue_in, RubyRequest, mandatoryQueue, desc="...") {
- if (mandatoryQueue_in.isReady()) {
+ if (mandatoryQueue_in.isReady(clockEdge())) {
peek(mandatoryQueue_in, RubyRequest) {
trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.LineAddress, getCacheEntry(in_msg.LineAddress));
}
action(m_popMandatoryQueue, "m", desc="Pop the mandatory request queue") {
- mandatoryQueue_in.dequeue();
+ mandatoryQueue_in.dequeue(clockEdge());
}
action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
DataBlock DataBlk, desc="data for the block";
}
- // ** OBJECTS **
+ // ** FUNCTIONS **
+ Tick clockEdge();
+
State getState(Addr addr) {
return State:I;
}
// ** IN_PORTS **
in_port(requestQueue_in, RequestMsg, requestToDir) {
- if (requestQueue_in.isReady()) {
+ if (requestQueue_in.isReady(clockEdge())) {
peek(requestQueue_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:MSG) {
trigger(Event:Receive_Request, in_msg.addr);
}
}
in_port(forwardQueue_in, RequestMsg, forwardToDir) {
- if (forwardQueue_in.isReady()) {
+ if (forwardQueue_in.isReady(clockEdge())) {
peek(forwardQueue_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:MSG) {
trigger(Event:Receive_Forward, in_msg.addr);
}
}
in_port(responseQueue_in, RequestMsg, responseToDir) {
- if (responseQueue_in.isReady()) {
+ if (responseQueue_in.isReady(clockEdge())) {
peek(responseQueue_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:MSG) {
trigger(Event:Receive_Response, in_msg.addr);
// Actions
action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
- requestQueue_in.dequeue();
+ requestQueue_in.dequeue(clockEdge());
}
action(f_popIncomingForwardQueue, "f", desc="Pop incoming forward queue") {
- forwardQueue_in.dequeue();
+ forwardQueue_in.dequeue(clockEdge());
}
action(r_popIncomingResponseQueue, "r", desc="Pop incoming response queue") {
- responseQueue_in.dequeue();
+ responseQueue_in.dequeue(clockEdge());
}
// TRANSITIONS
-
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
NodeID version;
MachineID machineID;
NodeID clusterID;
+Cycles recycle_latency;
// Functions implemented in the AbstractController class for
// making timing access to the memory maintained by the
external_type(Packet, primitive="yes");
external_type(Addr, primitive="yes");
external_type(Cycles, primitive="yes", default="Cycles(0)");
+external_type(Tick, primitive="yes", default="0");
structure(DataBlock, external = "yes", desc="..."){
void clear();
external_type(Scalar, primitive="yes");
structure(InPort, external = "yes", primitive="yes") {
- bool isReady();
- Cycles dequeue();
- void recycle();
+ bool isReady(Tick current_time);
+ Tick dequeue(Tick current_time);
+ void recycle(Tick current_time, Tick recycle_latency);
bool isEmpty();
bool isStallMapEmpty();
int getStallMapSize();
}
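The whole SLICC-visible buffer interface now threads the caller's notion of time through every call instead of reading it from stored sender/receiver pointers (which are removed further below). A self-contained toy buffer, written purely to illustrate the convention and deliberately not the gem5 MessageBuffer, shows why each signature carries a Tick:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

using Tick = uint64_t;

// Toy illustration of the interface above: the buffer stores absolute
// ready/enqueue ticks, and every operation takes the caller's current tick.
struct ToyMsg
{
    Tick ready_at;     // first tick at which the message may be dequeued
    Tick enqueued_at;  // tick at which it entered the buffer
};

class ToyBuffer
{
  public:
    bool isReady(Tick now) const
    {
        return !m_heap.empty() && m_heap.front().ready_at <= now;
    }

    void enqueue(Tick now, Tick delta)  // delta is already in ticks
    {
        m_heap.push_back({now + delta, now});
        std::push_heap(m_heap.begin(), m_heap.end(), later);
    }

    Tick dequeue(Tick now)  // returns the head message's queueing delay
    {
        assert(isReady(now));
        std::pop_heap(m_heap.begin(), m_heap.end(), later);
        Tick delay = now - m_heap.back().enqueued_at;
        m_heap.pop_back();
        return delay;
    }

    void recycle(Tick now, Tick recycle_latency)  // push the head into the future
    {
        assert(isReady(now));
        std::pop_heap(m_heap.begin(), m_heap.end(), later);
        m_heap.back().ready_at = now + recycle_latency;
        std::push_heap(m_heap.begin(), m_heap.end(), later);
    }

  private:
    // Order so the earliest-ready message sits at the heap front.
    static bool later(const ToyMsg &a, const ToyMsg &b)
    {
        return a.ready_at > b.ready_at;
    }

    std::vector<ToyMsg> m_heap;
};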
structure (TimerTable, inport="yes", external = "yes") {
- bool isReady();
- Addr readyAddress();
- void set(Addr, Cycles);
+ bool isReady(Tick);
+ Addr nextAddress();
+ void set(Addr, Tick);
void unset(Addr);
bool isSet(Addr);
}
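TimerTable moves to absolute ticks in the same way: isReady() compares stored wakeup ticks against the supplied current time, set() takes an absolute deadline, and readyAddress() becomes nextAddress(). A minimal tick-keyed table, again an illustration only and not the gem5 TimerTable internals:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <map>

using Addr = uint64_t;
using Tick = uint64_t;

class ToyTimerTable
{
  public:
    void set(Addr a, Tick when) { assert(!m_map.count(a)); m_map[a] = when; }
    void unset(Addr a) { m_map.erase(a); }
    bool isSet(Addr a) const { return m_map.count(a) != 0; }

    bool isReady(Tick now) const
    {
        return !m_map.empty() && earliest()->second <= now;
    }

    // Address with the earliest wakeup tick; meaningful once isReady() is true.
    Addr nextAddress() const { return earliest()->first; }

  private:
    std::map<Addr, Tick>::const_iterator earliest() const
    {
        return std::min_element(m_map.begin(), m_map.end(),
            [](const auto &x, const auto &y) { return x.second < y.second; });
    }

    std::map<Addr, Tick> m_map;
};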
using m5::stl_helpers::operator<<;
MessageBuffer::MessageBuffer(const Params *p)
- : SimObject(p), m_recycle_latency(p->recycle_latency),
+ : SimObject(p),
m_max_size(p->buffer_size), m_time_last_time_size_checked(0),
m_time_last_time_enqueue(0), m_time_last_time_pop(0),
m_last_arrival_time(0), m_strict_fifo(p->ordered),
{
m_msg_counter = 0;
m_consumer = NULL;
- m_sender = NULL;
- m_receiver = NULL;
-
m_size_last_time_size_checked = 0;
m_size_at_cycle_start = 0;
m_msgs_this_cycle = 0;
}
unsigned int
-MessageBuffer::getSize()
+MessageBuffer::getSize(Tick curTime)
{
- if (m_time_last_time_size_checked != m_receiver->curCycle()) {
- m_time_last_time_size_checked = m_receiver->curCycle();
+ if (m_time_last_time_size_checked != curTime) {
+ m_time_last_time_size_checked = curTime;
m_size_last_time_size_checked = m_prio_heap.size();
}
}
bool
-MessageBuffer::areNSlotsAvailable(unsigned int n)
+MessageBuffer::areNSlotsAvailable(unsigned int n, Tick current_time)
{
// fast path when message buffers have infinite size
// size immediately
unsigned int current_size = 0;
- if (m_time_last_time_pop < m_sender->clockEdge()) {
+ if (m_time_last_time_pop < current_time) {
// no pops this cycle - heap size is correct
current_size = m_prio_heap.size();
} else {
- if (m_time_last_time_enqueue < m_sender->curCycle()) {
+ if (m_time_last_time_enqueue < current_time) {
// no enqueues this cycle - m_size_at_cycle_start is correct
current_size = m_size_at_cycle_start;
} else {
MessageBuffer::peek() const
{
DPRINTF(RubyQueue, "Peeking at head of queue.\n");
- assert(isReady());
-
const Message* msg_ptr = m_prio_heap.front().get();
assert(msg_ptr);
}
// FIXME - move me somewhere else
-Cycles
+Tick
random_time()
{
- Cycles time(1);
- time += Cycles(random_mt.random(0, 3)); // [0...3]
+ Tick time = 1;
+ time += random_mt.random(0, 3); // [0...3]
if (random_mt.random(0, 7) == 0) { // 1 in 8 chance
- time += Cycles(100 + random_mt.random(1, 15)); // 100 + [1...15]
+ time += 100 + random_mt.random(1, 15); // 100 + [1...15]
}
return time;
}
void
-MessageBuffer::enqueue(MsgPtr message, Cycles delta)
+MessageBuffer::enqueue(MsgPtr message, Tick current_time, Tick delta)
{
// record current time in case we have a pop that also adjusts my size
- if (m_time_last_time_enqueue < m_sender->curCycle()) {
+ if (m_time_last_time_enqueue < current_time) {
m_msgs_this_cycle = 0; // first msg this cycle
- m_time_last_time_enqueue = m_sender->curCycle();
+ m_time_last_time_enqueue = current_time;
}
m_msg_counter++;
// Calculate the arrival time of the message, that is, the first
// cycle the message can be dequeued.
assert(delta > 0);
- Tick current_time = m_sender->clockEdge();
Tick arrival_time = 0;
if (!RubySystem::getRandomization() || !m_randomization) {
// No randomization
- arrival_time = current_time + delta * m_sender->clockPeriod();
+ arrival_time = current_time + delta;
} else {
// Randomization - ignore delta
if (m_strict_fifo) {
if (m_last_arrival_time < current_time) {
m_last_arrival_time = current_time;
}
- arrival_time = m_last_arrival_time +
- random_time() * m_sender->clockPeriod();
+ arrival_time = m_last_arrival_time + random_time();
} else {
- arrival_time = current_time +
- random_time() * m_sender->clockPeriod();
+ arrival_time = current_time + random_time();
}
}
if (arrival_time < m_last_arrival_time) {
panic("FIFO ordering violated: %s name: %s current time: %d "
"delta: %d arrival_time: %d last arrival_time: %d\n",
- *this, name(), current_time,
- delta * m_sender->clockPeriod(),
- arrival_time, m_last_arrival_time);
+ *this, name(), current_time, delta, arrival_time,
+ m_last_arrival_time);
}
}
Message* msg_ptr = message.get();
assert(msg_ptr != NULL);
- assert(m_sender->clockEdge() >= msg_ptr->getLastEnqueueTime() &&
+ assert(current_time >= msg_ptr->getLastEnqueueTime() &&
"ensure we aren't dequeued early");
- msg_ptr->updateDelayedTicks(m_sender->clockEdge());
+ msg_ptr->updateDelayedTicks(current_time);
msg_ptr->setLastEnqueueTime(arrival_time);
msg_ptr->setMsgCounter(m_msg_counter);
m_consumer->storeEventInfo(m_vnet_id);
}
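random_time() now returns raw ticks, so the strict-FIFO branch above bases each randomized arrival on the later of the current time and the most recent arrival, and the panic that follows guards against any reordering. A small self-contained sketch of that rule, with the last-arrival bookkeeping assumed to be updated on every enqueue:

#include <algorithm>
#include <cassert>
#include <cstdint>

using Tick = uint64_t;

// Sketch of the strict-FIFO arrival rule: each arrival builds on the latest
// arrival so far, never on an earlier tick, so later messages cannot
// overtake earlier ones even when their random delay is smaller.
Tick nextArrival(Tick current_time, Tick random_delay, Tick &last_arrival_time)
{
    last_arrival_time = std::max(last_arrival_time, current_time);
    Tick arrival = last_arrival_time + random_delay;
    last_arrival_time = arrival;
    return arrival;
}

int main()
{
    Tick last = 0;
    Tick a1 = nextArrival(1000, 3, last);
    Tick a2 = nextArrival(1000, 1, last);  // same cycle, smaller random delay
    assert(a1 == 1003 && a2 == 1004);      // a2 still arrives after a1
    return 0;
}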
-Cycles
-MessageBuffer::dequeue()
+Tick
+MessageBuffer::dequeue(Tick current_time)
{
DPRINTF(RubyQueue, "Popping\n");
- assert(isReady());
+ assert(isReady(current_time));
// get MsgPtr of the message about to be dequeued
MsgPtr message = m_prio_heap.front();
// get the delay cycles
- message->updateDelayedTicks(m_receiver->clockEdge());
- Cycles delayCycles =
- m_receiver->ticksToCycles(message->getDelayedTicks());
+ message->updateDelayedTicks(current_time);
+ Tick delay = message->getDelayedTicks();
// record previous size and time so the current buffer size isn't
// adjusted until schd cycle
- if (m_time_last_time_pop < m_receiver->clockEdge()) {
+ if (m_time_last_time_pop < current_time) {
m_size_at_cycle_start = m_prio_heap.size();
- m_time_last_time_pop = m_receiver->clockEdge();
+ m_time_last_time_pop = current_time;
}
- pop_heap(m_prio_heap.begin(), m_prio_heap.end(),
- greater<MsgPtr>());
+ pop_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());
m_prio_heap.pop_back();
- return delayCycles;
+ return delay;
}
void
m_prio_heap.clear();
m_msg_counter = 0;
- m_time_last_time_enqueue = Cycles(0);
+ m_time_last_time_enqueue = 0;
m_time_last_time_pop = 0;
m_size_at_cycle_start = 0;
m_msgs_this_cycle = 0;
}
void
-MessageBuffer::recycle()
+MessageBuffer::recycle(Tick current_time, Tick recycle_latency)
{
DPRINTF(RubyQueue, "Recycling.\n");
- assert(isReady());
+ assert(isReady(current_time));
MsgPtr node = m_prio_heap.front();
pop_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());
- node->setLastEnqueueTime(m_receiver->clockEdge(m_recycle_latency));
+ Tick future_time = current_time + recycle_latency;
+ node->setLastEnqueueTime(future_time);
+
m_prio_heap.back() = node;
push_heap(m_prio_heap.begin(), m_prio_heap.end(), greater<MsgPtr>());
- m_consumer->
- scheduleEventAbsolute(m_receiver->clockEdge(m_recycle_latency));
+ m_consumer->scheduleEventAbsolute(future_time);
}
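With the latency supplied by the caller, recycle() no longer needs a per-buffer m_recycle_latency member (removed from the constructor above) or the recycle_latency Param (removed from the Python file below); each controller instead carries its own Cycles recycle_latency, added to the controller interface earlier in this patch, and converts it with cyclesToTicks() at the call site.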
void
}
void
-MessageBuffer::reanalyzeMessages(Addr addr)
+MessageBuffer::reanalyzeMessages(Addr addr, Tick current_time)
{
DPRINTF(RubyQueue, "ReanalyzeMessages %s\n", addr);
assert(m_stall_msg_map.count(addr) > 0);
- Tick curTick = m_receiver->clockEdge();
//
// Put all stalled messages associated with this address back on the
// scheduled for the current cycle so that the previously stalled messages
// will be observed before any younger messages that may arrive this cycle
//
- reanalyzeList(m_stall_msg_map[addr], curTick);
+ reanalyzeList(m_stall_msg_map[addr], current_time);
m_stall_msg_map.erase(addr);
}
void
-MessageBuffer::reanalyzeAllMessages()
+MessageBuffer::reanalyzeAllMessages(Tick current_time)
{
DPRINTF(RubyQueue, "ReanalyzeAllMessages\n");
- Tick curTick = m_receiver->clockEdge();
//
// Put all stalled messages associated with this address back on the
//
for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
map_iter != m_stall_msg_map.end(); ++map_iter) {
- reanalyzeList(map_iter->second, curTick);
+ reanalyzeList(map_iter->second, current_time);
}
m_stall_msg_map.clear();
}
void
-MessageBuffer::stallMessage(Addr addr)
+MessageBuffer::stallMessage(Addr addr, Tick current_time)
{
DPRINTF(RubyQueue, "Stalling due to %s\n", addr);
- assert(isReady());
+ assert(isReady(current_time));
assert(getOffset(addr) == 0);
MsgPtr message = m_prio_heap.front();
- dequeue();
+ dequeue(current_time);
//
// Note: no event is scheduled to analyze the map at a later time.
}
bool
-MessageBuffer::isReady() const
+MessageBuffer::isReady(Tick current_time) const
{
return ((m_prio_heap.size() > 0) &&
- (m_prio_heap.front()->getLastEnqueueTime() <= m_receiver->clockEdge()));
+ (m_prio_heap.front()->getLastEnqueueTime() <= current_time));
}
bool
typedef MessageBufferParams Params;
MessageBuffer(const Params *p);
- void reanalyzeMessages(Addr addr);
- void reanalyzeAllMessages();
- void stallMessage(Addr addr);
+ void reanalyzeMessages(Addr addr, Tick current_time);
+ void reanalyzeAllMessages(Tick current_time);
+ void stallMessage(Addr addr, Tick current_time);
// TRUE if head of queue timestamp <= SystemTime
- bool isReady() const;
+ bool isReady(Tick current_time) const;
void
- delayHead()
+ delayHead(Tick current_time, Tick delta)
{
MsgPtr m = m_prio_heap.front();
std::pop_heap(m_prio_heap.begin(), m_prio_heap.end(),
std::greater<MsgPtr>());
m_prio_heap.pop_back();
- enqueue(m, Cycles(1));
+ enqueue(m, current_time, delta);
}
- bool areNSlotsAvailable(unsigned int n);
+ bool areNSlotsAvailable(unsigned int n, Tick curTime);
int getPriority() { return m_priority_rank; }
void setPriority(int rank) { m_priority_rank = rank; }
void setConsumer(Consumer* consumer)
m_consumer = consumer;
}
- void setSender(ClockedObject* obj)
- {
- DPRINTF(RubyQueue, "Setting sender: %s\n", obj->name());
- assert(m_sender == NULL || m_sender == obj);
- m_sender = obj;
- }
-
- void setReceiver(ClockedObject* obj)
- {
- DPRINTF(RubyQueue, "Setting receiver: %s\n", obj->name());
- assert(m_receiver == NULL || m_receiver == obj);
- m_receiver = obj;
- }
-
Consumer* getConsumer() { return m_consumer; }
bool getOrdered() { return m_strict_fifo; }
//! Function for extracting the message at the head of the
//! message queue. The function assumes that the queue is nonempty.
const Message* peek() const;
- const MsgPtr&
- peekMsgPtr() const
- {
- assert(isReady());
- return m_prio_heap.front();
- }
+ const MsgPtr &peekMsgPtr() const { return m_prio_heap.front(); }
- void enqueue(MsgPtr message) { enqueue(message, Cycles(1)); }
- void enqueue(MsgPtr message, Cycles delta);
+ void enqueue(MsgPtr message, Tick curTime, Tick delta);
//! Updates the delay cycles of the message at the head of the queue,
//! removes it from the queue and returns its total delay.
- Cycles dequeue();
+ Tick dequeue(Tick current_time);
- void recycle();
+ void recycle(Tick current_time, Tick recycle_latency);
bool isEmpty() const { return m_prio_heap.size() == 0; }
bool isStallMapEmpty() { return m_stall_msg_map.size() == 0; }
unsigned int getStallMapSize() { return m_stall_msg_map.size(); }
- unsigned int getSize();
+ unsigned int getSize(Tick curTime);
void clear();
void print(std::ostream& out) const;
uint32_t functionalWrite(Packet *pkt);
private:
- //added by SS
- const Cycles m_recycle_latency;
-
void reanalyzeList(std::list<MsgPtr> &, Tick);
private:
// Data Members (m_ prefix)
- //! The two ends of the buffer.
- ClockedObject* m_sender;
- ClockedObject* m_receiver;
-
//! Consumer to signal a wakeup(), can be NULL
Consumer* m_consumer;
std::vector<MsgPtr> m_prio_heap;
StallMsgMapType m_stall_msg_map;
const unsigned int m_max_size;
- Cycles m_time_last_time_size_checked;
+ Tick m_time_last_time_size_checked;
unsigned int m_size_last_time_size_checked;
// variables used so enqueues appear to happen immediately, while
// pop happen the next cycle
- Cycles m_time_last_time_enqueue;
+ Tick m_time_last_time_enqueue;
Tick m_time_last_time_pop;
Tick m_last_arrival_time;
int m_vnet_id;
};
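Taken together, the buffer no longer asks a receiver for the time; every time-dependent call now takes a Tick from the caller. A minimal sketch of both sides, assuming the producer and consumer are ClockedObjects (buffer names are illustrative):

    // Producer: enqueue with an explicit current time and a delay in ticks.
    out_buffer->enqueue(msg, clockEdge(), cyclesToTicks(Cycles(1)));

    // Consumer: drain whatever has become visible by this clock edge.
    while (in_buffer->isReady(clockEdge())) {
        const MsgPtr &m = in_buffer->peekMsgPtr();
        // ... process m ...
        in_buffer->dequeue(clockEdge());
    }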
-Cycles random_time();
+Tick random_time();
inline std::ostream&
operator<<(std::ostream& out, const MessageBuffer& obj)
ordered = Param.Bool(False, "Whether the buffer is ordered")
buffer_size = Param.Unsigned(0, "Maximum number of entries to buffer \
(0 allows infinite entries)")
- recycle_latency = Param.Cycles(Parent.recycle_latency, "")
randomization = Param.Bool(False, "")
master = MasterPort("Master port to MessageBuffer receiver")
for (auto& it : in) {
if (it != nullptr) {
it->setConsumer(this);
- it->setReceiver(this);
- }
- }
-
- for (auto& it : out) {
- if (it != nullptr) {
- it->setSender(this);
}
}
}
DPRINTF(RubyNetwork, "m_id: %d woke up at time: %lld", m_id, curCycle());
MsgPtr msg_ptr;
+ Tick curTime = clockEdge();
// Checking for messages coming from the protocol
// can pick up a message/cycle for each virtual net
continue;
}
- while (b->isReady()) { // Is there a message waiting
+ while (b->isReady(curTime)) { // Is there a message waiting
msg_ptr = b->peekMsgPtr();
if (flitisizeMessage(msg_ptr, vnet)) {
- b->dequeue();
+ b->dequeue(curTime);
} else {
break;
}
free_signal = true;
outNode_ptr[t_flit->get_vnet()]->enqueue(
- t_flit->get_msg_ptr(), Cycles(1));
+ t_flit->get_msg_ptr(), curTime, cyclesToTicks(Cycles(1)));
}
// Simply send a credit back since we are not buffering
// this flit in the NI
continue;
}
- while (it->isReady()) { // Is there a message waiting
+ while (it->isReady(clockEdge())) { // Is there a message waiting
scheduleEvent(Cycles(1));
return;
}
for (auto& it: in) {
if (it != nullptr) {
it->setConsumer(this);
- it->setReceiver(this);
- }
- }
-
- for (auto& it : out) {
- if (it != nullptr) {
- it->setSender(this);
}
}
}
continue;
}
- while (b->isReady()) { // Is there a message waiting
+ while (b->isReady(clockEdge())) { // Is there a message waiting
msg_ptr = b->peekMsgPtr();
if (flitisizeMessage(msg_ptr, vnet)) {
- b->dequeue();
+ b->dequeue(clockEdge());
} else {
break;
}
m_id, curCycle());
outNode_ptr[t_flit->get_vnet()]->enqueue(
- t_flit->get_msg_ptr(), Cycles(1));
+ t_flit->get_msg_ptr(), clockEdge(), cyclesToTicks(Cycles(1)));
// signal the upstream router that this vc can be freed now
inNetLink->release_vc_link(t_flit->get_vc(),
continue;
}
- while (it->isReady()) { // Is there a message waiting
+ while (it->isReady(clockEdge())) { // Is there a message waiting
scheduleEvent(Cycles(1));
return;
}
// temporary vectors to store the routing results
vector<LinkID> output_links;
vector<NetDest> output_link_destinations;
+ Tick current_time = m_switch->clockEdge();
- while (buffer->isReady()) {
+ while (buffer->isReady(current_time)) {
DPRINTF(RubyNetwork, "incoming: %d\n", incoming);
// Peek at message
for (int out = 0; out < m_out.size(); out++) {
int out_queue_length = 0;
for (int v = 0; v < m_virtual_networks; v++) {
- out_queue_length += m_out[out][v]->getSize();
+ out_queue_length += m_out[out][v]->getSize(current_time);
}
int value =
(out_queue_length << 8) |
for (int i = 0; i < output_links.size(); i++) {
int outgoing = output_links[i];
- if (!m_out[outgoing][vnet]->areNSlotsAvailable(1))
+ if (!m_out[outgoing][vnet]->areNSlotsAvailable(1, current_time))
enough = false;
DPRINTF(RubyNetwork, "Checking if node is blocked ..."
}
// Dequeue msg
- buffer->dequeue();
+ buffer->dequeue(current_time);
m_pending_message_count[vnet]--;
// Enqueue it - for all outgoing queues
"inport[%d][%d] to outport [%d][%d].\n",
incoming, vnet, outgoing, vnet);
- m_out[outgoing][vnet]->enqueue(msg_ptr);
+ m_out[outgoing][vnet]->enqueue(msg_ptr, current_time,
+ m_switch->cyclesToTicks(Cycles(1)));
}
}
}
endpoint_bandwidth = Param.Int(1000, "bandwidth adjustment factor");
adaptive_routing = Param.Bool(False, "enable adaptive routing");
int_link_buffers = VectorParam.MessageBuffer("Buffers for int_links")
- # int_links do not recycle buffers, so this parameter is not used.
- # TODO: Move recycle_latency out of MessageBuffers and into controllers
- recycle_latency = Param.Cycles(0, "")
def setup_buffers(self):
# Note that all SimpleNetwork MessageBuffers are currently ordered
virt_nets = Param.Int(Parent.number_of_virtual_networks,
"number of virtual networks")
port_buffers = VectorParam.MessageBuffer("Port buffers")
- # Ports do not recycle buffers, so this parameter is not used.
- # TODO: Move recycle_latency out of MessageBuffers and into controllers
- recycle_latency = Param.Cycles(0, "")
Switch::addInPort(const vector<MessageBuffer*>& in)
{
m_perfect_switch->addInPort(in);
-
- for (auto& it : in) {
- if (it != nullptr) {
- it->setReceiver(this);
- }
- }
}
void
vector<MessageBuffer*> intermediateBuffers;
for (int i = 0; i < out.size(); ++i) {
- if (out[i] != nullptr) {
- out[i]->setSender(this);
- }
-
assert(m_num_connected_buffers < m_port_buffers.size());
MessageBuffer* buffer_ptr = m_port_buffers[m_num_connected_buffers];
m_num_connected_buffers++;
intermediateBuffers.push_back(buffer_ptr);
-
- buffer_ptr->setSender(this);
- buffer_ptr->setReceiver(this);
}
// Hook the queues to the PerfectSwitch
if (out == nullptr || in == nullptr) {
return;
}
- assert(m_units_remaining[vnet] >= 0);
- while (bw_remaining > 0 && (in->isReady() || m_units_remaining[vnet] > 0) &&
- out->areNSlotsAvailable(1)) {
+ assert(m_units_remaining[vnet] >= 0);
+ Tick current_time = m_switch->clockEdge();
+ while (bw_remaining > 0 && (in->isReady(current_time) ||
+ m_units_remaining[vnet] > 0) &&
+ out->areNSlotsAvailable(1, current_time)) {
// See if we are done transferring the previous message on
// this virtual network
- if (m_units_remaining[vnet] == 0 && in->isReady()) {
+ if (m_units_remaining[vnet] == 0 && in->isReady(current_time)) {
// Find the size of the message we are moving
MsgPtr msg_ptr = in->peekMsgPtr();
Message *net_msg_ptr = msg_ptr.get();
m_ruby_system->curCycle());
// Move the message
- in->dequeue();
- out->enqueue(msg_ptr, m_link_latency);
+ in->dequeue(current_time);
+ out->enqueue(msg_ptr, current_time,
+ m_switch->cyclesToTicks(m_link_latency));
// Count the message
m_msg_counts[net_msg_ptr->getMessageSize()][vnet]++;
bw_remaining = max(0, -diff);
}
- if (bw_remaining > 0 && (in->isReady() || m_units_remaining[vnet] > 0) &&
- !out->areNSlotsAvailable(1)) {
+ if (bw_remaining > 0 && (in->isReady(current_time) ||
+ m_units_remaining[vnet] > 0) &&
+ !out->areNSlotsAvailable(1, current_time)) {
DPRINTF(RubyNetwork, "vnet: %d", vnet);
// schedule me to wakeup again because I'm waiting for my
m_delayVCHistogram.push_back(new Stats::Histogram());
m_delayVCHistogram[i]->init(10);
}
- if (getMemoryQueue()) {
- getMemoryQueue()->setSender(this);
- }
}
void
in_port_rank >= 0;
in_port_rank--) {
if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
- (*(m_waiting_buffers[addr]))[in_port_rank]->reanalyzeMessages(addr);
+ (*(m_waiting_buffers[addr]))[in_port_rank]->
+ reanalyzeMessages(addr, clockEdge());
}
}
delete m_waiting_buffers[addr];
in_port_rank >= 0;
in_port_rank--) {
if ((*(m_waiting_buffers[addr]))[in_port_rank] != NULL) {
- (*(m_waiting_buffers[addr]))[in_port_rank]->reanalyzeMessages(addr);
+ (*(m_waiting_buffers[addr]))[in_port_rank]->
+ reanalyzeMessages(addr, clockEdge());
}
}
delete m_waiting_buffers[addr];
//
if (*vec_iter != NULL &&
(wokeUpMsgBufs.count(*vec_iter) == 0)) {
- (*vec_iter)->reanalyzeAllMessages();
+ (*vec_iter)->reanalyzeAllMessages(clockEdge());
wokeUpMsgBufs.insert(*vec_iter);
}
}
panic("Incorrect packet type received from memory controller!");
}
- getMemoryQueue()->enqueue(msg);
+ getMemoryQueue()->enqueue(msg, clockEdge(), cyclesToTicks(Cycles(1)));
delete pkt;
}
void allocate(Addr address);
void deallocate(Addr address);
bool
- areNSlotsAvailable(int n) const
+ areNSlotsAvailable(int n, Tick current_time) const
{
return (m_number_of_TBEs - m_map.size()) >= n;
}
- ENTRY* lookup(Addr address);
+ ENTRY *lookup(Addr address);
// Print cache contents
void print(std::ostream& out) const;
: m_next_time(0)
{
m_consumer_ptr = NULL;
- m_clockobj_ptr = NULL;
-
m_next_valid = false;
m_next_address = 0;
}
bool
-TimerTable::isReady() const
+TimerTable::isReady(Tick curTime) const
{
if (m_map.empty())
return false;
updateNext();
}
assert(m_next_valid);
- return (m_clockobj_ptr->curCycle() >= m_next_time);
+ return (curTime >= m_next_time);
}
Addr
-TimerTable::readyAddress() const
+TimerTable::nextAddress() const
{
- assert(isReady());
-
if (!m_next_valid) {
updateNext();
}
}
void
-TimerTable::set(Addr address, Cycles relative_latency)
+TimerTable::set(Addr address, Tick ready_time)
{
assert(address == makeLineAddress(address));
- assert(relative_latency > 0);
assert(!m_map.count(address));
- Cycles ready_time = m_clockobj_ptr->curCycle() + relative_latency;
m_map[address] = ready_time;
assert(m_consumer_ptr != NULL);
- m_consumer_ptr->
- scheduleEventAbsolute(m_clockobj_ptr->clockPeriod() * ready_time);
+ m_consumer_ptr->scheduleEventAbsolute(ready_time);
m_next_valid = false;
// Don't always recalculate the next ready address
m_consumer_ptr = consumer_ptr;
}
- void setClockObj(ClockedObject* obj)
- {
- assert(m_clockobj_ptr == NULL);
- m_clockobj_ptr = obj;
- }
-
void
setDescription(const std::string& name)
{
m_name = name;
}
- bool isReady() const;
- Addr readyAddress() const;
+ bool isReady(Tick curTime) const;
+ Addr nextAddress() const;
bool isSet(Addr address) const { return !!m_map.count(address); }
- void set(Addr address, Cycles relative_latency);
- void set(Addr address, uint64_t relative_latency)
- { set(address, Cycles(relative_latency)); }
-
+ void set(Addr address, Tick ready_time);
void unset(Addr address);
void print(std::ostream& out) const;
// use a std::map for the address map as this container is sorted
// and ensures a well-defined iteration order
- typedef std::map<Addr, Cycles> AddressMap;
+ typedef std::map<Addr, Tick> AddressMap;
AddressMap m_map;
mutable bool m_next_valid;
- mutable Cycles m_next_time; // Only valid if m_next_valid is true
+ mutable Tick m_next_time; // Only valid if m_next_valid is true
mutable Addr m_next_address; // Only valid if m_next_valid is true
- //! Object used for querying time.
- ClockedObject* m_clockobj_ptr;
//! Consumer to signal a wakeup()
Consumer* m_consumer_ptr;
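Because TimerTable no longer holds a ClockedObject, protocol code converts its relative latency to an absolute tick before calling set(). A sketch of the caller-side change, with useTimerTable and use_timeout_latency as illustrative names:

    // Before: useTimerTable.set(address, use_timeout_latency);
    // After: the controller, which has a clock, computes the absolute time.
    useTimerTable.set(address, clockEdge() + cyclesToTicks(use_timeout_latency));

    // Readiness checks likewise take the current tick:
    if (useTimerTable.isReady(clockEdge())) { /* ... */ }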
MemObject::init();
assert(m_controller != NULL);
m_mandatory_q_ptr = m_controller->getMandatoryQueue();
- m_mandatory_q_ptr->setSender(this);
m_is_busy = false;
m_data_block_mask = ~ (~0 << RubySystem::getBlockSizeBits());
}
assert(m_mandatory_q_ptr != NULL);
- m_mandatory_q_ptr->enqueue(msg);
+ m_mandatory_q_ptr->enqueue(msg, clockEdge(), cyclesToTicks(Cycles(1)));
active_request.bytes_issued += msg->getLen();
return RequestStatus_Issued;
}
assert(m_mandatory_q_ptr != NULL);
- m_mandatory_q_ptr->enqueue(msg);
+ m_mandatory_q_ptr->enqueue(msg, clockEdge(), cyclesToTicks(Cycles(1)));
active_request.bytes_issued += msg->getLen();
DPRINTF(RubyDma,
"DMA request bytes issued %d, bytes completed %d, total len %d\n",
{
assert(m_controller != NULL);
m_mandatory_q_ptr = m_controller->getMandatoryQueue();
- m_mandatory_q_ptr->setSender(this);
}
BaseMasterPort &
assert(latency > 0);
assert(m_mandatory_q_ptr != NULL);
- m_mandatory_q_ptr->enqueue(msg, latency);
+ m_mandatory_q_ptr->enqueue(msg, clockEdge(), cyclesToTicks(latency));
}
if self.latexpr != None:
ret_type, rcode = self.latexpr.inline(True)
code("(${{self.queue_name.var.code}}).enqueue(" \
- "out_msg, Cycles($rcode));")
+ "out_msg, clockEdge(), cyclesToTicks(Cycles($rcode)));")
else:
- code("(${{self.queue_name.var.code}}).enqueue(out_msg);")
+ code("(${{self.queue_name.var.code}}).enqueue(out_msg, "\
+ "clockEdge(), cyclesToTicks(Cycles(1)));")
# End scope
self.symtab.popFrame()
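For reference, the C++ this template now emits for an enqueue with an explicit latency looks roughly like the following (the out-port pointer name and latency expression are illustrative):

    // enqueue(requestNetwork_out, RequestMsg, request_latency) { ... } expands to:
    (*m_requestNetwork_out_ptr).enqueue(out_msg, clockEdge(),
                                        cyclesToTicks(Cycles(request_latency)));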
c_code = "m_machineID"
elif self.ident == "clusterID":
c_code = "m_clusterID"
+ elif self.ident == "recycle_latency":
+ c_code = "m_recycle_latency"
else:
c_code = "(*m_%s_ptr)" % (self.ident)
if (m_is_blocking &&
(m_block_map.count(in_msg_ptr->m_$address_field) == 1) &&
(m_block_map[in_msg_ptr->m_$address_field] != &$qcode)) {
- $qcode.delayHead();
+ $qcode.delayHead(clockEdge(), cyclesToTicks(Cycles(1)));
continue;
}
''')
address_code = self.address.var.code
code('''
stallBuffer(&($in_port_code), $address_code);
- $in_port_code.stallMessage($address_code);
+ $in_port_code.stallMessage($address_code, clockEdge());
''')
m_net_ptr->set${network}NetQueue(m_version + base, $vid->getOrdered(), $vnet,
"$vnet_type", $vid);
''')
- # Set the end
- if network == "To":
- code('$vid->setSender(this);')
- else:
- code('$vid->setReceiver(this);')
-
# Set Priority
if "rank" in var:
code('$vid->setPriority(${{var["rank"]}})')
- else:
- if var.type_ast.type.c_ident == "MessageBuffer":
- code('$vid->setReceiver(this);')
- if var.ident.find("triggerQueue") >= 0:
- code('$vid->setSender(this);')
- elif var.ident.find("optionalQueue") >= 0:
- code('$vid->setSender(this);')
-
code.dedent()
code('''
}
comment = "Type %s default" % vtype.ident
code('*$vid = ${{vtype["default"]}}; // $comment')
- if vtype.c_ident == "TimerTable":
- code('$vid->setClockObj(this);')
-
# Set the prefetchers
code()
for prefetcher in self.prefetchers:
res = trans.resources
for key,val in res.iteritems():
val = '''
-if (!%s.areNSlotsAvailable(%s))
+if (!%s.areNSlotsAvailable(%s, clockEdge()))
return TransitionResult_ResourceStall;
''' % (key.code, val)
case_sorter.append(val)
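The corresponding guard in the generated transition code then reads roughly as follows (the buffer pointer name is illustrative):

    if (!(*m_requestToDir_ptr).areNSlotsAvailable(1, clockEdge()))
        return TransitionResult_ResourceStall;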