action(l_popRequestQueue, "l",
desc="Pop incoming request queue and profile the delay within this virtual network") {
- profileMsgDelay(2, messgeBuffer_in.dequeue_getDelayCycles());
+ profileMsgDelay(2, messgeBuffer_in.dequeue());
}
action(o_popIncomingResponseQueue, "o",
desc="Pop Incoming Response queue and profile the delay within this virtual network") {
- profileMsgDelay(1, messgeBuffer_in.dequeue_getDelayCycles());
+ profileMsgDelay(1, messgeBuffer_in.dequeue());
}
action(s_deallocateTBE, "s", desc="Deallocate TBE") {
action(l_popL2RequestQueue, "l",
desc="Pop incoming request queue and profile the delay within this virtual network") {
- profileMsgDelay(2, requestNetwork_in.dequeue_getDelayCycles());
+ profileMsgDelay(2, requestNetwork_in.dequeue());
}
action(o_popL2ResponseQueue, "o",
desc="Pop Incoming Response queue and profile the delay within this virtual network") {
- profileMsgDelay(1, responseNetwork_in.dequeue_getDelayCycles());
+ profileMsgDelay(1, responseNetwork_in.dequeue());
}
action(s_deallocateTBE, "s", desc="Deallocate TBE") {
mandatoryQueue_in.dequeue();
}
- action(l_popRequestQueue, "l", desc="Pop incoming request queue and profile the delay within this virtual network") {
- profileMsgDelay(2, requestL1Network_in.dequeue_getDelayCycles());
+ action(l_popRequestQueue, "l",
+ desc="Pop incoming request queue and profile the delay within this virtual network") {
+ profileMsgDelay(2, requestL1Network_in.dequeue());
}
- action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue and profile the delay within this virtual network") {
- profileMsgDelay(1, responseL1Network_in.dequeue_getDelayCycles());
+ action(o_popIncomingResponseQueue, "o",
+ desc="Pop Incoming Response queue and profile the delay within this virtual network") {
+ profileMsgDelay(1, responseL1Network_in.dequeue());
}
action(s_deallocateTBE, "s", desc="Deallocate TBE") {
}
action(jj_popL1RequestQueue, "\j", desc="Pop incoming L1 request queue") {
- profileMsgDelay(0, L1RequestL2Network_in.dequeue_getDelayCycles());
+ profileMsgDelay(0, L1RequestL2Network_in.dequeue());
}
action(k_popUnblockQueue, "k", desc="Pop incoming unblock queue") {
- profileMsgDelay(0, L1unblockNetwork_in.dequeue_getDelayCycles());
+ profileMsgDelay(0, L1unblockNetwork_in.dequeue());
}
action(o_popIncomingResponseQueue, "o", desc="Pop Incoming Response queue") {
- profileMsgDelay(1, responseL2Network_in.dequeue_getDelayCycles());
+ profileMsgDelay(1, responseL2Network_in.dequeue());
}
action(m_writeDataToCache, "m", desc="Write data from response queue to cache") {
}
action(n_popResponseQueue, "n", desc="Pop the response queue") {
- profileMsgDelay(1, responseNetwork_in.dequeue_getDelayCycles());
+ profileMsgDelay(1, responseNetwork_in.dequeue());
}
action(o_popForwardedRequestQueue, "o", desc="Pop the forwarded request queue") {
- profileMsgDelay(2, forwardRequestNetwork_in.dequeue_getDelayCycles());
+ profileMsgDelay(2, forwardRequestNetwork_in.dequeue());
}
action(p_profileMiss, "pi", desc="Profile cache miss") {
structure(InPort, external = "yes", primitive="yes") {
bool isReady();
- void dequeue();
- Cycles dequeue_getDelayCycles();
+ Cycles dequeue();
void recycle();
bool isEmpty();
}
}
Cycles
-MessageBuffer::dequeue_getDelayCycles()
+MessageBuffer::dequeue()
{
+ DPRINTF(RubyQueue, "Popping\n");
+ assert(isReady());
+
// get MsgPtr of the message about to be dequeued
MsgPtr message = m_prio_heap.front().m_msgptr;
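// fold the time this message spent waiting in the buffer into its
// cumulative delay, then express the total in receiver cycles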
message->updateDelayedTicks(m_receiver->clockEdge());
Cycles delayCycles =
m_receiver->ticksToCycles(message->getDelayedTicks());
- dequeue();
-
- return delayCycles;
-}
-
-void
-MessageBuffer::dequeue()
-{
- DPRINTF(RubyQueue, "Popping\n");
- assert(isReady());
// record previous size and time so the current buffer size isn't
// adjusted until next cycle
pop_heap(m_prio_heap.begin(), m_prio_heap.end(),
greater<MessageBufferNode>());
m_prio_heap.pop_back();
+
+ return delayCycles;
}
void
void enqueue(MsgPtr message) { enqueue(message, Cycles(1)); }
void enqueue(MsgPtr message, Cycles delta);
- //! Updates the delay cycles of the message at the of the queue,
+ //! Updates the delay cycles of the message at the head of the queue,
//! removes it from the queue and returns its total delay.
- Cycles dequeue_getDelayCycles();
-
- void dequeue();
+ Cycles dequeue();
void recycle();
bool isEmpty() const { return m_prio_heap.size() == 0; }
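A minimal sketch of a call site under the merged interface; buffer is an illustrative MessageBuffer pointer, not something named in this patch:

    Cycles delay = buffer->dequeue();  // pop the head message and get its total queueing delay
    buffer->dequeue();                 // or pop and simply discard the returned Cycles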
unmodified_msg_ptr = msg_ptr->clone();
}
+ // Dequeue msg
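+ // (msg_ptr is already held above, so the message can be removed from the
+ // input port before it is fanned out to the output links below)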
+ m_in[incoming][vnet]->dequeue();
+ m_pending_message_count[vnet]--;
+
// Enqueue it - for all outgoing queues
for (int i=0; i<output_links.size(); i++) {
int outgoing = output_links[i];
m_out[outgoing][vnet]->enqueue(msg_ptr);
}
-
- // Dequeue msg
- m_in[incoming][vnet]->dequeue();
- m_pending_message_count[vnet]--;
}
}
}
g_system_ptr->curCycle());
// Move the message
- m_out[vnet]->enqueue(m_in[vnet]->peekMsgPtr(), m_link_latency);
m_in[vnet]->dequeue();
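+ // the previously obtained msg_ptr remains valid after the pop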
+ m_out[vnet]->enqueue(msg_ptr, m_link_latency);
// Count the message
m_msg_counts[net_msg_ptr->getMessageSize()][vnet]++;
- # The return type must be void
+ # A non-void return value that is simply discarded only triggers a warning
if actual_type != self.symtab.find("void", Type):
- self.expr.error("Non-void return must not be ignored, " + \
- "return type is '%s'", actual_type.ident)
+ self.expr.warning("Non-void return ignored, " + \
+ "return type is '%s'", actual_type.ident)
def findResources(self, resources):
self.expr.findResources(resources)
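This relaxation appears to accommodate the new dequeue() signature: bare pop statements such as the mandatoryQueue_in.dequeue(); call earlier in this patch now discard a Cycles value and would otherwise trip the non-void-return check. With the warning they still compile and merely report that the returned value is being ignored.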