ruby: move stall and wakeup functions to AbstractController
[gem5.git] src/mem/ruby/buffers/MessageBuffer.cc
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <cassert>

#include "base/cprintf.hh"
#include "base/misc.hh"
#include "base/stl_helpers.hh"
#include "debug/RubyQueue.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/system/System.hh"

using namespace std;
using m5::stl_helpers::operator<<;

MessageBuffer::MessageBuffer(const string &name)
    : m_time_last_time_size_checked(0), m_time_last_time_enqueue(0),
      m_time_last_time_pop(0), m_last_arrival_time(0)
{
    m_msg_counter = 0;
    m_consumer_ptr = NULL;
    m_sender_ptr = NULL;
    m_receiver_ptr = NULL;

    m_ordering_set = false;
    m_strict_fifo = true;
    m_size = 0;
    m_max_size = -1;
    m_randomization = true;
    m_size_last_time_size_checked = 0;
    m_size_at_cycle_start = 0;
    m_msgs_this_cycle = 0;
    m_not_avail_count = 0;
    m_priority_rank = 0;
    m_name = name;

    m_stall_msg_map.clear();
    m_input_link_id = 0;
    m_vnet_id = 0;
}

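// Return the buffer's occupancy for the current cycle. The value is
// cached the first time it is read in a cycle, so repeated calls within
// the same cycle observe a consistent size.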
int
MessageBuffer::getSize()
{
    if (m_time_last_time_size_checked == m_receiver_ptr->curCycle()) {
        return m_size_last_time_size_checked;
    } else {
        m_time_last_time_size_checked = m_receiver_ptr->curCycle();
        m_size_last_time_size_checked = m_size;
        return m_size;
    }
}

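// Check whether n more messages can be enqueued this cycle without
// exceeding the buffer's maximum size.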
bool
MessageBuffer::areNSlotsAvailable(int n)
{
    // fast path when message buffers have infinite size
    if (m_max_size == -1) {
        return true;
    }

    // determine my correct size for the current cycle
    // pop operations shouldn't affect the network's visible size
    // until the next cycle, but enqueue operations affect the visible
    // size immediately
    int current_size = max(m_size_at_cycle_start, m_size);
    if (m_time_last_time_pop < m_receiver_ptr->curCycle()) {
        // no pops this cycle - m_size is correct
        current_size = m_size;
    } else {
        if (m_time_last_time_enqueue < m_receiver_ptr->curCycle()) {
            // no enqueues this cycle - m_size_at_cycle_start is correct
            current_size = m_size_at_cycle_start;
        } else {
            // both pops and enqueues occurred this cycle - add newly
            // enqueued msgs to m_size_at_cycle_start
            current_size = m_size_at_cycle_start + m_msgs_this_cycle;
        }
    }

    // now compare the new size with our max size
    if (current_size + n <= m_max_size) {
        return true;
    } else {
        DPRINTF(RubyQueue, "n: %d, current_size: %d, m_size: %d, "
                "m_max_size: %d\n",
                n, current_size, m_size, m_max_size);
        m_not_avail_count++;
        return false;
    }
}

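// Return a deep copy (clone) of the message at the head of the queue.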
const MsgPtr
MessageBuffer::getMsgPtrCopy() const
{
    assert(isReady());

    return m_prio_heap.front().m_msgptr->clone();
}

const Message*
MessageBuffer::peekAtHeadOfQueue() const
{
    DPRINTF(RubyQueue, "Peeking at head of queue.\n");
    assert(isReady());

    const Message* msg_ptr = m_prio_heap.front().m_msgptr.get();
    assert(msg_ptr);

    DPRINTF(RubyQueue, "Message: %s\n", (*msg_ptr));
    return msg_ptr;
}

// FIXME - move me somewhere else
Cycles
random_time()
{
    Cycles time(1);
    time += Cycles(random() & 0x3); // [0...3]
    if ((random() & 0x7) == 0) { // 1 in 8 chance
        time += Cycles(100 + (random() % 0xf)); // 100 + [0...14]
    }
    return time;
}

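// Insert a message into the priority heap. The message's arrival time,
// i.e. the first cycle at which it may be dequeued, is computed from the
// given delay, and the consumer's wakeup is scheduled for that cycle.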
void
MessageBuffer::enqueue(MsgPtr message, Cycles delay)
{
    m_msg_counter++;
    m_size++;

    // record current time in case we have a pop that also adjusts my size
    if (m_time_last_time_enqueue < m_receiver_ptr->curCycle()) {
        m_msgs_this_cycle = 0; // first msg this cycle
        m_time_last_time_enqueue = m_receiver_ptr->curCycle();
    }
    m_msgs_this_cycle++;

    if (!m_ordering_set) {
        panic("Ordering property of %s has not been set", m_name);
    }

    // Calculate the arrival time of the message, that is, the first
    // cycle the message can be dequeued.
    assert(delay > 0);
    Cycles delta = m_receiver_ptr->ticksToCycles(delay *
                                        m_sender_ptr->clockPeriod());

    Cycles current_time(m_receiver_ptr->curCycle());
    Cycles arrival_time(0);

    if (!RubySystem::getRandomization() || !m_randomization) {
        // No randomization
        arrival_time = current_time + delta;
    } else {
        // Randomization - ignore delta
        if (m_strict_fifo) {
            if (m_last_arrival_time < current_time) {
                m_last_arrival_time = current_time;
            }
            arrival_time = m_last_arrival_time + random_time();
        } else {
            arrival_time = current_time + random_time();
        }
    }

    // Check the arrival time
    assert(arrival_time > current_time);
    if (m_strict_fifo) {
        if (arrival_time < m_last_arrival_time) {
            panic("FIFO ordering violated: %s name: %s current time: %d "
                  "delta: %d arrival_time: %d last arrival_time: %d\n",
                  *this, m_name, current_time * m_receiver_ptr->clockPeriod(),
                  delta * m_receiver_ptr->clockPeriod(),
                  arrival_time * m_receiver_ptr->clockPeriod(),
                  m_last_arrival_time * m_receiver_ptr->clockPeriod());
        }
    }

    // If running a cache trace, don't worry about the last arrival checks
    if (!g_system_ptr->m_warmup_enabled) {
        m_last_arrival_time = arrival_time;
    }

    // compute the delay cycles and set enqueue time
    Message* msg_ptr = message.get();
    assert(msg_ptr != NULL);

    assert(m_receiver_ptr->clockEdge() >= msg_ptr->getLastEnqueueTime() &&
           "ensure we aren't dequeued early");

    msg_ptr->setDelayedTicks(m_receiver_ptr->clockEdge() -
                             msg_ptr->getLastEnqueueTime() +
                             msg_ptr->getDelayedTicks());
    msg_ptr->setLastEnqueueTime(arrival_time * m_receiver_ptr->clockPeriod());

    // Insert the message into the priority heap
    MessageBufferNode thisNode(arrival_time, m_msg_counter, message);
    m_prio_heap.push_back(thisNode);
    push_heap(m_prio_heap.begin(), m_prio_heap.end(),
              greater<MessageBufferNode>());

    DPRINTF(RubyQueue, "Enqueue arrival_time: %lld, Message: %s\n",
            arrival_time * m_receiver_ptr->clockPeriod(), *(message.get()));

    // Schedule the wakeup
    if (m_consumer_ptr != NULL) {
        m_consumer_ptr->scheduleEventAbsolute(arrival_time);
        m_consumer_ptr->storeEventInfo(m_vnet_id);
    } else {
        panic("No consumer: %s name: %s\n", *this, m_name);
    }
}

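// Dequeue the head message into 'message' and return the delay, in
// cycles, that the message has accumulated so far.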
Cycles
MessageBuffer::dequeue_getDelayCycles(MsgPtr& message)
{
    dequeue(message);
    return setAndReturnDelayCycles(message);
}

void
MessageBuffer::dequeue(MsgPtr& message)
{
    DPRINTF(RubyQueue, "Dequeueing\n");
    message = m_prio_heap.front().m_msgptr;

    pop();
    DPRINTF(RubyQueue, "Dequeued message is %s\n", (*(message.get())));
}

Cycles
MessageBuffer::dequeue_getDelayCycles()
{
    // get MsgPtr of the message about to be dequeued
    MsgPtr message = m_prio_heap.front().m_msgptr;

    // get the delay cycles
    Cycles delayCycles = setAndReturnDelayCycles(message);
    dequeue();

    return delayCycles;
}

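// Remove the head message from the priority heap. The size seen by the
// network is not reduced until the next cycle.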
void
MessageBuffer::pop()
{
    DPRINTF(RubyQueue, "Popping\n");
    assert(isReady());
    pop_heap(m_prio_heap.begin(), m_prio_heap.end(),
             greater<MessageBufferNode>());
    m_prio_heap.pop_back();

    // record previous size and time so the current buffer size isn't
    // adjusted until next cycle
    if (m_time_last_time_pop < m_receiver_ptr->curCycle()) {
        m_size_at_cycle_start = m_size;
        m_time_last_time_pop = m_receiver_ptr->curCycle();
    }
    m_size--;
}

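// Empty the buffer and reset all book-keeping state.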
void
MessageBuffer::clear()
{
    m_prio_heap.clear();

    m_msg_counter = 0;
    m_size = 0;
    m_time_last_time_enqueue = Cycles(0);
    m_time_last_time_pop = Cycles(0);
    m_size_at_cycle_start = 0;
    m_msgs_this_cycle = 0;
}

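// Requeue the head message m_recycle_latency cycles in the future and
// reschedule the consumer; used when the controller cannot process the
// message this cycle.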
void
MessageBuffer::recycle()
{
    DPRINTF(RubyQueue, "Recycling.\n");
    assert(isReady());
    MessageBufferNode node = m_prio_heap.front();
    pop_heap(m_prio_heap.begin(), m_prio_heap.end(),
             greater<MessageBufferNode>());

    node.m_time = m_receiver_ptr->curCycle() + m_recycle_latency;
    m_prio_heap.back() = node;
    push_heap(m_prio_heap.begin(), m_prio_heap.end(),
              greater<MessageBufferNode>());
    m_consumer_ptr->scheduleEventAbsolute(m_receiver_ptr->curCycle() +
                                          m_recycle_latency);
}

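// Move every message stalled on 'addr' back onto the priority heap so
// the controller can process them again starting next cycle.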
void
MessageBuffer::reanalyzeMessages(const Address& addr)
{
    DPRINTF(RubyQueue, "ReanalyzeMessages\n");
    assert(m_stall_msg_map.count(addr) > 0);
    Cycles nextCycle = m_receiver_ptr->curCycle() + Cycles(1);

    //
    // Put all stalled messages associated with this address back on the
    // prio heap
    //
    while (!m_stall_msg_map[addr].empty()) {
        m_msg_counter++;
        MessageBufferNode msgNode(nextCycle, m_msg_counter,
                                  m_stall_msg_map[addr].front());

        m_prio_heap.push_back(msgNode);
        push_heap(m_prio_heap.begin(), m_prio_heap.end(),
                  greater<MessageBufferNode>());

        m_consumer_ptr->scheduleEventAbsolute(msgNode.m_time);
        m_stall_msg_map[addr].pop_front();
    }
    m_stall_msg_map.erase(addr);
}

void
MessageBuffer::reanalyzeAllMessages()
{
    DPRINTF(RubyQueue, "ReanalyzeAllMessages\n");
    Cycles nextCycle = m_receiver_ptr->curCycle() + Cycles(1);

    //
    // Put all stalled messages, regardless of address, back on the
    // prio heap
    //
    for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
         map_iter != m_stall_msg_map.end();
         ++map_iter) {

        while (!(map_iter->second).empty()) {
            m_msg_counter++;
            MessageBufferNode msgNode(nextCycle, m_msg_counter,
                                      (map_iter->second).front());

            m_prio_heap.push_back(msgNode);
            push_heap(m_prio_heap.begin(), m_prio_heap.end(),
                      greater<MessageBufferNode>());

            m_consumer_ptr->scheduleEventAbsolute(msgNode.m_time);
            (map_iter->second).pop_front();
        }
    }
    m_stall_msg_map.clear();
}

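// Move the message at the head of the queue into the per-address stall
// map, where it waits until reanalyzeMessages (or reanalyzeAllMessages)
// is called.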
void
MessageBuffer::stallMessage(const Address& addr)
{
    DPRINTF(RubyQueue, "Stalling due to %s\n", addr);
    assert(isReady());
    assert(addr.getOffset() == 0);
    MsgPtr message = m_prio_heap.front().m_msgptr;

    pop();

    //
    // Note: no event is scheduled to analyze the map at a later time.
    // Instead the controller is responsible for calling reanalyzeMessages
    // when these addresses change state.
    //
    (m_stall_msg_map[addr]).push_back(message);
}

Cycles
MessageBuffer::setAndReturnDelayCycles(MsgPtr msg_ptr)
{
    // get the delay cycles of the message at the top of the queue

    // this function should only be called on dequeue
    // ensure the msg was enqueued at or before the current clock edge
    assert(msg_ptr->getLastEnqueueTime() <= m_receiver_ptr->clockEdge());

    msg_ptr->setDelayedTicks(m_receiver_ptr->clockEdge() -
                             msg_ptr->getLastEnqueueTime() +
                             msg_ptr->getDelayedTicks());

    return m_receiver_ptr->ticksToCycles(msg_ptr->getDelayedTicks());
}

void
MessageBuffer::print(ostream& out) const
{
    ccprintf(out, "[MessageBuffer: ");
    if (m_consumer_ptr != NULL) {
        ccprintf(out, " consumer-yes ");
    }

    vector<MessageBufferNode> copy(m_prio_heap);
    sort_heap(copy.begin(), copy.end(), greater<MessageBufferNode>());
    ccprintf(out, "%s] %s", copy, m_name);
}

void
MessageBuffer::printStats(ostream& out)
{
    out << "MessageBuffer: " << m_name << " stats - msgs:" << m_msg_counter
        << " full:" << m_not_avail_count << endl;
}

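// A buffer is ready when the message at the head of the queue has
// reached its arrival time.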
bool
MessageBuffer::isReady() const
{
    return ((m_prio_heap.size() > 0) &&
            (m_prio_heap.front().m_time <= m_receiver_ptr->curCycle()));
}

bool
MessageBuffer::functionalRead(Packet *pkt)
{
    // Check the priority heap and read any messages that may
    // correspond to the address in the packet.
    for (unsigned int i = 0; i < m_prio_heap.size(); ++i) {
        Message *msg = m_prio_heap[i].m_msgptr.get();
        if (msg->functionalRead(pkt)) return true;
    }

    // Read the messages in the stall queue that correspond
    // to the address in the packet.
    for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
         map_iter != m_stall_msg_map.end();
         ++map_iter) {

        for (std::list<MsgPtr>::iterator it = (map_iter->second).begin();
             it != (map_iter->second).end(); ++it) {

            Message *msg = (*it).get();
            if (msg->functionalRead(pkt)) return true;
        }
    }
    return false;
}

uint32_t
MessageBuffer::functionalWrite(Packet *pkt)
{
    uint32_t num_functional_writes = 0;

    // Check the priority heap and write any messages that may
    // correspond to the address in the packet.
    for (unsigned int i = 0; i < m_prio_heap.size(); ++i) {
        Message *msg = m_prio_heap[i].m_msgptr.get();
        if (msg->functionalWrite(pkt)) {
            num_functional_writes++;
        }
    }

    // Check the stall queue and write any messages that may
    // correspond to the address in the packet.
    for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
         map_iter != m_stall_msg_map.end();
         ++map_iter) {

        for (std::list<MsgPtr>::iterator it = (map_iter->second).begin();
             it != (map_iter->second).end(); ++it) {

            Message *msg = (*it).get();
            if (msg->functionalWrite(pkt)) {
                num_functional_writes++;
            }
        }
    }

    return num_functional_writes;
}