ruby: deal with printf warnings and convert some to cprintf
[gem5.git] / src / mem / ruby / system / MemoryControl.cc
1
2 /*
3 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are
8 * met: redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer;
10 * redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution;
13 * neither the name of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
18 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
19 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
20 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
21 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
22 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
23 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
24 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
25 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
27 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 /*
31 * MemoryControl.C
32 *
33 * Description: This module simulates a basic DDR-style memory controller
34 * (and can easily be extended to do FB-DIMM as well).
35 *
36 * This module models a single channel, connected to any number of
37 * DIMMs with any number of ranks of DRAMs each. If you want multiple
38 * address/data channels, you need to instantiate multiple copies of
39 * this module.
40 *
41 * Each memory request is placed in a queue associated with a specific
42 * memory bank. This queue is of finite size; if the queue is full
43 * the request will back up in an (infinite) common queue and will
44 * effectively throttle the whole system. This sort of behavior is
45 * intended to be closer to real system behavior than if we had an
46 * infinite queue on each bank. If you want the latter, just make
47 * the bank queues unreasonably large.
48 *
49 * The head item on a bank queue is issued when all of the
50 * following are true:
51 * the bank is available
52 * the address path to the DIMM is available
53 * the data path to or from the DIMM is available
54 *
55 * Note that we are not concerned about fixed offsets in time. The bank
56 * will not be used at the same moment as the address path, but since
57 * there is no queue in the DIMM or the DRAM it will be used at a constant
58 * number of cycles later, so it is treated as if it is used at the same
59 * time.
60 *
61 * We are assuming closed bank policy; that is, we automatically close
62 * each bank after a single read or write. Adding an option for open
63 * bank policy is for future work.
64 *
65 * We are assuming "posted CAS"; that is, we send the READ or WRITE
66 * immediately after the ACTIVATE. This makes scheduling the address
67 * bus trivial; we always schedule a fixed set of cycles. For DDR-400,
68 * this is a set of two cycles; for some configurations such as
69 * DDR-800 the parameter tRRD forces this to be set to three cycles.
70 *
71 * We assume a four-bit-time transfer on the data wires. This is
72 * the minimum burst length for DDR-2. This would correspond
73 * to (for example) a memory where each DIMM is 72 bits wide
74 * and DIMMs are ganged in pairs to deliver 64 bytes at a shot.
75 * This gives us the same occupancy on the data wires as on the
76 * address wires (for the two-address-cycle case).
77 *
78 * The only non-trivial scheduling problem is the data wires.
79 * A write will use the wires earlier in the operation than a read
80 * will; typically one cycle earlier as seen at the DRAM, but earlier
81 * by a worst-case round-trip wire delay when seen at the memory controller.
82 * So, while reads from one rank can be scheduled back-to-back
83 * every two cycles, and writes (to any rank) scheduled every two cycles,
84 * when a read is followed by a write we need to insert a bubble.
85 * Furthermore, consecutive reads from two different ranks may need
86 * to insert a bubble due to skew between when one DRAM stops driving the
87 * wires and when the other one starts. (These bubbles are parameters.)
88 *
89 * This means that when some number of reads and writes are at the
90 * heads of their queues, reads could starve writes, and/or reads
91 * to the same rank could starve out other requests, since the others
92 * would never see the data bus ready.
93 * For this reason, we have implemented an anti-starvation feature.
94 * A group of requests is marked "old", and a counter is incremented
95 * each cycle as long as any request from that batch has not issued.
96 * if the counter reaches twice the bank busy time, we hold off any
97 * newer requests until all of the "old" requests have issued.
98 *
99 * We also model tFAW. This is an obscure DRAM parameter that says
100 * that no more than four activate requests can happen within a window
101 * of a certain size. For most configurations this does not come into play,
102 * or has very little effect, but it could be used to throttle the power
103 * consumption of the DRAM. In this implementation (unlike in a DRAM
104 * data sheet) TFAW is measured in memory bus cycles; i.e. if TFAW = 16
105 * then no more than four activates may happen within any 16 cycle window.
106 * Refreshes are included in the activates.
107 *
108 *
109 * $Id: $
110 *
111 */
112
113 #include <list>
114
115 #include "base/cprintf.hh"
116 #include "mem/ruby/common/Global.hh"
117 #include "mem/gems_common/Map.hh"
118 #include "mem/ruby/common/Address.hh"
119 #include "mem/ruby/profiler/Profiler.hh"
120 #include "mem/ruby/slicc_interface/AbstractChip.hh"
121 #include "mem/ruby/system/System.hh"
122 #include "mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh"
123 #include "mem/ruby/slicc_interface/NetworkMessage.hh"
124 #include "mem/ruby/network/Network.hh"
125 #include "mem/ruby/common/Consumer.hh"
126 #include "mem/ruby/system/MemoryControl.hh"
127
128 class Consumer;
129
130 // Value to reset watchdog timer to.
131 // If we're idle for this many memory control cycles,
132 // shut down our clock (our rescheduling of ourselves).
133 // Refresh shuts down as well.
134 // When we restart, we'll be in a different phase
135 // with respect to ruby cycles, so this introduces
136 // a slight inaccuracy. But it is necessary or the
137 // ruby tester never terminates because the event
138 // queue is never empty.
139 #define IDLECOUNT_MAX_VALUE 1000
140
141 // Output operator definition
142
143 ostream& operator<<(ostream& out, const MemoryControl& obj)
144 {
145 obj.print(out);
146 out << flush;
147 return out;
148 }
149
150
151 // ****************************************************************
152
153 // CONSTRUCTOR
154
// Constructor: capture the owning chip and controller version, then read
// every timing/geometry parameter from the global RubyConfig and allocate
// the per-bank and per-rank bookkeeping arrays.
MemoryControl::MemoryControl (AbstractChip* chip_ptr, int version) {
  m_chip_ptr = chip_ptr;
  m_version = version;
  m_msg_counter = 0;

  m_debug = 0;
  //if (m_version == 0) m_debug = 1;

  // DRAM geometry and timing parameters (see header comment for meanings).
  m_mem_bus_cycle_multiplier = RubyConfig::memBusCycleMultiplier();
  m_banks_per_rank = RubyConfig::banksPerRank();
  m_ranks_per_dimm = RubyConfig::ranksPerDimm();
  m_dimms_per_channel = RubyConfig::dimmsPerChannel();
  m_bank_bit_0 = RubyConfig::bankBit0();
  m_rank_bit_0 = RubyConfig::rankBit0();
  m_dimm_bit_0 = RubyConfig::dimmBit0();
  m_bank_queue_size = RubyConfig::bankQueueSize();
  m_bank_busy_time = RubyConfig::bankBusyTime();
  m_rank_rank_delay = RubyConfig::rankRankDelay();
  m_read_write_delay = RubyConfig::readWriteDelay();
  m_basic_bus_busy_time = RubyConfig::basicBusBusyTime();
  m_mem_ctl_latency = RubyConfig::memCtlLatency();
  m_refresh_period = RubyConfig::refreshPeriod();
  m_memRandomArbitrate = RubyConfig::memRandomArbitrate();
  m_tFaw = RubyConfig::tFaw();
  m_memFixedDelay = RubyConfig::memFixedDelay();

  assert(m_tFaw <= 62); // must fit in a uint64 shift register

  m_total_banks = m_banks_per_rank * m_ranks_per_dimm * m_dimms_per_channel;
  m_total_ranks = m_ranks_per_dimm * m_dimms_per_channel;
  // The per-bank refresh period: one bank is refreshed each time this
  // many controller cycles elapse, so a full sweep takes m_refresh_period.
  m_refresh_period_system = m_refresh_period / m_total_banks;

  // One request queue per bank.
  m_bankQueues = new list<MemoryNode> [m_total_banks];
  assert(m_bankQueues);

  // Cycles remaining until each bank is free again.
  m_bankBusyCounter = new int [m_total_banks];
  assert(m_bankBusyCounter);

  // Anti-starvation flags: nonzero marks a bank holding an "old" request.
  m_oldRequest = new int [m_total_banks];
  assert(m_oldRequest);

  for (int i=0; i<m_total_banks; i++) {
    m_bankBusyCounter[i] = 0;
    m_oldRequest[i] = 0;
  }

  // Data/address bus occupancy counters (see issueRequest for how
  // reads and writes reserve the bus differently).
  m_busBusyCounter_Basic = 0;
  m_busBusyCounter_Write = 0;
  m_busBusyCounter_ReadNewRank = 0;
  m_busBusy_WhichRank = 0;

  m_roundRobin = 0;
  m_refresh_count = 1;
  m_need_refresh = 0;
  m_refresh_bank = 0;
  m_awakened = 0;
  m_idleCount = 0;
  m_ageCounter = 0;

  // Each tfaw shift register keeps a moving bit pattern
  // which shows when recent activates have occurred.
  // m_tfaw_count keeps track of how many 1 bits are set
  // in each shift register. When m_tfaw_count is >= 4,
  // new activates are not allowed.
  m_tfaw_shift = new uint64 [m_total_ranks];
  m_tfaw_count = new int [m_total_ranks];
  for (int i=0; i<m_total_ranks; i++) {
    m_tfaw_shift[i] = 0;
    m_tfaw_count[i] = 0;
  }
}
226
227
228 // DESTRUCTOR
229
230 MemoryControl::~MemoryControl () {
231 delete [] m_bankQueues;
232 delete [] m_bankBusyCounter;
233 delete [] m_oldRequest;
234 }
235
236
237 // PUBLIC METHODS
238
239 // enqueue new request from directory
240
// Accept a new request message from the directory: unpack the address and
// read/write type from the MemoryMsg, stamp its arrival time 'latency'
// cycles in the future, and hand it to enqueueMemRef.
void MemoryControl::enqueue (const MsgPtr& message, int latency) {
  Time current_time = g_eventQueue_ptr->getTime();
  Time arrival_time = current_time + latency;
  // The directory always sends MemoryMsg; the dynamic_cast asserts that.
  const MemoryMsg* memMess = dynamic_cast<const MemoryMsg*>(message.ref());
  physical_address_t addr = memMess->getAddress().getAddress();
  MemoryRequestType type = memMess->getType();
  bool is_mem_read = (type == MemoryRequestType_MEMORY_READ);
  // A request is either a read or a write; the last two ctor args encode that.
  MemoryNode thisReq(arrival_time, message, addr, is_mem_read, !is_mem_read);
  enqueueMemRef(thisReq);
}
251
252 // Alternate entry point used when we already have a MemoryNode structure built.
253
// Alternate entry point used when we already have a MemoryNode structure built.
// Tags the request with a sequence number, places it on the common input
// queue, and (re)starts our per-cycle wakeup if we had gone idle.
void MemoryControl::enqueueMemRef (MemoryNode& memRef) {
  m_msg_counter++;
  memRef.m_msg_counter = m_msg_counter;
  Time arrival_time = memRef.m_time;
  uint64 at = arrival_time;   // widened copy, used only for the debug print
  bool is_mem_read = memRef.m_is_mem_read;
  physical_address_t addr = memRef.m_addr;
  int bank = getBank(addr);
  if (m_debug) {
    cprintf("New memory request%7d: %#08x %c arrived at %10d bank =%3x\n",
        m_msg_counter, addr, is_mem_read? 'R':'W', at, bank);
  }
  g_system_ptr->getProfiler()->profileMemReq(bank);
  m_input_queue.push_back(memRef);
  // If our clock was shut down (idle watchdog expired), restart it.
  if (!m_awakened) {
    g_eventQueue_ptr->scheduleEvent(this, 1);
    m_awakened = 1;
  }
}
273
274
275
276 // dequeue, peek, and isReady are used to transfer completed requests
277 // back to the directory
278
// Remove the head completed request from the response queue.
// Caller must have checked isReady() first.
void MemoryControl::dequeue () {
  assert(isReady());
  m_response_queue.pop_front();
}
283
284
// Return (without removing) the message of the head completed request.
// Only valid when isReady() is true (checked inside peekNode).
const Message* MemoryControl::peek () {
  MemoryNode node = peekNode();
  Message* msg_ptr = node.m_msgptr.ref();
  // L3-writeback nodes carry a NULL msgptr, but those are never enqueued
  // to the response queue (see issueRequest), so this must be non-NULL.
  assert(msg_ptr != NULL);
  return msg_ptr;
}
291
292
// Return (without removing) a copy of the head completed request node.
// Only valid when isReady() is true.
MemoryNode MemoryControl::peekNode () {
  assert(isReady());
  MemoryNode req = m_response_queue.front();
  uint64 returnTime = req.m_time;   // used only for the debug print
  if (m_debug) {
    cprintf("Old memory request%7d: %#08x %c peeked at %10d\n",
        req.m_msg_counter, req.m_addr, req.m_is_mem_read? 'R':'W', returnTime);
  }
  return req;
}
303
304
305 bool MemoryControl::isReady () {
306 return ((!m_response_queue.empty()) &&
307 (m_response_queue.front().m_time <= g_eventQueue_ptr->getTime()));
308 }
309
// Record the consumer (typically the directory) to wake when a
// response becomes ready; see enqueueToDirectory.
void MemoryControl::setConsumer (Consumer* consumer_ptr) {
  m_consumer_ptr = consumer_ptr;
}
313
// Intentionally empty: this controller has no per-object state worth
// printing via operator<<; configuration is dumped by printConfig.
void MemoryControl::print (ostream& out) const {
}
316
317
318 void MemoryControl::printConfig (ostream& out) {
319 out << "Memory Control " << m_version << ":" << endl;
320 out << " Ruby cycles per memory cycle: " << m_mem_bus_cycle_multiplier << endl;
321 out << " Basic read latency: " << m_mem_ctl_latency << endl;
322 if (m_memFixedDelay) {
323 out << " Fixed Latency mode: Added cycles = " << m_memFixedDelay << endl;
324 } else {
325 out << " Bank busy time: " << BANK_BUSY_TIME << " memory cycles" << endl;
326 out << " Memory channel busy time: " << m_basic_bus_busy_time << endl;
327 out << " Dead cycles between reads to different ranks: " << m_rank_rank_delay << endl;
328 out << " Dead cycle between a read and a write: " << m_read_write_delay << endl;
329 out << " tFaw (four-activate) window: " << m_tFaw << endl;
330 }
331 out << " Banks per rank: " << m_banks_per_rank << endl;
332 out << " Ranks per DIMM: " << m_ranks_per_dimm << endl;
333 out << " DIMMs per channel: " << m_dimms_per_channel << endl;
334 out << " LSB of bank field in address: " << m_bank_bit_0 << endl;
335 out << " LSB of rank field in address: " << m_rank_bit_0 << endl;
336 out << " LSB of DIMM field in address: " << m_dimm_bit_0 << endl;
337 out << " Max size of each bank queue: " << m_bank_queue_size << endl;
338 out << " Refresh period (within one bank): " << m_refresh_period << endl;
339 out << " Arbitration randomness: " << m_memRandomArbitrate << endl;
340 }
341
342
// Enable (nonzero) or disable (zero) the per-request debug printing.
void MemoryControl::setDebug (int debugFlag) {
  m_debug = debugFlag;
}
346
347
348 // ****************************************************************
349
350 // PRIVATE METHODS
351
352 // Queue up a completed request to send back to directory
353
// Queue up a completed request to send back to directory.
// 'latency' is in memory-bus cycles; it is converted to ruby cycles via
// the bus-cycle multiplier before computing the absolute return time.
void MemoryControl::enqueueToDirectory (MemoryNode req, int latency) {
  Time arrival_time = g_eventQueue_ptr->getTime()
    + (latency * m_mem_bus_cycle_multiplier);
  req.m_time = arrival_time;
  m_response_queue.push_back(req);

  // schedule the wake up
  g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr, arrival_time);
}
363
364
365
366 // getBank returns an integer that is unique for each
367 // bank across this memory controller.
368
369 int MemoryControl::getBank (physical_address_t addr) {
370 int dimm = (addr >> m_dimm_bit_0) & (m_dimms_per_channel - 1);
371 int rank = (addr >> m_rank_bit_0) & (m_ranks_per_dimm - 1);
372 int bank = (addr >> m_bank_bit_0) & (m_banks_per_rank - 1);
373 return (dimm * m_ranks_per_dimm * m_banks_per_rank)
374 + (rank * m_banks_per_rank)
375 + bank;
376 }
377
378 // getRank returns an integer that is unique for each rank
379 // and independent of individual bank.
380
381 int MemoryControl::getRank (int bank) {
382 int rank = (bank / m_banks_per_rank);
383 assert (rank < (m_ranks_per_dimm * m_dimms_per_channel));
384 return rank;
385 }
386
387
388 // queueReady determines if the head item in a bank queue
389 // can be issued this cycle
390
// queueReady determines if the head item in a bank queue
// can be issued this cycle. Each early-out also bumps the matching
// profiler counter so we can see why requests stalled.
bool MemoryControl::queueReady (int bank) {
  // Bank itself still busy from a previous activate (ignored in
  // fixed-delay mode, where banks never conflict).
  if ((m_bankBusyCounter[bank] > 0) && !m_memFixedDelay) {
    g_system_ptr->getProfiler()->profileMemBankBusy();
    //if (m_debug) cprintf(" bank %x busy %d\n", bank, m_bankBusyCounter[bank]);
    return false;
  }
  // Optional random arbitration noise: values >= 2 are interpreted as a
  // percent chance to refuse this bank this cycle.
  if (m_memRandomArbitrate >= 2) {
    if ((random() % 100) < m_memRandomArbitrate) {
      g_system_ptr->getProfiler()->profileMemRandBusy();
      return false;
    }
  }
  // Fixed-delay mode bypasses all resource modeling below.
  if (m_memFixedDelay) return true;
  // Anti-starvation: once the "old" batch has aged past twice the bank
  // busy time, only old requests may issue (see header comment).
  if ((m_ageCounter > (2 * m_bank_busy_time)) && !m_oldRequest[bank]) {
    g_system_ptr->getProfiler()->profileMemNotOld();
    return false;
  }
  if (m_busBusyCounter_Basic == m_basic_bus_busy_time) {
    // Another bank must have issued this same cycle.
    // For profiling, we count this as an arb wait rather than
    // a bus wait. This is a little inaccurate since it MIGHT
    // have also been blocked waiting for a read-write or a
    // read-read instead, but it's pretty close.
    g_system_ptr->getProfiler()->profileMemArbWait(1);
    return false;
  }
  // Data/address bus still occupied by an earlier issue.
  if (m_busBusyCounter_Basic > 0) {
    g_system_ptr->getProfiler()->profileMemBusBusy();
    return false;
  }
  // tFAW: no more than ACTIVATE_PER_TFAW activates per rank per window.
  int rank = getRank(bank);
  if (m_tfaw_count[rank] >= ACTIVATE_PER_TFAW) {
    g_system_ptr->getProfiler()->profileMemTfawBusy();
    return false;
  }
  bool write = !m_bankQueues[bank].front().m_is_mem_read;
  // Writes drive the data wires earlier than reads, so a write right
  // after a read needs the extra read-write bubble to expire.
  if (write && (m_busBusyCounter_Write > 0)) {
    g_system_ptr->getProfiler()->profileMemReadWriteBusy();
    return false;
  }
  // Consecutive reads from different ranks need the rank-rank skew
  // bubble to expire before the new rank may drive the wires.
  if (!write && (rank != m_busBusy_WhichRank)
      && (m_busBusyCounter_ReadNewRank > 0)) {
    g_system_ptr->getProfiler()->profileMemDataBusBusy();
    return false;
  }
  return true;
}
438
439
440 // issueRefresh checks to see if this bank has a refresh scheduled
441 // and, if so, does the refresh and returns true
442
// issueRefresh checks to see if this bank has a refresh scheduled
// and, if so, does the refresh and returns true.
// A refresh occupies the bank and all bus resources exactly like a
// normal activate, and counts against the tFAW window.
bool MemoryControl::issueRefresh (int bank) {
  // Only the single bank currently at the head of the refresh rotation
  // may refresh, and only if a refresh is actually pending.
  if (!m_need_refresh || (m_refresh_bank != bank)) return false;
  if (m_bankBusyCounter[bank] > 0) return false;
  // Note that m_busBusyCounter will prevent multiple issues during
  // the same cycle, as well as on different but close cycles:
  if (m_busBusyCounter_Basic > 0) return false;
  int rank = getRank(bank);
  if (m_tfaw_count[rank] >= ACTIVATE_PER_TFAW) return false;

  // Issue it:

  //if (m_debug) {
  //uint64 current_time = g_eventQueue_ptr->getTime();
  //cprintf(" Refresh bank %3x at %d\n", bank, current_time);
  //}
  g_system_ptr->getProfiler()->profileMemRefresh();
  // Advance the rotation: one fewer refresh owed, next bank's turn.
  m_need_refresh--;
  m_refresh_bank++;
  if (m_refresh_bank >= m_total_banks) m_refresh_bank = 0;
  // Reserve the bank and all three bus counters, same as an activate.
  m_bankBusyCounter[bank] = m_bank_busy_time;
  m_busBusyCounter_Basic = m_basic_bus_busy_time;
  m_busBusyCounter_Write = m_basic_bus_busy_time;
  m_busBusyCounter_ReadNewRank = m_basic_bus_busy_time;
  markTfaw(rank);
  return true;
}
469
470
471 // Mark the activate in the tFaw shift register
472 void MemoryControl::markTfaw (int rank) {
473 if (m_tFaw) {
474 m_tfaw_shift[rank] |= (1 << (m_tFaw-1));
475 m_tfaw_count[rank]++;
476 }
477 }
478
479
480 // Issue a memory request: Activate the bank,
481 // reserve the address and data buses, and queue
482 // the request for return to the requesting
483 // processor after a fixed latency.
484
// Issue a memory request: Activate the bank,
// reserve the address and data buses, and queue
// the request for return to the requesting
// processor after a fixed latency.
void MemoryControl::issueRequest (int bank) {
  int rank = getRank(bank);
  MemoryNode req = m_bankQueues[bank].front();
  m_bankQueues[bank].pop_front();
  if (m_debug) {
    uint64 current_time = g_eventQueue_ptr->getTime();
    cprintf(" Mem issue request%7d: %#08x %c at %10d bank =%3x\n",
        req.m_msg_counter, req.m_addr, req.m_is_mem_read? 'R':'W', current_time, bank);
  }
  if (req.m_msgptr.ref() != NULL) { // don't enqueue L3 writebacks
    enqueueToDirectory(req, m_mem_ctl_latency + m_memFixedDelay);
  }
  // Clear the anti-starvation flag and record the activate in tFAW.
  m_oldRequest[bank] = 0;
  markTfaw(rank);
  m_bankBusyCounter[bank] = m_bank_busy_time;
  m_busBusy_WhichRank = rank;
  if (req.m_is_mem_read) {
    g_system_ptr->getProfiler()->profileMemRead();
    // After a read, a following write must wait the extra read-write
    // bubble, and a read from a *different* rank the rank-rank bubble.
    m_busBusyCounter_Basic = m_basic_bus_busy_time;
    m_busBusyCounter_Write = m_basic_bus_busy_time + m_read_write_delay;
    m_busBusyCounter_ReadNewRank = m_basic_bus_busy_time + m_rank_rank_delay;
  } else {
    g_system_ptr->getProfiler()->profileMemWrite();
    // Writes impose no extra bubbles beyond basic bus occupancy.
    m_busBusyCounter_Basic = m_basic_bus_busy_time;
    m_busBusyCounter_Write = m_basic_bus_busy_time;
    m_busBusyCounter_ReadNewRank = m_basic_bus_busy_time;
  }
}
513
514
515 // executeCycle: This function is called once per memory clock cycle
516 // to simulate all the periodic hardware.
517
// executeCycle: This function is called once per memory clock cycle
// to simulate all the periodic hardware: counter decay, tFAW windows,
// refresh generation, anti-starvation batching, round-robin issue,
// and draining the common input queue into the bank queues.
void MemoryControl::executeCycle () {
  // Keep track of time by counting down the busy counters:
  for (int bank=0; bank < m_total_banks; bank++) {
    if (m_bankBusyCounter[bank] > 0) m_bankBusyCounter[bank]--;
  }
  if (m_busBusyCounter_Write > 0) m_busBusyCounter_Write--;
  if (m_busBusyCounter_ReadNewRank > 0) m_busBusyCounter_ReadNewRank--;
  if (m_busBusyCounter_Basic > 0) m_busBusyCounter_Basic--;

  // Count down the tFAW shift registers:
  // a 1 bit falling off the low end means an activate has aged out
  // of the window, so that rank regains an activate credit.
  for (int rank=0; rank < m_total_ranks; rank++) {
    if (m_tfaw_shift[rank] & 1) m_tfaw_count[rank]--;
    m_tfaw_shift[rank] >>= 1;
  }

  // After time period expires, latch an indication that we need a refresh.
  // Disable refresh if in memFixedDelay mode.
  if (!m_memFixedDelay) m_refresh_count--;
  if (m_refresh_count == 0) {
    m_refresh_count = m_refresh_period_system;
    assert (m_need_refresh < 10); // Are we overrunning our ability to refresh?
    m_need_refresh++;
  }

  // If this batch of requests is all done, make a new batch:
  // every bank that currently has queued work becomes "old" and
  // the age counter restarts (see anti-starvation note in header).
  m_ageCounter++;
  int anyOld = 0;
  for (int bank=0; bank < m_total_banks; bank++) {
    anyOld |= m_oldRequest[bank];
  }
  if (!anyOld) {
    for (int bank=0; bank < m_total_banks; bank++) {
      if (!m_bankQueues[bank].empty()) m_oldRequest[bank] = 1;
    }
    m_ageCounter = 0;
  }

  // If randomness desired, re-randomize round-robin position each cycle
  if (m_memRandomArbitrate) {
    m_roundRobin = random() % m_total_banks;
  }


  // For each channel, scan round-robin, and pick an old, ready
  // request and issue it. Treat a refresh request as if it
  // were at the head of its bank queue. After we issue something,
  // keep scanning the queues just to gather statistics about
  // how many are waiting. If in memFixedDelay mode, we can issue
  // more than one request per cycle.

  int queueHeads = 0;
  int banksIssued = 0;
  for (int i = 0; i < m_total_banks; i++) {
    m_roundRobin++;
    if (m_roundRobin >= m_total_banks) m_roundRobin = 0;
    issueRefresh(m_roundRobin);
    int qs = m_bankQueues[m_roundRobin].size();
    if (qs > 1) {
      g_system_ptr->getProfiler()->profileMemBankQ(qs-1);
    }
    if (qs > 0) {
      m_idleCount = IDLECOUNT_MAX_VALUE; // we're not idle if anything is queued
      queueHeads++;
      if (queueReady(m_roundRobin)) {
        issueRequest(m_roundRobin);
        banksIssued++;
        if (m_memFixedDelay) {
          g_system_ptr->getProfiler()->profileMemWaitCycles(m_memFixedDelay);
        }
      }
    }
  }

  // memWaitCycles is a redundant catch-all for the specific counters in queueReady
  g_system_ptr->getProfiler()->profileMemWaitCycles(queueHeads - banksIssued);

  // Check input queue and move anything to bank queues if not full.
  // Since this is done here at the end of the cycle, there will always
  // be at least one cycle of latency in the bank queue.
  // We deliberately move at most one request per cycle (to simulate
  // typical hardware). Note that if one bank queue fills up, other
  // requests can get stuck behind it here.

  if (!m_input_queue.empty()) {
    m_idleCount = IDLECOUNT_MAX_VALUE; // we're not idle if anything is pending
    MemoryNode req = m_input_queue.front();
    int bank = getBank(req.m_addr);
    if (m_bankQueues[bank].size() < m_bank_queue_size) {
      m_input_queue.pop_front();
      m_bankQueues[bank].push_back(req);
    }
    g_system_ptr->getProfiler()->profileMemInputQ(m_input_queue.size());
  }
}
612
613
614 // wakeup: This function is called once per memory controller clock cycle.
615
// wakeup: This function is called once per memory controller clock cycle.
// Runs one cycle of the hardware model, then either reschedules itself
// or, after IDLECOUNT_MAX_VALUE idle cycles, shuts its clock down so the
// event queue can drain (see the watchdog comment above).
void MemoryControl::wakeup () {

  // execute everything
  executeCycle();

  m_idleCount--;
  if (m_idleCount <= 0) {
    // Go dormant; enqueueMemRef will re-arm us when work arrives.
    m_awakened = 0;
  } else {
    // Reschedule ourselves so that we run every memory cycle:
    g_eventQueue_ptr->scheduleEvent(this, m_mem_bus_cycle_multiplier);
  }
}
629
630