/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/system/CacheMemory.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/ruby/recorder/Tracer.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/gems_common/Map.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"

//Sequencer::Sequencer(int core_id, MessageBuffer* mandatory_q)

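// LLSC_FAIL is returned by makeRequest() when a Locked_Write
// (store-conditional) finds that the line's lock flag has already been
// cleared; see makeRequest() below.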
#define LLSC_FAIL -2

Sequencer::Sequencer(const string & name)
  : RubyPort(name)
{
}

void Sequencer::init(const vector<string> & argv)
{
  m_deadlock_check_scheduled = false;
  m_outstanding_count = 0;

  m_max_outstanding_requests = 0;
  m_deadlock_threshold = 0;
  m_version = -1;
  m_instCache_ptr = NULL;
  m_dataCache_ptr = NULL;
  m_controller = NULL;
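  // 200 appears to act as a sentinel proc_id meaning "no processor currently
  // owns an atomic (RMW) sequence"; see isReady() and hitCallback() below.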
  m_servicing_atomic = 200;
  m_atomics_counter = 0;
  for (size_t i=0; i<argv.size(); i+=2) {
    if ( argv[i] == "controller") {
      m_controller = RubySystem::getController(argv[i+1]); // args[i] = "L1Cache"
      m_mandatory_q_ptr = m_controller->getMandatoryQueue();
    } else if ( argv[i] == "icache")
      m_instCache_ptr = RubySystem::getCache(argv[i+1]);
    else if ( argv[i] == "dcache")
      m_dataCache_ptr = RubySystem::getCache(argv[i+1]);
    else if ( argv[i] == "version")
      m_version = atoi(argv[i+1].c_str());
    else if ( argv[i] == "max_outstanding_requests")
      m_max_outstanding_requests = atoi(argv[i+1].c_str());
    else if ( argv[i] == "deadlock_threshold")
      m_deadlock_threshold = atoi(argv[i+1].c_str());
    else {
      cerr << "WARNING: Sequencer: Unknown configuration parameter: " << argv[i] << endl;
      assert(false);
    }
  }
  assert(m_max_outstanding_requests > 0);
  assert(m_deadlock_threshold > 0);
  assert(m_version > -1);
  assert(m_instCache_ptr != NULL);
  assert(m_dataCache_ptr != NULL);
  assert(m_controller != NULL);
}

Sequencer::~Sequencer() {

}

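// The deadlock check: armed by insertRequest() when the request tables go
// from empty to non-empty, wakeup() re-schedules itself every
// m_deadlock_threshold cycles while requests remain outstanding and aborts
// the simulation if any request has been in flight longer than the threshold.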
void Sequencer::wakeup() {
  // Check for deadlock of any of the requests
  Time current_time = g_eventQueue_ptr->getTime();

  // Check across all outstanding requests
  int total_outstanding = 0;

  Vector<Address> keys = m_readRequestTable.keys();
  for (int i=0; i<keys.size(); i++) {
    SequencerRequest* request = m_readRequestTable.lookup(keys[i]);
    if (current_time - request->issue_time >= m_deadlock_threshold) {
      WARN_MSG("Possible Deadlock detected");
      WARN_EXPR(request);
      WARN_EXPR(m_version);
      WARN_EXPR(request->ruby_request.paddr);
      WARN_EXPR(keys.size());
      WARN_EXPR(current_time);
      WARN_EXPR(request->issue_time);
      WARN_EXPR(current_time - request->issue_time);
      ERROR_MSG("Aborting");
    }
  }

  keys = m_writeRequestTable.keys();
  for (int i=0; i<keys.size(); i++) {
    SequencerRequest* request = m_writeRequestTable.lookup(keys[i]);
    if (current_time - request->issue_time >= m_deadlock_threshold) {
      WARN_MSG("Possible Deadlock detected");
      WARN_EXPR(request);
      WARN_EXPR(m_version);
      WARN_EXPR(current_time);
      WARN_EXPR(request->issue_time);
      WARN_EXPR(current_time - request->issue_time);
      WARN_EXPR(keys.size());
      ERROR_MSG("Aborting");
    }
  }
  total_outstanding += m_writeRequestTable.size() + m_readRequestTable.size();

  assert(m_outstanding_count == total_outstanding);

  if (m_outstanding_count > 0) { // If there are still outstanding requests, keep checking
    g_eventQueue_ptr->scheduleEvent(this, m_deadlock_threshold);
  } else {
    m_deadlock_check_scheduled = false;
  }
}

void Sequencer::printProgress(ostream& out) const {
  /*
  int total_demand = 0;
  out << "Sequencer Stats Version " << m_version << endl;
  out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
  out << "---------------" << endl;
  out << "outstanding requests" << endl;

  Vector<Address> rkeys = m_readRequestTable.keys();
  int read_size = rkeys.size();
  out << "proc " << m_version << " Read Requests = " << read_size << endl;
  // print the request table
  for(int i=0; i < read_size; ++i){
    SequencerRequest * request = m_readRequestTable.lookup(rkeys[i]);
    out << "\tRequest[ " << i << " ] = " << request->type << " Address " << rkeys[i] << " Posted " << request->issue_time << " PF " << PrefetchBit_No << endl;
    total_demand++;
  }

  Vector<Address> wkeys = m_writeRequestTable.keys();
  int write_size = wkeys.size();
  out << "proc " << m_version << " Write Requests = " << write_size << endl;
  // print the request table
  for(int i=0; i < write_size; ++i){
    CacheMsg & request = m_writeRequestTable.lookup(wkeys[i]);
    out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << wkeys[i] << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl;
    if( request.getPrefetch() == PrefetchBit_No ){
      total_demand++;
    }
  }

  out << endl;

  out << "Total Number Outstanding: " << m_outstanding_count << endl;
  out << "Total Number Demand     : " << total_demand << endl;
  out << "Total Number Prefetches : " << m_outstanding_count - total_demand << endl;
  out << endl;
  out << endl;
  */
}

void Sequencer::printConfig(ostream& out) const {
  out << "Sequencer config: " << m_name << endl;
  out << "  controller: " << m_controller->getName() << endl;
  out << "  version: " << m_version << endl;
  out << "  max_outstanding_requests: " << m_max_outstanding_requests << endl;
  out << "  deadlock_threshold: " << m_deadlock_threshold << endl;
}

// Insert the request into the correct request table. Nominally returns true
// if the entry was already present; in practice a duplicate entry trips an
// assertion below, so the function always returns false.
bool Sequencer::insertRequest(SequencerRequest* request) {
  int total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();

  assert(m_outstanding_count == total_outstanding);

  // See if we should schedule a deadlock check
  if (m_deadlock_check_scheduled == false) {
    g_eventQueue_ptr->scheduleEvent(this, m_deadlock_threshold);
    m_deadlock_check_scheduled = true;
  }

  Address line_addr(request->ruby_request.paddr);
  line_addr.makeLineAddress();
  if ((request->ruby_request.type == RubyRequestType_ST) ||
      (request->ruby_request.type == RubyRequestType_RMW_Read) ||
      (request->ruby_request.type == RubyRequestType_RMW_Write) ||
      (request->ruby_request.type == RubyRequestType_Locked_Read) ||
      (request->ruby_request.type == RubyRequestType_Locked_Write)) {
    if (m_writeRequestTable.exist(line_addr)) {
      m_writeRequestTable.lookup(line_addr) = request;
      // return true;
      assert(0); // drh5: isn't this an error? do you lose the initial request?
    }
    m_writeRequestTable.allocate(line_addr);
    m_writeRequestTable.lookup(line_addr) = request;
    m_outstanding_count++;
  } else {
    if (m_readRequestTable.exist(line_addr)) {
      m_readRequestTable.lookup(line_addr) = request;
      // return true;
      assert(0); // drh5: isn't this an error? do you lose the initial request?
    }
    m_readRequestTable.allocate(line_addr);
    m_readRequestTable.lookup(line_addr) = request;
    m_outstanding_count++;
  }

  g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);

  total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
  assert(m_outstanding_count == total_outstanding);

  return false;
}

void Sequencer::removeRequest(SequencerRequest* srequest) {

  assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());

  const RubyRequest & ruby_request = srequest->ruby_request;
  Address line_addr(ruby_request.paddr);
  line_addr.makeLineAddress();
  if ((ruby_request.type == RubyRequestType_ST) ||
      (ruby_request.type == RubyRequestType_RMW_Read) ||
      (ruby_request.type == RubyRequestType_RMW_Write) ||
      (ruby_request.type == RubyRequestType_Locked_Read) ||
      (ruby_request.type == RubyRequestType_Locked_Write)) {
    m_writeRequestTable.deallocate(line_addr);
  } else {
    m_readRequestTable.deallocate(line_addr);
  }
  m_outstanding_count--;

  assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());
}

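// Completion path for store-class requests (ST, RMW, and Locked): retires
// the write-table entry, then updates the LL/SC lock bit or the controller's
// atomic state before handing off to the common hitCallback().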
void Sequencer::writeCallback(const Address& address, DataBlock& data) {

  assert(address == line_address(address));
  assert(m_writeRequestTable.exist(line_address(address)));

  SequencerRequest* request = m_writeRequestTable.lookup(address);
  removeRequest(request);

  assert((request->ruby_request.type == RubyRequestType_ST) ||
         (request->ruby_request.type == RubyRequestType_RMW_Read) ||
         (request->ruby_request.type == RubyRequestType_RMW_Write) ||
         (request->ruby_request.type == RubyRequestType_Locked_Read) ||
         (request->ruby_request.type == RubyRequestType_Locked_Write));
  // POLINA: the assumption is that atomics are only on data cache and not instruction cache
  if (request->ruby_request.type == RubyRequestType_Locked_Read) {
    m_dataCache_ptr->setLocked(address, m_version);
  }
  else if (request->ruby_request.type == RubyRequestType_RMW_Read) {
    m_controller->set_atomic(address);
  }
  else if (request->ruby_request.type == RubyRequestType_RMW_Write) {
    m_controller->clear_atomic();
  }

  hitCallback(request, data);
}

void Sequencer::readCallback(const Address& address, DataBlock& data) {

  assert(address == line_address(address));
  assert(m_readRequestTable.exist(line_address(address)));

  SequencerRequest* request = m_readRequestTable.lookup(address);
  removeRequest(request);

  assert((request->ruby_request.type == RubyRequestType_LD) ||
         (request->ruby_request.type == RubyRequestType_RMW_Read) ||
         (request->ruby_request.type == RubyRequestType_IFETCH));

  hitCallback(request, data);
}

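// Common completion path for loads and stores: touches the line's LRU state,
// profiles the miss latency, copies data between the DataBlock and the
// request's buffer, drains RMW bookkeeping, and signals the requester
// through m_hit_callback.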
void Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data) {
  const RubyRequest & ruby_request = srequest->ruby_request;
  Address request_address(ruby_request.paddr);
  Address request_line_address(ruby_request.paddr);
  request_line_address.makeLineAddress();
  RubyRequestType type = ruby_request.type;
  Time issued_time = srequest->issue_time;

  // Set this cache entry to the most recently used
  if (type == RubyRequestType_IFETCH) {
    if (m_instCache_ptr->isTagPresent(request_line_address))
      m_instCache_ptr->setMRU(request_line_address);
  } else {
    if (m_dataCache_ptr->isTagPresent(request_line_address))
      m_dataCache_ptr->setMRU(request_line_address);
  }

  assert(g_eventQueue_ptr->getTime() >= issued_time);
  Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;

  // Profile the miss latency for all non-zero demand misses
  if (miss_latency != 0) {
    g_system_ptr->getProfiler()->missLatency(miss_latency, type);

    if (Debug::getProtocolTrace()) {
      g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(ruby_request.paddr),
                                                     "", "Done", "", int_to_string(miss_latency)+" cycles");
    }
  }
  /*
  if (request.getPrefetch() == PrefetchBit_Yes) {
    return; // Ignore the prefetch
  }
  */

  // update the data
  if (ruby_request.data != NULL) {
    if ((type == RubyRequestType_LD) ||
        (type == RubyRequestType_IFETCH) ||
        (type == RubyRequestType_RMW_Read)) {
      memcpy(ruby_request.data, data.getData(request_address.getOffset(), ruby_request.len), ruby_request.len);
    } else {
      data.setData(ruby_request.data, request_address.getOffset(), ruby_request.len);
    }
  }
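  // An RMW_Write ends an atomic sequence, so it must come from the processor
  // recorded as the current owner in isReady(); when the count of in-flight
  // atomics drains to zero, the owner sentinel is reset (200 seemingly
  // meaning "no owner") so other processors may proceed.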
  if (type == RubyRequestType_RMW_Write) {
    if (m_servicing_atomic != ruby_request.proc_id) {
      assert(0);
    }
    assert(m_atomics_counter > 0);
    m_atomics_counter--;
    if (m_atomics_counter == 0) {
      m_servicing_atomic = 200;
    }
  }
  m_hit_callback(srequest->id);
  delete srequest;
}

// Returns true if the sequencer can accept this request now; returns false
// if it is at its outstanding-request limit, already has a request in flight
// for the same cache line, or is serializing another processor's atomic
// sequence.
bool Sequencer::isReady(const RubyRequest& request, bool dont_set) {
  // POLINA: check if we are currently flushing the write buffer, if so Ruby is returned as not ready
  // to simulate stalling of the front-end
  // Do we stall all the sequencers? If it is atomic instruction - yes!
  if (m_outstanding_count >= m_max_outstanding_requests) {
    return false;
  }

  if( m_writeRequestTable.exist(line_address(Address(request.paddr))) ||
      m_readRequestTable.exist(line_address(Address(request.paddr))) ){
    //cout << "OUTSTANDING REQUEST EXISTS " << p << " VER " << m_version << endl;
    //printProgress(cout);
    return false;
  }

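  // Atomic (RMW) sequences are serialized across processors: m_servicing_atomic
  // holds the proc_id of the current owner (200 appears to be the "no owner"
  // sentinel, and 100 is asserted against as an invalid proc_id), while
  // m_atomics_counter counts RMW_Reads not yet paired with their RMW_Write.
  // Requests from any other processor are refused until the sequence drains.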
  assert(request.proc_id != 100);
  if (m_servicing_atomic != 200 && m_servicing_atomic != request.proc_id) {
    assert(m_atomics_counter > 0);
    return false;
  }
  else {
    if (!dont_set) {
      if (request.type == RubyRequestType_RMW_Read) {
        if (m_servicing_atomic == 200) {
          assert(m_atomics_counter == 0);
          m_servicing_atomic = request.proc_id;
        }
        else {
          assert(m_servicing_atomic == request.proc_id);
        }
        m_atomics_counter++;
      }
      else {
        if (m_servicing_atomic == request.proc_id) {
          if (request.type != RubyRequestType_RMW_Write) {
            m_servicing_atomic = 200;
            m_atomics_counter = 0;
          }
        }
      }
    }
  }

  return true;
}

bool Sequencer::empty() const {
  return (m_writeRequestTable.size() == 0) && (m_readRequestTable.size() == 0);
}

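// Entry point from the CPU model. Returns the unique request id when the
// request is issued, LLSC_FAIL (-2) when a store-conditional finds its lock
// flag cleared, and -1 when the sequencer cannot accept the request yet
// (see isReady()).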
int64_t Sequencer::makeRequest(const RubyRequest & request)
{
  assert(Address(request.paddr).getOffset() + request.len <= RubySystem::getBlockSizeBytes());
  if (isReady(request)) {
    int64_t id = makeUniqueRequestID();
    SequencerRequest *srequest = new SequencerRequest(request, id, g_eventQueue_ptr->getTime());
    bool found = insertRequest(srequest);
    if (!found) {
      if (request.type == RubyRequestType_Locked_Write) {
        // NOTE: it is OK to check the locked flag here as the mandatory queue will be checked first
        //       ensuring that nothing comes between checking the flag and servicing the store
        if (!m_dataCache_ptr->isLocked(line_address(Address(request.paddr)), m_version)) {
          return LLSC_FAIL;
        }
        else {
          m_dataCache_ptr->clearLocked(line_address(Address(request.paddr)));
        }
      }
      if (request.type == RubyRequestType_RMW_Write) {
        m_controller->started_writes();
      }
      issueRequest(request);

      // TODO: issue hardware prefetches here
      return id;
    }
    else {
      assert(0);
    }
  }
  else {
    return -1;
  }
}

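// Translates the RubyRequest into the CacheMsg consumed by the SLICC-generated
// controller and enqueues it on the mandatory queue after the L1 access
// latency. Note that Locked_Read/Locked_Write are issued as plain ST and
// both RMW halves as ATOMIC.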
void Sequencer::issueRequest(const RubyRequest& request) {

  // TODO: get rid of CacheMsg, CacheRequestType, and AccessModeType, & have SLICC use RubyRequest and subtypes natively
  CacheRequestType ctype;
  switch(request.type) {
  case RubyRequestType_IFETCH:
    ctype = CacheRequestType_IFETCH;
    break;
  case RubyRequestType_LD:
    ctype = CacheRequestType_LD;
    break;
  case RubyRequestType_ST:
    ctype = CacheRequestType_ST;
    break;
  case RubyRequestType_Locked_Read:
    ctype = CacheRequestType_ST;
    break;
  case RubyRequestType_Locked_Write:
    ctype = CacheRequestType_ST;
    break;
  case RubyRequestType_RMW_Read:
    ctype = CacheRequestType_ATOMIC;
    break;
  case RubyRequestType_RMW_Write:
    ctype = CacheRequestType_ATOMIC;
    break;
  default:
    assert(0);
  }
  AccessModeType amtype;
  switch(request.access_mode){
  case RubyAccessMode_User:
    amtype = AccessModeType_UserMode;
    break;
  case RubyAccessMode_Supervisor:
    amtype = AccessModeType_SupervisorMode;
    break;
  case RubyAccessMode_Device:
    amtype = AccessModeType_UserMode;
    break;
  default:
    assert(0);
  }
  Address line_addr(request.paddr);
  line_addr.makeLineAddress();
  CacheMsg msg(line_addr, Address(request.paddr), ctype, Address(request.pc), amtype, request.len, PrefetchBit_No, request.proc_id);

  if (Debug::getProtocolTrace()) {
    g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(request.paddr),
                                                   "", "Begin", "", RubyRequestType_to_string(request.type));
  }

  if (g_system_ptr->getTracer()->traceEnabled()) {
    g_system_ptr->getTracer()->traceRequest(m_name, line_addr, Address(request.pc),
                                            request.type, g_eventQueue_ptr->getTime());
  }

  Time latency = 0;  // initialized to a null value

  if (request.type == RubyRequestType_IFETCH)
    latency = m_instCache_ptr->getLatency();
  else
    latency = m_dataCache_ptr->getLatency();

  // Send the message to the cache controller
  assert(latency > 0);

  m_mandatory_q_ptr->enqueue(msg, latency);
}
/*
bool Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type,
                               AccessModeType access_mode,
                               int size, DataBlock*& data_ptr) {
  if (type == CacheRequestType_IFETCH) {
    return m_instCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
  } else {
    return m_dataCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
  }
}
*/

void Sequencer::print(ostream& out) const {
  out << "[Sequencer: " << m_version
      << ", outstanding requests: " << m_outstanding_count;

  out << ", read request table: " << m_readRequestTable
      << ", write request table: " << m_writeRequestTable;
  out << "]";
}

// this can be called from setState whenever coherence permissions are upgraded
// when invoked, coherence violations will be checked for the given block
void Sequencer::checkCoherence(const Address& addr) {
#ifdef CHECK_COHERENCE
  g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}