[gem5.git] / src / mem / ruby / system / Sequencer.cc
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/system/CacheMemory.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/ruby/recorder/Tracer.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/gems_common/Map.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"

//Sequencer::Sequencer(int core_id, MessageBuffer* mandatory_q)

// Sentinel returned by makeRequest() when a store-conditional
// (Locked_Write) fails because the line's lock bit was lost.
#define LLSC_FAIL -2
Sequencer::Sequencer(const string & name)
  :RubyPort(name)
{
}

void Sequencer::init(const vector<string> & argv)
{
  m_deadlock_check_scheduled = false;
  m_outstanding_count = 0;

  m_max_outstanding_requests = 0;
  m_deadlock_threshold = 0;
  m_version = -1;
  m_instCache_ptr = NULL;
  m_dataCache_ptr = NULL;
  m_controller = NULL;
  for (size_t i=0; i<argv.size(); i+=2) {
    if ( argv[i] == "controller") {
      m_controller = RubySystem::getController(argv[i+1]); // e.g. argv[i+1] == "L1Cache"
      m_mandatory_q_ptr = m_controller->getMandatoryQueue();
    } else if ( argv[i] == "icache")
      m_instCache_ptr = RubySystem::getCache(argv[i+1]);
    else if ( argv[i] == "dcache")
      m_dataCache_ptr = RubySystem::getCache(argv[i+1]);
    else if ( argv[i] == "version")
      m_version = atoi(argv[i+1].c_str());
    else if ( argv[i] == "max_outstanding_requests")
      m_max_outstanding_requests = atoi(argv[i+1].c_str());
    else if ( argv[i] == "deadlock_threshold")
      m_deadlock_threshold = atoi(argv[i+1].c_str());
    else {
      cerr << "WARNING: Sequencer: Unknown configuration parameter: " << argv[i] << endl;
      assert(false);
    }
  }
  assert(m_max_outstanding_requests > 0);
  assert(m_deadlock_threshold > 0);
  assert(m_version > -1);
  assert(m_instCache_ptr != NULL);
  assert(m_dataCache_ptr != NULL);
  assert(m_controller != NULL);
}
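
// A minimal sketch (hypothetical names and values) of the flat key/value
// argument list init() expects: each key is immediately followed by its
// value, and all six parameters must be supplied or the asserts above fire.
//
//   vector<string> argv;
//   argv.push_back("controller");               argv.push_back("L1Cache_0");
//   argv.push_back("icache");                   argv.push_back("L1Cache_0_icache");
//   argv.push_back("dcache");                   argv.push_back("L1Cache_0_dcache");
//   argv.push_back("version");                  argv.push_back("0");
//   argv.push_back("max_outstanding_requests"); argv.push_back("16");
//   argv.push_back("deadlock_threshold");       argv.push_back("500000");
//   sequencer->init(argv);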

Sequencer::~Sequencer() {

}

void Sequencer::wakeup() {
  // Check for deadlock of any of the requests
  Time current_time = g_eventQueue_ptr->getTime();

  // Check across all outstanding requests
  int total_outstanding = 0;

  Vector<Address> keys = m_readRequestTable.keys();
  for (int i=0; i<keys.size(); i++) {
    SequencerRequest* request = m_readRequestTable.lookup(keys[i]);
    if (current_time - request->issue_time >= m_deadlock_threshold) {
      WARN_MSG("Possible Deadlock detected");
      WARN_EXPR(request);
      WARN_EXPR(m_version);
      WARN_EXPR(keys.size());
      WARN_EXPR(current_time);
      WARN_EXPR(request->issue_time);
      WARN_EXPR(current_time - request->issue_time);
      ERROR_MSG("Aborting");
    }
  }

  keys = m_writeRequestTable.keys();
  for (int i=0; i<keys.size(); i++) {
    SequencerRequest* request = m_writeRequestTable.lookup(keys[i]);
    if (current_time - request->issue_time >= m_deadlock_threshold) {
      WARN_MSG("Possible Deadlock detected");
      WARN_EXPR(request);
      WARN_EXPR(m_version);
      WARN_EXPR(current_time);
      WARN_EXPR(request->issue_time);
      WARN_EXPR(current_time - request->issue_time);
      WARN_EXPR(keys.size());
      ERROR_MSG("Aborting");
    }
  }
  total_outstanding += m_writeRequestTable.size() + m_readRequestTable.size();

  assert(m_outstanding_count == total_outstanding);

  if (m_outstanding_count > 0) { // If there are still outstanding requests, keep checking
    g_eventQueue_ptr->scheduleEvent(this, m_deadlock_threshold);
  } else {
    m_deadlock_check_scheduled = false;
  }
}
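
// Worked example (hypothetical numbers): with deadlock_threshold = 500000,
// a request issued at cycle T is flagged if it is still sitting in its
// request table when a wakeup() fires at or after cycle T + 500000. Since
// wakeup() reschedules itself every m_deadlock_threshold cycles while any
// request is outstanding, a stuck request is reported at most
// 2 * m_deadlock_threshold cycles after it was issued.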

void Sequencer::printProgress(ostream& out) const{
  /*
  int total_demand = 0;
  out << "Sequencer Stats Version " << m_version << endl;
  out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
  out << "---------------" << endl;
  out << "outstanding requests" << endl;

  Vector<Address> rkeys = m_readRequestTable.keys();
  int read_size = rkeys.size();
  out << "proc " << m_version << " Read Requests = " << read_size << endl;
  // print the request table
  for(int i=0; i < read_size; ++i){
    SequencerRequest * request = m_readRequestTable.lookup(rkeys[i]);
    out << "\tRequest[ " << i << " ] = " << request->type << " Address " << rkeys[i] << " Posted " << request->issue_time << " PF " << PrefetchBit_No << endl;
    total_demand++;
  }

  Vector<Address> wkeys = m_writeRequestTable.keys();
  int write_size = wkeys.size();
  out << "proc " << m_version << " Write Requests = " << write_size << endl;
  // print the request table
  for(int i=0; i < write_size; ++i){
    CacheMsg & request = m_writeRequestTable.lookup(wkeys[i]);
    out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << wkeys[i] << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl;
    if( request.getPrefetch() == PrefetchBit_No ){
      total_demand++;
    }
  }

  out << endl;

  out << "Total Number Outstanding: " << m_outstanding_count << endl;
  out << "Total Number Demand     : " << total_demand << endl;
  out << "Total Number Prefetches : " << m_outstanding_count - total_demand << endl;
  out << endl;
  out << endl;
  */
}

void Sequencer::printConfig(ostream& out) const {
  out << "Sequencer config: " << m_name << endl;
  out << "  controller: " << m_controller->getName() << endl;
  out << "  version: " << m_version << endl;
  out << "  max_outstanding_requests: " << m_max_outstanding_requests << endl;
  out << "  deadlock_threshold: " << m_deadlock_threshold << endl;
}

// Insert the request on the correct request table. Returns true if the
// entry was already present. (In practice the duplicate paths below
// assert instead, so this currently always returns false.)
bool Sequencer::insertRequest(SequencerRequest* request) {
  int total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();

  assert(m_outstanding_count == total_outstanding);

  // See if we should schedule a deadlock check
  if (m_deadlock_check_scheduled == false) {
    g_eventQueue_ptr->scheduleEvent(this, m_deadlock_threshold);
    m_deadlock_check_scheduled = true;
  }

  Address line_addr(request->ruby_request.paddr);
  line_addr.makeLineAddress();
  if ((request->ruby_request.type == RubyRequestType_ST) ||
      (request->ruby_request.type == RubyRequestType_RMW_Read) ||
      (request->ruby_request.type == RubyRequestType_RMW_Write) ||
      (request->ruby_request.type == RubyRequestType_Locked_Read) ||
      (request->ruby_request.type == RubyRequestType_Locked_Write)) {
    if (m_writeRequestTable.exist(line_addr)) {
      m_writeRequestTable.lookup(line_addr) = request;
      // return true;
      assert(0); // drh5: isn't this an error? do you lose the initial request?
    }
    m_writeRequestTable.allocate(line_addr);
    m_writeRequestTable.lookup(line_addr) = request;
    m_outstanding_count++;
  } else {
    if (m_readRequestTable.exist(line_addr)) {
      m_readRequestTable.lookup(line_addr) = request;
      // return true;
      assert(0); // drh5: isn't this an error? do you lose the initial request?
    }
    m_readRequestTable.allocate(line_addr);
    m_readRequestTable.lookup(line_addr) = request;
    m_outstanding_count++;
  }

  g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);

  total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
  assert(m_outstanding_count == total_outstanding);

  return false;
}
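
// Illustrative note (hypothetical addresses, assuming 64-byte lines): the
// tables are indexed by *line* address, so two outstanding stores to
// 0x1000 and 0x1020 would collide on line 0x1000 and trip the assert(0)
// above. Normally isReady() rejects the second request before it ever
// reaches insertRequest(), which is why the duplicate path asserts.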

void Sequencer::removeRequest(SequencerRequest* srequest) {

  assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());

  const RubyRequest & ruby_request = srequest->ruby_request;
  Address line_addr(ruby_request.paddr);
  line_addr.makeLineAddress();
  if ((ruby_request.type == RubyRequestType_ST) ||
      (ruby_request.type == RubyRequestType_RMW_Read) ||
      (ruby_request.type == RubyRequestType_RMW_Write) ||
      (ruby_request.type == RubyRequestType_Locked_Read) ||
      (ruby_request.type == RubyRequestType_Locked_Write)) {
    m_writeRequestTable.deallocate(line_addr);
  } else {
    m_readRequestTable.deallocate(line_addr);
  }
  m_outstanding_count--;

  assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());
}

void Sequencer::writeCallback(const Address& address, DataBlock& data) {

  assert(address == line_address(address));
  assert(m_writeRequestTable.exist(line_address(address)));

  SequencerRequest* request = m_writeRequestTable.lookup(address);
  removeRequest(request);

  assert((request->ruby_request.type == RubyRequestType_ST) ||
         (request->ruby_request.type == RubyRequestType_RMW_Read) ||
         (request->ruby_request.type == RubyRequestType_RMW_Write) ||
         (request->ruby_request.type == RubyRequestType_Locked_Read) ||
         (request->ruby_request.type == RubyRequestType_Locked_Write));
  // POLINA: the assumption is that atomics are handled only by the data cache, never the instruction cache
  if (request->ruby_request.type == RubyRequestType_Locked_Read) {
    m_dataCache_ptr->setLocked(address, m_version);
  }

  hitCallback(request, data);
}

void Sequencer::readCallback(const Address& address, DataBlock& data) {

  assert(address == line_address(address));
  assert(m_readRequestTable.exist(line_address(address)));

  SequencerRequest* request = m_readRequestTable.lookup(address);
  removeRequest(request);

  assert((request->ruby_request.type == RubyRequestType_LD) ||
         (request->ruby_request.type == RubyRequestType_IFETCH));

  hitCallback(request, data);
}

void Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data) {
  const RubyRequest & ruby_request = srequest->ruby_request;
  Address request_address(ruby_request.paddr);
  Address request_line_address(ruby_request.paddr);
  request_line_address.makeLineAddress();
  RubyRequestType type = ruby_request.type;
  Time issued_time = srequest->issue_time;

  // Set this cache entry to the most recently used
  if (type == RubyRequestType_IFETCH) {
    if (m_instCache_ptr->isTagPresent(request_line_address))
      m_instCache_ptr->setMRU(request_line_address);
  } else {
    if (m_dataCache_ptr->isTagPresent(request_line_address))
      m_dataCache_ptr->setMRU(request_line_address);
  }

  assert(g_eventQueue_ptr->getTime() >= issued_time);
  Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;

  // Profile the miss latency for all non-zero demand misses
  if (miss_latency != 0) {
    g_system_ptr->getProfiler()->missLatency(miss_latency, type);

    if (Debug::getProtocolTrace()) {
      g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(ruby_request.paddr),
                                                     "", "Done", "", int_to_string(miss_latency)+" cycles");
    }
  }
  /*
  if (request.getPrefetch() == PrefetchBit_Yes) {
    return; // Ignore the prefetch
  }
  */

  // Update the data: loads and ifetches copy out of the block into the
  // requestor's buffer; stores and atomics copy into the block
  if (ruby_request.data != NULL) {
    if ((type == RubyRequestType_LD) ||
        (type == RubyRequestType_IFETCH)) {
      memcpy(ruby_request.data, data.getData(request_address.getOffset(), ruby_request.len), ruby_request.len);
    } else {
      data.setData(ruby_request.data, request_address.getOffset(), ruby_request.len);
    }
  }

  m_hit_callback(srequest->id);
  delete srequest;
}

// Returns false if the sequencer already has a load or store outstanding
// to the same cache line, or has reached its outstanding-request limit
bool Sequencer::isReady(const RubyRequest& request) const {
  // POLINA: if we are currently flushing the write buffer, Ruby should
  // report itself as not ready, to simulate stalling of the front-end.
  // Do we stall all the sequencers? If it is an atomic instruction - yes!
  if (m_outstanding_count >= m_max_outstanding_requests) {
    return false;
  }

  if( m_writeRequestTable.exist(line_address(Address(request.paddr))) ||
      m_readRequestTable.exist(line_address(Address(request.paddr))) ){
    //cout << "OUTSTANDING REQUEST EXISTS " << p << " VER " << m_version << endl;
    //printProgress(cout);
    return false;
  }

  return true;
}

bool Sequencer::empty() const {
  return (m_writeRequestTable.size() == 0) && (m_readRequestTable.size() == 0);
}

int64_t Sequencer::makeRequest(const RubyRequest & request)
{
  assert(Address(request.paddr).getOffset() + request.len <= RubySystem::getBlockSizeBytes());
  if (isReady(request)) {
    int64_t id = makeUniqueRequestID();
    SequencerRequest *srequest = new SequencerRequest(request, id, g_eventQueue_ptr->getTime());
    bool found = insertRequest(srequest);
    if (!found) { // insertRequest() currently always returns false; duplicates assert instead
      if (request.type == RubyRequestType_Locked_Write) {
        // NOTE: it is OK to check the locked flag here as the mandatory queue
        // will be checked first, ensuring that nothing comes between checking
        // the flag and servicing the store
        if (!m_dataCache_ptr->isLocked(line_address(Address(request.paddr)), m_version)) {
          return LLSC_FAIL;
        } else {
          m_dataCache_ptr->clearLocked(line_address(Address(request.paddr)));
        }
      }
      issueRequest(request);
    }

    // TODO: issue hardware prefetches here
    return id;
  } else {
    return -1;
  }
}
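
// Sketch of the LL/SC flow implemented above and in writeCallback()
// (hypothetical address, assuming no intervening store to the line):
//
//   makeRequest(LL to 0x1000)  -> issued as Locked_Read
//   writeCallback(0x1000)      -> m_dataCache_ptr->setLocked(0x1000, m_version)
//   makeRequest(SC to 0x1000)  -> isLocked() still true: clearLocked(),
//                                 store is issued, unique id returned
//
// If another processor wrote the line in between, the lock bit is expected
// to have been cleared, isLocked() fails, and makeRequest() returns
// LLSC_FAIL instead of issuing the store.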

void Sequencer::issueRequest(const RubyRequest& request) {

  // TODO: get rid of CacheMsg, CacheRequestType, and AccessModeType, & have SLICC use RubyRequest and subtypes natively
  CacheRequestType ctype;
  switch(request.type) {
  case RubyRequestType_IFETCH:
    ctype = CacheRequestType_IFETCH;
    break;
  case RubyRequestType_LD:
    ctype = CacheRequestType_LD;
    break;
  case RubyRequestType_ST:
    ctype = CacheRequestType_ST;
    break;
  case RubyRequestType_Locked_Read:
  case RubyRequestType_Locked_Write:
    // locked accesses look like ordinary stores to the protocol
    ctype = CacheRequestType_ST;
    break;
  case RubyRequestType_RMW_Read:
  case RubyRequestType_RMW_Write:
    ctype = CacheRequestType_ATOMIC;
    break;
  default:
    assert(0);
  }
  AccessModeType amtype;
  switch(request.access_mode){
  case RubyAccessMode_User:
    amtype = AccessModeType_UserMode;
    break;
  case RubyAccessMode_Supervisor:
    amtype = AccessModeType_SupervisorMode;
    break;
  case RubyAccessMode_Device:
    amtype = AccessModeType_UserMode;
    break;
  default:
    assert(0);
  }
  Address line_addr(request.paddr);
  line_addr.makeLineAddress();
  CacheMsg msg(line_addr, Address(request.paddr), ctype, Address(request.pc), amtype, request.len, PrefetchBit_No);

  if (Debug::getProtocolTrace()) {
    g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(request.paddr),
                                                   "", "Begin", "", RubyRequestType_to_string(request.type));
  }

  if (g_system_ptr->getTracer()->traceEnabled()) {
    g_system_ptr->getTracer()->traceRequest(m_name, line_addr, Address(request.pc),
                                            request.type, g_eventQueue_ptr->getTime());
  }

  Time latency = 0; // initialized to an invalid value; assert(latency > 0) below enforces that it is set

  if (request.type == RubyRequestType_IFETCH)
    latency = m_instCache_ptr->getLatency();
  else
    latency = m_dataCache_ptr->getLatency();

  // Send the message to the cache controller
  assert(latency > 0);

  m_mandatory_q_ptr->enqueue(msg, latency);
}
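
// Example mapping performed above (values follow directly from the two
// switches; address is hypothetical): a user-mode Locked_Write of 4 bytes
// at paddr 0x1004 becomes a CacheMsg with type CacheRequestType_ST, access
// mode AccessModeType_UserMode, and line address 0x1000 (assuming 64-byte
// lines), enqueued on the mandatory queue after the data cache's latency.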
/*
bool Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type,
                               AccessModeType access_mode,
                               int size, DataBlock*& data_ptr) {
  if (type == CacheRequestType_IFETCH) {
    return m_instCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
  } else {
    return m_dataCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
  }
}
*/

void Sequencer::print(ostream& out) const {
  out << "[Sequencer: " << m_version
      << ", outstanding requests: " << m_outstanding_count;

  out << ", read request table: " << m_readRequestTable
      << ", write request table: " << m_writeRequestTable;
  out << "]";
}

// This can be called from setState whenever coherence permissions are upgraded;
// when invoked, coherence violations will be checked for the given block
void Sequencer::checkCoherence(const Address& addr) {
#ifdef CHECK_COHERENCE
  g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}