Locked requests should actually be converted to ST rather than ATOMIC, because ATOMIC...
gem5.git: src/mem/ruby/system/Sequencer.cc
/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "mem/ruby/common/Global.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/protocol/Protocol.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/system/CacheMemory.hh"
#include "mem/protocol/CacheMsg.hh"
#include "mem/ruby/recorder/Tracer.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/gems_common/Map.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"

//Sequencer::Sequencer(int core_id, MessageBuffer* mandatory_q)

Sequencer::Sequencer(const string & name)
  :RubyPort(name)
{
}

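// A hypothetical example of the flat key/value argument vector parsed by
// init() below; the "controller" value "L1Cache" matches the inline comment
// in the code, the remaining names and numbers are illustrative only and
// would actually come from RubySystem's configuration:
//   { "controller", "L1Cache", "version", "0", "icache", "L1Cache_I",
//     "dcache", "L1Cache_D", "max_outstanding_requests", "16",
//     "deadlock_threshold", "500000" }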
void Sequencer::init(const vector<string> & argv)
{
  m_deadlock_check_scheduled = false;
  m_outstanding_count = 0;

  m_max_outstanding_requests = 0;
  m_deadlock_threshold = 0;
  m_version = -1;
  m_instCache_ptr = NULL;
  m_dataCache_ptr = NULL;
  m_controller = NULL;
  for (size_t i=0; i<argv.size(); i+=2) {
    if ( argv[i] == "controller") {
      m_controller = RubySystem::getController(argv[i+1]); // args[i] = "L1Cache"
      m_mandatory_q_ptr = m_controller->getMandatoryQueue();
    } else if ( argv[i] == "icache")
      m_instCache_ptr = RubySystem::getCache(argv[i+1]);
    else if ( argv[i] == "dcache")
      m_dataCache_ptr = RubySystem::getCache(argv[i+1]);
    else if ( argv[i] == "version")
      m_version = atoi(argv[i+1].c_str());
    else if ( argv[i] == "max_outstanding_requests")
      m_max_outstanding_requests = atoi(argv[i+1].c_str());
    else if ( argv[i] == "deadlock_threshold")
      m_deadlock_threshold = atoi(argv[i+1].c_str());
    else {
      cerr << "WARNING: Sequencer: Unknown configuration parameter: " << argv[i] << endl;
      assert(false);
    }
  }
  assert(m_max_outstanding_requests > 0);
  assert(m_deadlock_threshold > 0);
  assert(m_version > -1);
  assert(m_instCache_ptr != NULL);
  assert(m_dataCache_ptr != NULL);
  assert(m_controller != NULL);
}

Sequencer::~Sequencer() {

}

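// Deadlock watchdog: scheduled by insertRequest() when requests become
// outstanding and re-armed below as long as any remain.  Any request that
// has been outstanding for m_deadlock_threshold cycles or more is reported
// as a possible deadlock and the simulation is aborted.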
void Sequencer::wakeup() {
  // Check for deadlock of any of the requests
  Time current_time = g_eventQueue_ptr->getTime();

  // Check across all outstanding requests
  int total_outstanding = 0;

  Vector<Address> keys = m_readRequestTable.keys();
  for (int i=0; i<keys.size(); i++) {
    SequencerRequest* request = m_readRequestTable.lookup(keys[i]);
    if (current_time - request->issue_time >= m_deadlock_threshold) {
      WARN_MSG("Possible Deadlock detected");
      WARN_EXPR(request);
      WARN_EXPR(m_version);
      WARN_EXPR(keys.size());
      WARN_EXPR(current_time);
      WARN_EXPR(request->issue_time);
      WARN_EXPR(current_time - request->issue_time);
      ERROR_MSG("Aborting");
    }
  }

  keys = m_writeRequestTable.keys();
  for (int i=0; i<keys.size(); i++) {
    SequencerRequest* request = m_writeRequestTable.lookup(keys[i]);
    if (current_time - request->issue_time >= m_deadlock_threshold) {
      WARN_MSG("Possible Deadlock detected");
      WARN_EXPR(request);
      WARN_EXPR(m_version);
      WARN_EXPR(current_time);
      WARN_EXPR(request->issue_time);
      WARN_EXPR(current_time - request->issue_time);
      WARN_EXPR(keys.size());
      ERROR_MSG("Aborting");
    }
  }
  total_outstanding += m_writeRequestTable.size() + m_readRequestTable.size();

  assert(m_outstanding_count == total_outstanding);

  if (m_outstanding_count > 0) { // If there are still outstanding requests, keep checking
    g_eventQueue_ptr->scheduleEvent(this, m_deadlock_threshold);
  } else {
    m_deadlock_check_scheduled = false;
  }
}

void Sequencer::printProgress(ostream& out) const{
  /*
  int total_demand = 0;
  out << "Sequencer Stats Version " << m_version << endl;
  out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
  out << "---------------" << endl;
  out << "outstanding requests" << endl;

  Vector<Address> rkeys = m_readRequestTable.keys();
  int read_size = rkeys.size();
  out << "proc " << m_version << " Read Requests = " << read_size << endl;
  // print the request table
  for(int i=0; i < read_size; ++i){
    SequencerRequest * request = m_readRequestTable.lookup(rkeys[i]);
    out << "\tRequest[ " << i << " ] = " << request->type << " Address " << rkeys[i] << " Posted " << request->issue_time << " PF " << PrefetchBit_No << endl;
    total_demand++;
  }

  Vector<Address> wkeys = m_writeRequestTable.keys();
  int write_size = wkeys.size();
  out << "proc " << m_version << " Write Requests = " << write_size << endl;
  // print the request table
  for(int i=0; i < write_size; ++i){
    CacheMsg & request = m_writeRequestTable.lookup(wkeys[i]);
    out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << wkeys[i] << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl;
    if( request.getPrefetch() == PrefetchBit_No ){
      total_demand++;
    }
  }

  out << endl;

  out << "Total Number Outstanding: " << m_outstanding_count << endl;
  out << "Total Number Demand     : " << total_demand << endl;
  out << "Total Number Prefetches : " << m_outstanding_count - total_demand << endl;
  out << endl;
  out << endl;
  */
}

void Sequencer::printConfig(ostream& out) const {
  out << "Sequencer config: " << m_name << endl;
  out << "  controller: " << m_controller->getName() << endl;
  out << "  version: " << m_version << endl;
  out << "  max_outstanding_requests: " << m_max_outstanding_requests << endl;
  out << "  deadlock_threshold: " << m_deadlock_threshold << endl;
}

// Insert the request on the correct request table.  Return true if
// the entry was already present.
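// Note: in the current code a duplicate entry for the same cache line trips
// the assert(0) below (the commented-out "return true" path), so in practice
// the function always returns false when it returns at all.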
bool Sequencer::insertRequest(SequencerRequest* request) {
  int total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();

  assert(m_outstanding_count == total_outstanding);

  // See if we should schedule a deadlock check
  if (m_deadlock_check_scheduled == false) {
    g_eventQueue_ptr->scheduleEvent(this, m_deadlock_threshold);
    m_deadlock_check_scheduled = true;
  }

  Address line_addr(request->ruby_request.paddr);
  line_addr.makeLineAddress();
  if ((request->ruby_request.type == RubyRequestType_ST) ||
      (request->ruby_request.type == RubyRequestType_Locked_Read) ||
      (request->ruby_request.type == RubyRequestType_Locked_Write)) {
    if (m_writeRequestTable.exist(line_addr)) {
      m_writeRequestTable.lookup(line_addr) = request;
      //      return true;
      assert(0); // drh5: isn't this an error?  do you lose the initial request?
    }
    m_writeRequestTable.allocate(line_addr);
    m_writeRequestTable.lookup(line_addr) = request;
    m_outstanding_count++;
  } else {
    if (m_readRequestTable.exist(line_addr)) {
      m_readRequestTable.lookup(line_addr) = request;
      //      return true;
      assert(0); // drh5: isn't this an error?  do you lose the initial request?
    }
    m_readRequestTable.allocate(line_addr);
    m_readRequestTable.lookup(line_addr) = request;
    m_outstanding_count++;
  }

  g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);

  total_outstanding = m_writeRequestTable.size() + m_readRequestTable.size();
  assert(m_outstanding_count == total_outstanding);

  return false;
}

void Sequencer::removeRequest(SequencerRequest* srequest) {

  assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());

  const RubyRequest & ruby_request = srequest->ruby_request;
  Address line_addr(ruby_request.paddr);
  line_addr.makeLineAddress();
  if ((ruby_request.type == RubyRequestType_ST) ||
      (ruby_request.type == RubyRequestType_Locked_Read) ||
      (ruby_request.type == RubyRequestType_Locked_Write)) {
    m_writeRequestTable.deallocate(line_addr);
  } else {
    m_readRequestTable.deallocate(line_addr);
  }
  m_outstanding_count--;

  assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());
}

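// LL/SC handling: when a Locked_Read (load-linked) completes here, the line
// is marked locked in the data cache via setLocked().  The matching
// Locked_Write (store-conditional) later checks and clears that lock bit in
// makeRequest(), and fails with -2 if the lock has been lost in the meantime.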
void Sequencer::writeCallback(const Address& address, DataBlock& data) {

  assert(address == line_address(address));
  assert(m_writeRequestTable.exist(line_address(address)));

  SequencerRequest* request = m_writeRequestTable.lookup(address);
  removeRequest(request);

  assert((request->ruby_request.type == RubyRequestType_ST) ||
         (request->ruby_request.type == RubyRequestType_Locked_Read) ||
         (request->ruby_request.type == RubyRequestType_Locked_Write));
  // POLINA: the assumption is that atomics are only on data cache and not instruction cache
  if (request->ruby_request.type == RubyRequestType_Locked_Read) {
    m_dataCache_ptr->setLocked(address, m_version);
  }

  hitCallback(request, data);
}

void Sequencer::readCallback(const Address& address, DataBlock& data) {

  assert(address == line_address(address));
  assert(m_readRequestTable.exist(line_address(address)));

  SequencerRequest* request = m_readRequestTable.lookup(address);
  removeRequest(request);

  assert((request->ruby_request.type == RubyRequestType_LD) ||
         (request->ruby_request.type == RubyRequestType_IFETCH));

  hitCallback(request, data);
}

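// Common completion path for reads and writes: touch the line in the
// appropriate cache to update LRU state, profile the miss latency, copy data
// between the cache block and the request buffer (loads/ifetches copy out of
// the block, stores/atomics copy into it), then invoke the registered hit
// callback and free the request.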
void Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data) {
  const RubyRequest & ruby_request = srequest->ruby_request;
  Address request_address(ruby_request.paddr);
  Address request_line_address(ruby_request.paddr);
  request_line_address.makeLineAddress();
  RubyRequestType type = ruby_request.type;
  Time issued_time = srequest->issue_time;

  // Set this cache entry to the most recently used
  if (type == RubyRequestType_IFETCH) {
    if (m_instCache_ptr->isTagPresent(request_line_address) )
      m_instCache_ptr->setMRU(request_line_address);
  } else {
    if (m_dataCache_ptr->isTagPresent(request_line_address) )
      m_dataCache_ptr->setMRU(request_line_address);
  }

  assert(g_eventQueue_ptr->getTime() >= issued_time);
  Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;

  // Profile the miss latency for all non-zero demand misses
  if (miss_latency != 0) {
    g_system_ptr->getProfiler()->missLatency(miss_latency, type);

    if (Debug::getProtocolTrace()) {
      g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(ruby_request.paddr),
                                                     "", "Done", "", int_to_string(miss_latency)+" cycles");
    }
  }
  /*
  if (request.getPrefetch() == PrefetchBit_Yes) {
    return; // Ignore the prefetch
  }
  */

  // update the data
  if (ruby_request.data != NULL) {
    if ((type == RubyRequestType_LD) ||
        (type == RubyRequestType_IFETCH)) {
      memcpy(ruby_request.data, data.getData(request_address.getOffset(), ruby_request.len), ruby_request.len);
    } else {
      data.setData(ruby_request.data, request_address.getOffset(), ruby_request.len);
    }
  }

  m_hit_callback(srequest->id);
  delete srequest;
}

// Returns true if the sequencer can accept a new request: it is below its
// outstanding-request limit and has no load or store already outstanding
// for the same cache line.
bool Sequencer::isReady(const RubyRequest& request) const {
  // POLINA: check if we are currently flushing the write buffer, if so Ruby is returned as not ready
  // to simulate stalling of the front-end
  // Do we stall all the sequencers? If it is atomic instruction - yes!
  if (m_outstanding_count >= m_max_outstanding_requests) {
    return false;
  }

  if( m_writeRequestTable.exist(line_address(Address(request.paddr))) ||
      m_readRequestTable.exist(line_address(Address(request.paddr))) ){
    //cout << "OUTSTANDING REQUEST EXISTS " << p << " VER " << m_version << endl;
    //printProgress(cout);
    return false;
  }

  return true;
}

bool Sequencer::empty() const {
  return (m_writeRequestTable.size() == 0) && (m_readRequestTable.size() == 0);
}


// -2 means that the LLSC failed
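// Return values: a non-negative unique request id if the request was
// accepted (completion is signalled later through the registered hit
// callback), -1 if the sequencer is not ready (the caller should retry),
// and -2 if a Locked_Write's store-conditional check failed.
//
// A minimal, purely hypothetical sketch of caller-side handling; the names
// below are illustrative and not part of this interface:
/*
  int64_t id = sequencer->makeRequest(req);
  if (id == -1) {
    // not ready: table full or the line already has an outstanding request;
    // stall the front-end and retry later
  } else if (id == -2) {
    // store-conditional failed: report SC failure to the CPU model
  } else {
    // accepted: remember id and wait for the hit callback
  }
*/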
int64_t Sequencer::makeRequest(const RubyRequest & request)
{
  assert(Address(request.paddr).getOffset() + request.len <= RubySystem::getBlockSizeBytes());
  if (isReady(request)) {
    int64_t id = makeUniqueRequestID();
    SequencerRequest *srequest = new SequencerRequest(request, id, g_eventQueue_ptr->getTime());
    bool found = insertRequest(srequest);
    if (!found)
      if (request.type == RubyRequestType_Locked_Write) {
        if (!m_dataCache_ptr->isLocked(line_address(Address(request.paddr)), m_version)) {
          return -2;
        }
        else {
          m_dataCache_ptr->clearLocked(line_address(Address(request.paddr)));
        }
      }
    issueRequest(request);

    // TODO: issue hardware prefetches here
    return id;
  }
  else {
    return -1;
  }
}

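// As the commit message at the top of this page notes, Locked_Read and
// Locked_Write are issued to the protocol as ordinary ST requests (see the
// switch below); the LL/SC bookkeeping is handled entirely in the sequencer
// via the cache line lock bit rather than with a separate ATOMIC request type.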
void Sequencer::issueRequest(const RubyRequest& request) {

  // TODO: get rid of CacheMsg, CacheRequestType, and AccessModeType, & have SLICC use RubyRequest and subtypes natively
  CacheRequestType ctype;
  switch(request.type) {
  case RubyRequestType_IFETCH:
    ctype = CacheRequestType_IFETCH;
    break;
  case RubyRequestType_LD:
    ctype = CacheRequestType_LD;
    break;
  case RubyRequestType_ST:
    ctype = CacheRequestType_ST;
    break;
  case RubyRequestType_Locked_Read:
    ctype = CacheRequestType_ST;
    break;
  case RubyRequestType_Locked_Write:
    ctype = CacheRequestType_ST;
    break;
  default:
    assert(0);
  }
  AccessModeType amtype;
  switch(request.access_mode){
  case RubyAccessMode_User:
    amtype = AccessModeType_UserMode;
    break;
  case RubyAccessMode_Supervisor:
    amtype = AccessModeType_SupervisorMode;
    break;
  case RubyAccessMode_Device:
    amtype = AccessModeType_UserMode;
    break;
  default:
    assert(0);
  }
  Address line_addr(request.paddr);
  line_addr.makeLineAddress();
  CacheMsg msg(line_addr, Address(request.paddr), ctype, Address(request.pc), amtype, request.len, PrefetchBit_No);

  if (Debug::getProtocolTrace()) {
    g_system_ptr->getProfiler()->profileTransition("Seq", m_version, Address(request.paddr),
                                                   "", "Begin", "", RubyRequestType_to_string(request.type));
  }

  if (g_system_ptr->getTracer()->traceEnabled()) {
    g_system_ptr->getTracer()->traceRequest(m_name, line_addr, Address(request.pc),
                                            request.type, g_eventQueue_ptr->getTime());
  }

  Time latency = 0;  // initialized to a null value

  if (request.type == RubyRequestType_IFETCH)
    latency = m_instCache_ptr->getLatency();
  else
    latency = m_dataCache_ptr->getLatency();

  // Send the message to the cache controller
  assert(latency > 0);


  m_mandatory_q_ptr->enqueue(msg, latency);
}
/*
bool Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type,
                               AccessModeType access_mode,
                               int size, DataBlock*& data_ptr) {
  if (type == CacheRequestType_IFETCH) {
    return m_instCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
  } else {
    return m_dataCache_ptr->tryCacheAccess(line_address(addr), type, data_ptr);
  }
}
*/

void Sequencer::print(ostream& out) const {
  out << "[Sequencer: " << m_version
      << ", outstanding requests: " << m_outstanding_count;

  out << ", read request table: " << m_readRequestTable
      << ", write request table: " << m_writeRequestTable;
  out << "]";
}

// this can be called from setState whenever coherence permissions are upgraded
// when invoked, coherence violations will be checked for the given block
void Sequencer::checkCoherence(const Address& addr) {
#ifdef CHECK_COHERENCE
  g_system_ptr->checkGlobalCoherenceInvariant(addr);
#endif
}

/*
bool Sequencer::getRubyMemoryValue(const Address& addr, char* value,
                                   unsigned int size_in_bytes )
{
  bool found = false;
  const Address lineAddr = line_address(addr);
  DataBlock data;
  PhysAddress paddr(addr);
  DataBlock* dataPtr = &data;

  MachineID l2_mach = map_L2ChipId_to_L2Cache(addr, m_chip_ptr->getID() );
  int l2_ver = l2_mach.num%RubyConfig::numberOfL2CachePerChip();

  if (Protocol::m_TwoLevelCache) {
    if(Protocol::m_CMP){
      assert(n->m_L2Cache_L2cacheMemory_vec[l2_ver] != NULL);
    }
    else{
      assert(n->m_L1Cache_cacheMemory_vec[m_version] != NULL);
    }
  }

  if (n->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_IFETCH, dataPtr)){
    n->m_L1Cache_L1IcacheMemory_vec[m_version]->getMemoryValue(addr, value, size_in_bytes);
    found = true;
  } else if (n->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
    n->m_L1Cache_L1DcacheMemory_vec[m_version]->getMemoryValue(addr, value, size_in_bytes);
    found = true;
  } else if (Protocol::m_CMP && n->m_L2Cache_L2cacheMemory_vec[l2_ver]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
    n->m_L2Cache_L2cacheMemory_vec[l2_ver]->getMemoryValue(addr, value, size_in_bytes);
    found = true;
    //  } else if (n->TBE_TABLE_MEMBER_VARIABLE->isPresent(lineAddr)){
    //    ASSERT(n->TBE_TABLE_MEMBER_VARIABLE->isPresent(lineAddr));
    //    L1Cache_TBE tbeEntry = n->TBE_TABLE_MEMBER_VARIABLE->lookup(lineAddr);

    //    int offset = addr.getOffset();
    //    for(int i=0; i<size_in_bytes; ++i){
    //      value[i] = tbeEntry.getDataBlk().getByte(offset + i);
    //    }

    //    found = true;
  } else {
    // Address not found
    //cout << " " << m_chip_ptr->getID() << " NOT IN CACHE, Value at Directory is: " << (int) value[0] << endl;
    n = dynamic_cast<Chip*>(g_system_ptr->getChip(map_Address_to_DirectoryNode(addr)/RubyConfig::numberOfDirectoryPerChip()));
    int dir_version = map_Address_to_DirectoryNode(addr)%RubyConfig::numberOfDirectoryPerChip();
    for(unsigned int i=0; i<size_in_bytes; ++i){
      int offset = addr.getOffset();
      value[i] = n->m_Directory_directory_vec[dir_version]->lookup(lineAddr).m_DataBlk.getByte(offset + i);
    }
    // Address not found
    //WARN_MSG("Couldn't find address");
    //WARN_EXPR(addr);
    found = false;
  }
  return true;
}

bool Sequencer::setRubyMemoryValue(const Address& addr, char *value,
                                   unsigned int size_in_bytes) {
  char test_buffer[64];

  // idea here is that coherent cache should find the
  // latest data, then update it
  bool found = false;
  const Address lineAddr = line_address(addr);
  PhysAddress paddr(addr);
  DataBlock data;
  DataBlock* dataPtr = &data;
  Chip* n = dynamic_cast<Chip*>(m_chip_ptr);

  MachineID l2_mach = map_L2ChipId_to_L2Cache(addr, m_chip_ptr->getID() );
  int l2_ver = l2_mach.num%RubyConfig::numberOfL2CachePerChip();

  assert(n->m_L1Cache_L1IcacheMemory_vec[m_version] != NULL);
  assert(n->m_L1Cache_L1DcacheMemory_vec[m_version] != NULL);
  if (Protocol::m_TwoLevelCache) {
    if(Protocol::m_CMP){
      assert(n->m_L2Cache_L2cacheMemory_vec[l2_ver] != NULL);
    }
    else{
      assert(n->m_L1Cache_cacheMemory_vec[m_version] != NULL);
    }
  }

  if (n->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_IFETCH, dataPtr)){
    n->m_L1Cache_L1IcacheMemory_vec[m_version]->setMemoryValue(addr, value, size_in_bytes);
    found = true;
  } else if (n->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
    n->m_L1Cache_L1DcacheMemory_vec[m_version]->setMemoryValue(addr, value, size_in_bytes);
    found = true;
  } else if (Protocol::m_CMP && n->m_L2Cache_L2cacheMemory_vec[l2_ver]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
    n->m_L2Cache_L2cacheMemory_vec[l2_ver]->setMemoryValue(addr, value, size_in_bytes);
    found = true;
  } else {
    // Address not found
    n = dynamic_cast<Chip*>(g_system_ptr->getChip(map_Address_to_DirectoryNode(addr)/RubyConfig::numberOfDirectoryPerChip()));
    int dir_version = map_Address_to_DirectoryNode(addr)%RubyConfig::numberOfDirectoryPerChip();
    for(unsigned int i=0; i<size_in_bytes; ++i){
      int offset = addr.getOffset();
      n->m_Directory_directory_vec[dir_version]->lookup(lineAddr).m_DataBlk.setByte(offset + i, value[i]);
    }
    found = false;
  }

  if (found){
    found = getRubyMemoryValue(addr, test_buffer, size_in_bytes);
    assert(found);
    if(value[0] != test_buffer[0]){
      WARN_EXPR((int) value[0]);
      WARN_EXPR((int) test_buffer[0]);
      ERROR_MSG("setRubyMemoryValue failed to set value.");
    }
  }

  return true;
}
*/
/*

void
Sequencer::rubyMemAccess(const uint64 paddr, char* data, const int len, const AccessType type)
{
  if ( type == AccessType_Read || type == AccessType_Write ) {
    // need to break up the packet data
    uint64 guest_ptr = paddr;
    Vector<DataBlock*> datablocks;
    while (paddr + len != guest_ptr) {
      Address addr(guest_ptr);
      Address line_addr = line_address(addr);

      int bytes_copied;
      if (addr.getOffset() == 0) {
        bytes_copied = (guest_ptr + RubyConfig::dataBlockBytes() > paddr + len)?
          (paddr + len - guest_ptr):
          RubyConfig::dataBlockBytes();
      } else {
        bytes_copied = RubyConfig::dataBlockBytes() - addr.getOffset();
        if (guest_ptr + bytes_copied > paddr + len)
          bytes_copied = paddr + len - guest_ptr;
      }

      // first we need to find all data blocks that have to be updated for a write
      // and the highest block for a read
      for(int i=0;i<RubyConfig::numberOfProcessors();i++) {
        if (Protocol::m_TwoLevelCache){
          if(m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[i]->isTagPresent(line_address(addr)))
            datablocks.insertAtBottom(&m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[i]->lookup(line_addr).getDataBlk());
          if(m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[i]->isTagPresent(line_address(addr)))
            datablocks.insertAtBottom(&m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[i]->lookup(line_addr).getDataBlk());
        } else {
          if(m_chip_ptr->m_L1Cache_cacheMemory_vec[i]->isTagPresent(line_address(addr)))
            datablocks.insertAtBottom(&m_chip_ptr->m_L1Cache_cacheMemory_vec[i]->lookup(line_addr).getDataBlk());
        }
      }
      if (Protocol::m_TwoLevelCache){
        int l2_bank = map_L2ChipId_to_L2Cache(addr, 0).num; // TODO: ONLY WORKS WITH CMP!!!
        if (m_chip_ptr->m_L2Cache_L2cacheMemory_vec[l2_bank]->isTagPresent(line_address(Address(paddr)))) {
          datablocks.insertAtBottom(&m_chip_ptr->m_L2Cache_L2cacheMemory_vec[l2_bank]->lookup(addr).getDataBlk());
        }
      }
      assert(dynamic_cast<Chip*>(m_chip_ptr)->m_Directory_directory_vec.size() > map_Address_to_DirectoryNode(addr));
      DirectoryMemory* dir = dynamic_cast<Chip*>(m_chip_ptr)->m_Directory_directory_vec[map_Address_to_DirectoryNode(addr)];
      Directory_Entry& entry = dir->lookup(line_addr);
      datablocks.insertAtBottom(&entry.getDataBlk());

      if (pkt->isRead()){
        datablocks[0]->copyData(pkt_data, addr.getOffset(), bytes_copied);
      } else {// pkt->isWrite() {
        for (int i=0;i<datablocks.size();i++)
          datablocks[i]->setData(pkt_data, addr.getOffset(), bytes_copied);
      }

      guest_ptr += bytes_copied;
      pkt_data += bytes_copied;
      datablocks.clear();
    }
  }

*/