2 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "base/misc.hh"
#include "base/str.hh"
#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/MemoryAccess.hh"
#include "debug/ProtocolTrace.hh"
#include "mem/packet.hh"
#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/SubBlock.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/recorder/Tracer.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/system/CacheMemory.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "params/RubySequencer.hh"
50 RubySequencerParams::create()
52 return new Sequencer(this);
55 Sequencer::Sequencer(const Params
*p
)
56 : RubyPort(p
), deadlockCheckEvent(this)
58 m_store_waiting_on_load_cycles
= 0;
59 m_store_waiting_on_store_cycles
= 0;
60 m_load_waiting_on_store_cycles
= 0;
61 m_load_waiting_on_load_cycles
= 0;
63 m_outstanding_count
= 0;
65 m_max_outstanding_requests
= 0;
66 m_deadlock_threshold
= 0;
67 m_instCache_ptr
= NULL
;
68 m_dataCache_ptr
= NULL
;
70 m_instCache_ptr
= p
->icache
;
71 m_dataCache_ptr
= p
->dcache
;
72 m_max_outstanding_requests
= p
->max_outstanding_requests
;
73 m_deadlock_threshold
= p
->deadlock_threshold
;
75 assert(m_max_outstanding_requests
> 0);
76 assert(m_deadlock_threshold
> 0);
77 assert(m_instCache_ptr
!= NULL
);
78 assert(m_dataCache_ptr
!= NULL
);
80 m_usingNetworkTester
= p
->using_network_tester
;
83 Sequencer::~Sequencer()
90 // Check for deadlock of any of the requests
91 Time current_time
= g_eventQueue_ptr
->getTime();
93 // Check across all outstanding requests
94 int total_outstanding
= 0;
96 RequestTable::iterator read
= m_readRequestTable
.begin();
97 RequestTable::iterator read_end
= m_readRequestTable
.end();
98 for (; read
!= read_end
; ++read
) {
99 SequencerRequest
* request
= read
->second
;
100 if (current_time
- request
->issue_time
< m_deadlock_threshold
)
103 panic("Possible Deadlock detected. Aborting!\n"
104 "version: %d request.paddr: 0x%x m_readRequestTable: %d "
105 "current time: %u issue_time: %d difference: %d\n", m_version
,
106 request
->ruby_request
.m_PhysicalAddress
, m_readRequestTable
.size(),
107 current_time
, request
->issue_time
,
108 current_time
- request
->issue_time
);
111 RequestTable::iterator write
= m_writeRequestTable
.begin();
112 RequestTable::iterator write_end
= m_writeRequestTable
.end();
113 for (; write
!= write_end
; ++write
) {
114 SequencerRequest
* request
= write
->second
;
115 if (current_time
- request
->issue_time
< m_deadlock_threshold
)
118 panic("Possible Deadlock detected. Aborting!\n"
119 "version: %d request.paddr: 0x%x m_writeRequestTable: %d "
120 "current time: %u issue_time: %d difference: %d\n", m_version
,
121 request
->ruby_request
.m_PhysicalAddress
, m_writeRequestTable
.size(),
122 current_time
, request
->issue_time
,
123 current_time
- request
->issue_time
);
126 total_outstanding
+= m_writeRequestTable
.size();
127 total_outstanding
+= m_readRequestTable
.size();
129 assert(m_outstanding_count
== total_outstanding
);
131 if (m_outstanding_count
> 0) {
132 // If there are still outstanding requests, keep checking
133 schedule(deadlockCheckEvent
,
134 m_deadlock_threshold
* g_eventQueue_ptr
->getClock() +
140 Sequencer::printStats(ostream
& out
) const
142 out
<< "Sequencer: " << m_name
<< endl
143 << " store_waiting_on_load_cycles: "
144 << m_store_waiting_on_load_cycles
<< endl
145 << " store_waiting_on_store_cycles: "
146 << m_store_waiting_on_store_cycles
<< endl
147 << " load_waiting_on_load_cycles: "
148 << m_load_waiting_on_load_cycles
<< endl
149 << " load_waiting_on_store_cycles: "
150 << m_load_waiting_on_store_cycles
<< endl
;
154 Sequencer::printProgress(ostream
& out
) const
157 int total_demand
= 0;
158 out
<< "Sequencer Stats Version " << m_version
<< endl
;
159 out
<< "Current time = " << g_eventQueue_ptr
->getTime() << endl
;
160 out
<< "---------------" << endl
;
161 out
<< "outstanding requests" << endl
;
163 out
<< "proc " << m_Read
164 << " version Requests = " << m_readRequestTable
.size() << endl
;
166 // print the request table
167 RequestTable::iterator read
= m_readRequestTable
.begin();
168 RequestTable::iterator read_end
= m_readRequestTable
.end();
169 for (; read
!= read_end
; ++read
) {
170 SequencerRequest
* request
= read
->second
;
171 out
<< "\tRequest[ " << i
<< " ] = " << request
->type
172 << " Address " << rkeys
[i
]
173 << " Posted " << request
->issue_time
174 << " PF " << PrefetchBit_No
<< endl
;
178 out
<< "proc " << m_version
179 << " Write Requests = " << m_writeRequestTable
.size
<< endl
;
181 // print the request table
182 RequestTable::iterator write
= m_writeRequestTable
.begin();
183 RequestTable::iterator write_end
= m_writeRequestTable
.end();
184 for (; write
!= write_end
; ++write
) {
185 SequencerRequest
* request
= write
->second
;
186 out
<< "\tRequest[ " << i
<< " ] = " << request
.getType()
187 << " Address " << wkeys
[i
]
188 << " Posted " << request
.getTime()
189 << " PF " << request
.getPrefetch() << endl
;
190 if (request
.getPrefetch() == PrefetchBit_No
) {
197 out
<< "Total Number Outstanding: " << m_outstanding_count
<< endl
198 << "Total Number Demand : " << total_demand
<< endl
199 << "Total Number Prefetches : " << m_outstanding_count
- total_demand
200 << endl
<< endl
<< endl
;
205 Sequencer::printConfig(ostream
& out
) const
207 out
<< "Seqeuncer config: " << m_name
<< endl
208 << " controller: " << m_controller
->getName() << endl
209 << " version: " << m_version
<< endl
210 << " max_outstanding_requests: " << m_max_outstanding_requests
<< endl
211 << " deadlock_threshold: " << m_deadlock_threshold
<< endl
;
214 // Insert the request on the correct request table. Return true if
215 // the entry was already present.
217 Sequencer::insertRequest(SequencerRequest
* request
)
219 int total_outstanding
=
220 m_writeRequestTable
.size() + m_readRequestTable
.size();
222 assert(m_outstanding_count
== total_outstanding
);
224 // See if we should schedule a deadlock check
225 if (deadlockCheckEvent
.scheduled() == false) {
226 schedule(deadlockCheckEvent
, m_deadlock_threshold
+ curTick());
229 Address
line_addr(request
->ruby_request
.m_PhysicalAddress
);
230 line_addr
.makeLineAddress();
231 if ((request
->ruby_request
.m_Type
== RubyRequestType_ST
) ||
232 (request
->ruby_request
.m_Type
== RubyRequestType_ATOMIC
) ||
233 (request
->ruby_request
.m_Type
== RubyRequestType_RMW_Read
) ||
234 (request
->ruby_request
.m_Type
== RubyRequestType_RMW_Write
) ||
235 (request
->ruby_request
.m_Type
== RubyRequestType_Load_Linked
) ||
236 (request
->ruby_request
.m_Type
== RubyRequestType_Store_Conditional
) ||
237 (request
->ruby_request
.m_Type
== RubyRequestType_Locked_RMW_Read
) ||
238 (request
->ruby_request
.m_Type
== RubyRequestType_Locked_RMW_Write
) ||
239 (request
->ruby_request
.m_Type
== RubyRequestType_FLUSH
)) {
240 pair
<RequestTable::iterator
, bool> r
=
241 m_writeRequestTable
.insert(RequestTable::value_type(line_addr
, 0));
242 bool success
= r
.second
;
243 RequestTable::iterator i
= r
.first
;
248 // drh5: isn't this an error? do you lose the initial request?
252 m_outstanding_count
++;
254 pair
<RequestTable::iterator
, bool> r
=
255 m_readRequestTable
.insert(RequestTable::value_type(line_addr
, 0));
256 bool success
= r
.second
;
257 RequestTable::iterator i
= r
.first
;
262 // drh5: isn't this an error? do you lose the initial request?
266 m_outstanding_count
++;
269 g_system_ptr
->getProfiler()->sequencerRequests(m_outstanding_count
);
271 total_outstanding
= m_writeRequestTable
.size() + m_readRequestTable
.size();
272 assert(m_outstanding_count
== total_outstanding
);
278 Sequencer::markRemoved()
280 m_outstanding_count
--;
281 assert(m_outstanding_count
==
282 m_writeRequestTable
.size() + m_readRequestTable
.size());
286 Sequencer::removeRequest(SequencerRequest
* srequest
)
288 assert(m_outstanding_count
==
289 m_writeRequestTable
.size() + m_readRequestTable
.size());
291 const RubyRequest
& ruby_request
= srequest
->ruby_request
;
292 Address
line_addr(ruby_request
.m_PhysicalAddress
);
293 line_addr
.makeLineAddress();
294 if ((ruby_request
.m_Type
== RubyRequestType_ST
) ||
295 (ruby_request
.m_Type
== RubyRequestType_RMW_Read
) ||
296 (ruby_request
.m_Type
== RubyRequestType_RMW_Write
) ||
297 (ruby_request
.m_Type
== RubyRequestType_Load_Linked
) ||
298 (ruby_request
.m_Type
== RubyRequestType_Store_Conditional
) ||
299 (ruby_request
.m_Type
== RubyRequestType_Locked_RMW_Read
) ||
300 (ruby_request
.m_Type
== RubyRequestType_Locked_RMW_Write
)) {
301 m_writeRequestTable
.erase(line_addr
);
303 m_readRequestTable
.erase(line_addr
);
310 Sequencer::handleLlsc(const Address
& address
, SequencerRequest
* request
)
313 // The success flag indicates whether the LLSC operation was successful.
314 // LL ops will always succeed, but SC may fail if the cache line is no
318 if (request
->ruby_request
.m_Type
== RubyRequestType_Store_Conditional
) {
319 if (!m_dataCache_ptr
->isLocked(address
, m_version
)) {
321 // For failed SC requests, indicate the failure to the cpu by
322 // setting the extra data to zero.
324 request
->ruby_request
.pkt
->req
->setExtraData(0);
328 // For successful SC requests, indicate the success to the cpu by
329 // setting the extra data to one.
331 request
->ruby_request
.pkt
->req
->setExtraData(1);
334 // Independent of success, all SC operations must clear the lock
336 m_dataCache_ptr
->clearLocked(address
);
337 } else if (request
->ruby_request
.m_Type
== RubyRequestType_Load_Linked
) {
339 // Note: To fully follow Alpha LLSC semantics, should the LL clear any
340 // previously locked cache lines?
342 m_dataCache_ptr
->setLocked(address
, m_version
);
343 } else if ((m_dataCache_ptr
->isTagPresent(address
)) && (m_dataCache_ptr
->isLocked(address
, m_version
))) {
345 // Normal writes should clear the locked address
347 m_dataCache_ptr
->clearLocked(address
);
353 Sequencer::writeCallback(const Address
& address
, DataBlock
& data
)
355 writeCallback(address
, GenericMachineType_NULL
, data
);
359 Sequencer::writeCallback(const Address
& address
,
360 GenericMachineType mach
,
363 writeCallback(address
, mach
, data
, 0, 0, 0);
367 Sequencer::writeCallback(const Address
& address
,
368 GenericMachineType mach
,
370 Time initialRequestTime
,
371 Time forwardRequestTime
,
372 Time firstResponseTime
)
374 assert(address
== line_address(address
));
375 assert(m_writeRequestTable
.count(line_address(address
)));
377 RequestTable::iterator i
= m_writeRequestTable
.find(address
);
378 assert(i
!= m_writeRequestTable
.end());
379 SequencerRequest
* request
= i
->second
;
381 m_writeRequestTable
.erase(i
);
384 assert((request
->ruby_request
.m_Type
== RubyRequestType_ST
) ||
385 (request
->ruby_request
.m_Type
== RubyRequestType_ATOMIC
) ||
386 (request
->ruby_request
.m_Type
== RubyRequestType_RMW_Read
) ||
387 (request
->ruby_request
.m_Type
== RubyRequestType_RMW_Write
) ||
388 (request
->ruby_request
.m_Type
== RubyRequestType_Load_Linked
) ||
389 (request
->ruby_request
.m_Type
== RubyRequestType_Store_Conditional
) ||
390 (request
->ruby_request
.m_Type
== RubyRequestType_Locked_RMW_Read
) ||
391 (request
->ruby_request
.m_Type
== RubyRequestType_Locked_RMW_Write
) ||
392 (request
->ruby_request
.m_Type
== RubyRequestType_FLUSH
));
396 // For Alpha, properly handle LL, SC, and write requests with respect to
397 // locked cache blocks.
399 // Not valid for Network_test protocl
402 if(!m_usingNetworkTester
)
403 success
= handleLlsc(address
, request
);
405 if (request
->ruby_request
.m_Type
== RubyRequestType_Locked_RMW_Read
) {
406 m_controller
->blockOnQueue(address
, m_mandatory_q_ptr
);
407 } else if (request
->ruby_request
.m_Type
== RubyRequestType_Locked_RMW_Write
) {
408 m_controller
->unblock(address
);
411 hitCallback(request
, mach
, data
, success
,
412 initialRequestTime
, forwardRequestTime
, firstResponseTime
);
416 Sequencer::readCallback(const Address
& address
, DataBlock
& data
)
418 readCallback(address
, GenericMachineType_NULL
, data
);
422 Sequencer::readCallback(const Address
& address
,
423 GenericMachineType mach
,
426 readCallback(address
, mach
, data
, 0, 0, 0);
430 Sequencer::readCallback(const Address
& address
,
431 GenericMachineType mach
,
433 Time initialRequestTime
,
434 Time forwardRequestTime
,
435 Time firstResponseTime
)
437 assert(address
== line_address(address
));
438 assert(m_readRequestTable
.count(line_address(address
)));
440 RequestTable::iterator i
= m_readRequestTable
.find(address
);
441 assert(i
!= m_readRequestTable
.end());
442 SequencerRequest
* request
= i
->second
;
444 m_readRequestTable
.erase(i
);
447 assert((request
->ruby_request
.m_Type
== RubyRequestType_LD
) ||
448 (request
->ruby_request
.m_Type
== RubyRequestType_IFETCH
));
450 hitCallback(request
, mach
, data
, true,
451 initialRequestTime
, forwardRequestTime
, firstResponseTime
);
455 Sequencer::hitCallback(SequencerRequest
* srequest
,
456 GenericMachineType mach
,
459 Time initialRequestTime
,
460 Time forwardRequestTime
,
461 Time firstResponseTime
)
463 const RubyRequest
& ruby_request
= srequest
->ruby_request
;
464 Address
request_address(ruby_request
.m_PhysicalAddress
);
465 Address
request_line_address(ruby_request
.m_PhysicalAddress
);
466 request_line_address
.makeLineAddress();
467 RubyRequestType type
= ruby_request
.m_Type
;
468 Time issued_time
= srequest
->issue_time
;
470 // Set this cache entry to the most recently used
471 if (type
== RubyRequestType_IFETCH
) {
472 if (m_instCache_ptr
->isTagPresent(request_line_address
))
473 m_instCache_ptr
->setMRU(request_line_address
);
475 if (m_dataCache_ptr
->isTagPresent(request_line_address
))
476 m_dataCache_ptr
->setMRU(request_line_address
);
479 assert(g_eventQueue_ptr
->getTime() >= issued_time
);
480 Time miss_latency
= g_eventQueue_ptr
->getTime() - issued_time
;
482 // Profile the miss latency for all non-zero demand misses
483 if (miss_latency
!= 0) {
484 g_system_ptr
->getProfiler()->missLatency(miss_latency
, type
, mach
);
486 if (mach
== GenericMachineType_L1Cache_wCC
) {
487 g_system_ptr
->getProfiler()->missLatencyWcc(issued_time
,
491 g_eventQueue_ptr
->getTime());
494 if (mach
== GenericMachineType_Directory
) {
495 g_system_ptr
->getProfiler()->missLatencyDir(issued_time
,
499 g_eventQueue_ptr
->getTime());
502 DPRINTFR(ProtocolTrace
, "%15s %3s %10s%20s %6s>%-6s %s %d cycles\n",
503 curTick(), m_version
, "Seq",
504 success
? "Done" : "SC_Failed", "", "",
505 ruby_request
.m_PhysicalAddress
, miss_latency
);
509 if (ruby_request
.data
!= NULL
) {
510 if ((type
== RubyRequestType_LD
) ||
511 (type
== RubyRequestType_IFETCH
) ||
512 (type
== RubyRequestType_RMW_Read
) ||
513 (type
== RubyRequestType_Locked_RMW_Read
) ||
514 (type
== RubyRequestType_Load_Linked
)) {
515 memcpy(ruby_request
.data
,
516 data
.getData(request_address
.getOffset(), ruby_request
.m_Size
),
517 ruby_request
.m_Size
);
519 data
.setData(ruby_request
.data
, request_address
.getOffset(),
520 ruby_request
.m_Size
);
523 DPRINTF(MemoryAccess
,
524 "WARNING. Data not transfered from Ruby to M5 for type %s\n",
525 RubyRequestType_to_string(type
));
528 // If using the RubyTester, update the RubyTester sender state's
529 // subBlock with the recieved data. The tester will later access
531 // Note: RubyPort will access it's sender state before the
533 if (m_usingRubyTester
) {
534 RubyPort::SenderState
*requestSenderState
=
535 safe_cast
<RubyPort::SenderState
*>(ruby_request
.pkt
->senderState
);
536 RubyTester::SenderState
* testerSenderState
=
537 safe_cast
<RubyTester::SenderState
*>(requestSenderState
->saved
);
538 testerSenderState
->subBlock
->mergeFrom(data
);
541 ruby_hit_callback(ruby_request
.pkt
);
545 // Returns true if the sequencer already has a load or store outstanding
547 Sequencer::getRequestStatus(const RubyRequest
& request
)
549 bool is_outstanding_store
=
550 !!m_writeRequestTable
.count(line_address(request
.m_PhysicalAddress
));
551 bool is_outstanding_load
=
552 !!m_readRequestTable
.count(line_address(request
.m_PhysicalAddress
));
553 if (is_outstanding_store
) {
554 if ((request
.m_Type
== RubyRequestType_LD
) ||
555 (request
.m_Type
== RubyRequestType_IFETCH
) ||
556 (request
.m_Type
== RubyRequestType_RMW_Read
)) {
557 m_store_waiting_on_load_cycles
++;
559 m_store_waiting_on_store_cycles
++;
561 return RequestStatus_Aliased
;
562 } else if (is_outstanding_load
) {
563 if ((request
.m_Type
== RubyRequestType_ST
) ||
564 (request
.m_Type
== RubyRequestType_RMW_Write
)) {
565 m_load_waiting_on_store_cycles
++;
567 m_load_waiting_on_load_cycles
++;
569 return RequestStatus_Aliased
;
572 if (m_outstanding_count
>= m_max_outstanding_requests
) {
573 return RequestStatus_BufferFull
;
576 return RequestStatus_Ready
;
580 Sequencer::empty() const
582 return m_writeRequestTable
.empty() && m_readRequestTable
.empty();
586 Sequencer::makeRequest(const RubyRequest
&request
)
588 assert(request
.m_PhysicalAddress
.getOffset() + request
.m_Size
<=
589 RubySystem::getBlockSizeBytes());
590 RequestStatus status
= getRequestStatus(request
);
591 if (status
!= RequestStatus_Ready
)
594 SequencerRequest
*srequest
=
595 new SequencerRequest(request
, g_eventQueue_ptr
->getTime());
596 bool found
= insertRequest(srequest
);
598 panic("Sequencer::makeRequest should never be called if the "
599 "request is already outstanding\n");
600 return RequestStatus_NULL
;
603 issueRequest(request
);
605 // TODO: issue hardware prefetches here
606 return RequestStatus_Issued
;
610 Sequencer::issueRequest(const RubyRequest
& request
)
612 // TODO: Eliminate RubyRequest being copied again.
614 RubyRequestType ctype
= RubyRequestType_NUM
;
615 switch(request
.m_Type
) {
616 case RubyRequestType_IFETCH
:
617 ctype
= RubyRequestType_IFETCH
;
619 case RubyRequestType_LD
:
620 ctype
= RubyRequestType_LD
;
622 case RubyRequestType_FLUSH
:
623 ctype
= RubyRequestType_FLUSH
;
625 case RubyRequestType_ST
:
626 case RubyRequestType_RMW_Read
:
627 case RubyRequestType_RMW_Write
:
629 // x86 locked instructions are translated to store cache coherence
630 // requests because these requests should always be treated as read
631 // exclusive operations and should leverage any migratory sharing
632 // optimization built into the protocol.
634 case RubyRequestType_Locked_RMW_Read
:
635 case RubyRequestType_Locked_RMW_Write
:
636 ctype
= RubyRequestType_ST
;
639 // Alpha LL/SC instructions need to be handled carefully by the cache
640 // coherence protocol to ensure they follow the proper semantics. In
641 // particular, by identifying the operations as atomic, the protocol
642 // should understand that migratory sharing optimizations should not be
643 // performed (i.e. a load between the LL and SC should not steal away
644 // exclusive permission).
646 case RubyRequestType_Load_Linked
:
647 case RubyRequestType_Store_Conditional
:
648 case RubyRequestType_ATOMIC
:
649 ctype
= RubyRequestType_ATOMIC
;
655 RubyAccessMode amtype
= RubyAccessMode_NUM
;
656 switch(request
.m_AccessMode
){
657 case RubyAccessMode_User
:
658 amtype
= RubyAccessMode_User
;
660 case RubyAccessMode_Supervisor
:
661 amtype
= RubyAccessMode_Supervisor
;
663 case RubyAccessMode_Device
:
664 amtype
= RubyAccessMode_User
;
670 Address
line_addr(request
.m_PhysicalAddress
);
671 line_addr
.makeLineAddress();
673 if (request
.pkt
!= NULL
&& request
.pkt
->req
->hasContextId()) {
674 proc_id
= request
.pkt
->req
->contextId();
676 RubyRequest
*msg
= new RubyRequest(request
.m_PhysicalAddress
.getAddress(),
677 request
.data
, request
.m_Size
,
678 request
.m_ProgramCounter
.getAddress(),
679 ctype
, amtype
, request
.pkt
,
680 PrefetchBit_No
, proc_id
);
682 DPRINTFR(ProtocolTrace
, "%15s %3s %10s%20s %6s>%-6s %s %s\n",
683 curTick(), m_version
, "Seq", "Begin", "", "",
684 request
.m_PhysicalAddress
, RubyRequestType_to_string(request
.m_Type
));
686 Time latency
= 0; // initialzed to an null value
688 if (request
.m_Type
== RubyRequestType_IFETCH
)
689 latency
= m_instCache_ptr
->getLatency();
691 latency
= m_dataCache_ptr
->getLatency();
693 // Send the message to the cache controller
696 assert(m_mandatory_q_ptr
!= NULL
);
697 m_mandatory_q_ptr
->enqueue(msg
, latency
);
700 template <class KEY
, class VALUE
>
702 operator<<(ostream
&out
, const m5::hash_map
<KEY
, VALUE
> &map
)
704 typename
m5::hash_map
<KEY
, VALUE
>::const_iterator i
= map
.begin();
705 typename
m5::hash_map
<KEY
, VALUE
>::const_iterator end
= map
.end();
708 for (; i
!= end
; ++i
)
709 out
<< " " << i
->first
<< "=" << i
->second
;
716 Sequencer::print(ostream
& out
) const
718 out
<< "[Sequencer: " << m_version
719 << ", outstanding requests: " << m_outstanding_count
720 << ", read request table: " << m_readRequestTable
721 << ", write request table: " << m_writeRequestTable
725 // this can be called from setState whenever coherence permissions are
726 // upgraded when invoked, coherence violations will be checked for the
729 Sequencer::checkCoherence(const Address
& addr
)
731 #ifdef CHECK_COHERENCE
732 g_system_ptr
->checkGlobalCoherenceInvariant(addr
);