/*
 * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "arch/x86/ldstflags.hh"
#include "base/misc.hh"
#include "base/str.hh"
#include "cpu/testers/rubytest/RubyTester.hh"
#include "debug/MemoryAccess.hh"
#include "debug/ProtocolTrace.hh"
#include "debug/RubySequencer.hh"
#include "debug/RubyStats.hh"
#include "mem/protocol/PrefetchBit.hh"
#include "mem/protocol/RubyAccessMode.hh"
#include "mem/ruby/profiler/Profiler.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/system/Sequencer.hh"
#include "mem/ruby/system/System.hh"
#include "mem/packet.hh"
#include "sim/system.hh"
using namespace std;

Sequencer *
RubySequencerParams::create()
{
    return new Sequencer(this);
}
Sequencer::Sequencer(const Params *p)
    : RubyPort(p), m_IncompleteTimes(MachineType_NUM), deadlockCheckEvent(this)
{
    m_outstanding_count = 0;

    m_instCache_ptr = p->icache;
    m_dataCache_ptr = p->dcache;
    m_data_cache_hit_latency = p->dcache_hit_latency;
    m_inst_cache_hit_latency = p->icache_hit_latency;
    m_max_outstanding_requests = p->max_outstanding_requests;
    m_deadlock_threshold = p->deadlock_threshold;

    assert(m_max_outstanding_requests > 0);
    assert(m_deadlock_threshold > 0);
    assert(m_instCache_ptr != NULL);
    assert(m_dataCache_ptr != NULL);
    assert(m_data_cache_hit_latency > 0);
    assert(m_inst_cache_hit_latency > 0);

    m_usingNetworkTester = p->using_network_tester;
}
Sequencer::~Sequencer()
{
}
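// Deadlock watchdog: wakeup() is scheduled via deadlockCheckEvent and panics
// if any outstanding read or write request has been pending for more than
// m_deadlock_threshold cycles; otherwise it re-schedules itself while
// requests remain outstanding.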
void
Sequencer::wakeup()
{
    assert(drainState() != DrainState::Draining);

    // Check for deadlock of any of the requests
    Cycles current_time = curCycle();

    // Check across all outstanding requests
    int total_outstanding = 0;

    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_readRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              request->pkt->getAddr(), m_readRequestTable.size(),
              current_time * clockPeriod(), request->issue_time * clockPeriod(),
              (current_time * clockPeriod()) -
              (request->issue_time * clockPeriod()));
    }

    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        if (current_time - request->issue_time < m_deadlock_threshold)
            continue;

        panic("Possible Deadlock detected. Aborting!\n"
              "version: %d request.paddr: 0x%x m_writeRequestTable: %d "
              "current time: %u issue_time: %d difference: %d\n", m_version,
              request->pkt->getAddr(), m_writeRequestTable.size(),
              current_time * clockPeriod(), request->issue_time * clockPeriod(),
              (current_time * clockPeriod()) -
              (request->issue_time * clockPeriod()));
    }

    total_outstanding += m_writeRequestTable.size();
    total_outstanding += m_readRequestTable.size();

    assert(m_outstanding_count == total_outstanding);

    if (m_outstanding_count > 0) {
        // If there are still outstanding requests, keep checking
        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
    }
}
void Sequencer::resetStats()
{
    m_latencyHist.reset();
    m_hitLatencyHist.reset();
    m_missLatencyHist.reset();
    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist[i]->reset();
        m_hitTypeLatencyHist[i]->reset();
        m_missTypeLatencyHist[i]->reset();
        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i][j]->reset();
            m_missTypeMachLatencyHist[i][j]->reset();
        }
    }

    for (int i = 0; i < MachineType_NUM; i++) {
        m_missMachLatencyHist[i]->reset();
        m_hitMachLatencyHist[i]->reset();

        m_IssueToInitialDelayHist[i]->reset();
        m_InitialToForwardDelayHist[i]->reset();
        m_ForwardToFirstResponseDelayHist[i]->reset();
        m_FirstResponseToCompletionDelayHist[i]->reset();

        m_IncompleteTimes[i] = 0;
    }
}
void
Sequencer::printProgress(ostream& out) const
{
    int total_demand = 0;
    out << "Sequencer Stats Version " << m_version << endl;
    out << "Current time = " << m_ruby_system->getTime() << endl;
    out << "---------------" << endl;
    out << "outstanding requests" << endl;

    out << "proc " << m_version
        << " Read Requests = " << m_readRequestTable.size() << endl;

    // print the read request table
    RequestTable::iterator read = m_readRequestTable.begin();
    RequestTable::iterator read_end = m_readRequestTable.end();
    for (; read != read_end; ++read) {
        SequencerRequest* request = read->second;
        out << "\tRequest[ " << read->first << " ] = " << request->m_type
            << " Posted " << request->issue_time
            << " PF " << PrefetchBit_No << endl;
        total_demand++;
    }

    out << "proc " << m_version
        << " Write Requests = " << m_writeRequestTable.size() << endl;

    // print the write request table
    RequestTable::iterator write = m_writeRequestTable.begin();
    RequestTable::iterator write_end = m_writeRequestTable.end();
    for (; write != write_end; ++write) {
        SequencerRequest* request = write->second;
        out << "\tRequest[ " << write->first << " ] = " << request->m_type
            << " Posted " << request->issue_time
            << " PF " << PrefetchBit_No << endl;
        total_demand++;
    }

    out << "Total Number Outstanding: " << m_outstanding_count << endl
        << "Total Number Demand     : " << total_demand << endl
        << "Total Number Prefetches : " << m_outstanding_count - total_demand
        << endl << endl << endl;
}
// Insert the request into the correct request table. Return
// RequestStatus_Aliased if a request for the same cache line is already
// outstanding.
RequestStatus
Sequencer::insertRequest(PacketPtr pkt, RubyRequestType request_type)
{
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    // See if we should schedule a deadlock check
    if (!deadlockCheckEvent.scheduled() &&
        drainState() != DrainState::Draining) {
        schedule(deadlockCheckEvent, clockEdge(m_deadlock_threshold));
    }

    Addr line_addr = makeLineAddress(pkt->getAddr());

    // Create a default entry, mapping the address to NULL, the cast is
    // there to make gcc 4.4 happy
    RequestTable::value_type default_entry(line_addr,
                                           (SequencerRequest*) NULL);

    if ((request_type == RubyRequestType_ST) ||
        (request_type == RubyRequestType_RMW_Read) ||
        (request_type == RubyRequestType_RMW_Write) ||
        (request_type == RubyRequestType_Load_Linked) ||
        (request_type == RubyRequestType_Store_Conditional) ||
        (request_type == RubyRequestType_Locked_RMW_Read) ||
        (request_type == RubyRequestType_Locked_RMW_Write) ||
        (request_type == RubyRequestType_FLUSH)) {

        // Check if there is any outstanding read request for the same
        // cache line
        if (m_readRequestTable.count(line_addr) > 0) {
            m_store_waiting_on_load++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_writeRequestTable.insert(default_entry);
        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type, curCycle());
            m_outstanding_count++;
        } else {
            // There is an outstanding write request for the cache line
            m_store_waiting_on_store++;
            return RequestStatus_Aliased;
        }
    } else {
        // Check if there is any outstanding write request for the same
        // cache line
        if (m_writeRequestTable.count(line_addr) > 0) {
            m_load_waiting_on_store++;
            return RequestStatus_Aliased;
        }

        pair<RequestTable::iterator, bool> r =
            m_readRequestTable.insert(default_entry);
        if (r.second) {
            RequestTable::iterator i = r.first;
            i->second = new SequencerRequest(pkt, request_type, curCycle());
            m_outstanding_count++;
        } else {
            // There is an outstanding read request for the cache line
            m_load_waiting_on_load++;
            return RequestStatus_Aliased;
        }
    }

    m_outstandReqHist.sample(m_outstanding_count);
    assert(m_outstanding_count ==
        (m_writeRequestTable.size() + m_readRequestTable.size()));

    return RequestStatus_Ready;
}
void
Sequencer::markRemoved()
{
    m_outstanding_count--;
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());
}
void
Sequencer::removeRequest(SequencerRequest* srequest)
{
    assert(m_outstanding_count ==
           m_writeRequestTable.size() + m_readRequestTable.size());

    Addr line_addr = makeLineAddress(srequest->pkt->getAddr());
    if ((srequest->m_type == RubyRequestType_ST) ||
        (srequest->m_type == RubyRequestType_RMW_Read) ||
        (srequest->m_type == RubyRequestType_RMW_Write) ||
        (srequest->m_type == RubyRequestType_Load_Linked) ||
        (srequest->m_type == RubyRequestType_Store_Conditional) ||
        (srequest->m_type == RubyRequestType_Locked_RMW_Read) ||
        (srequest->m_type == RubyRequestType_Locked_RMW_Write)) {
        m_writeRequestTable.erase(line_addr);
    } else {
        m_readRequestTable.erase(line_addr);
    }

    markRemoved();
}
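// Clears the LL/SC lock on a line when the cache loses coherence permission
// on it, so that a later Store_Conditional to that line will fail.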
void
Sequencer::invalidateSC(Addr address)
{
    AbstractCacheEntry *e = m_dataCache_ptr->lookup(address);
    // The controller has lost the coherence permissions, hence the lock
    // on the cache line maintained by the cache should be cleared.
    if (e && e->isLocked(m_version)) {
        e->clearLocked();
    }
}
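// LL/SC bookkeeping on the data cache entry: Load_Linked locks the line for
// this sequencer's version, Store_Conditional succeeds only if that lock is
// still held (reported to the CPU through the request's extra data), and any
// other write to a locked line clears the lock. Returns false only for a
// failed Store_Conditional.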
bool
Sequencer::handleLlsc(Addr address, SequencerRequest* request)
{
    AbstractCacheEntry *e = m_dataCache_ptr->lookup(address);
    if (!e)
        return true;

    // The success flag indicates whether the LLSC operation was successful.
    // LL ops will always succeed, but SC may fail if the cache line is no
    // longer locked.
    bool success = true;
    if (request->m_type == RubyRequestType_Store_Conditional) {
        if (!e->isLocked(m_version)) {
            // For failed SC requests, indicate the failure to the cpu by
            // setting the extra data to zero.
            request->pkt->req->setExtraData(0);
            success = false;
        } else {
            // For successful SC requests, indicate the success to the cpu by
            // setting the extra data to one.
            request->pkt->req->setExtraData(1);
        }
        // Independent of success, all SC operations must clear the lock
        e->clearLocked();
    } else if (request->m_type == RubyRequestType_Load_Linked) {
        // Note: To fully follow Alpha LLSC semantics, should the LL clear any
        // previously locked cache lines?
        e->setLocked(m_version);
    } else if (e->isLocked(m_version)) {
        // Normal writes should clear the locked address
        e->clearLocked();
    }
    return success;
}
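// Records the end-to-end latency of a completed request into the overall,
// per-type, and per-responding-machine histograms, split into hit and miss
// (external hit) cases; for misses with monotonically ordered timestamps the
// individual protocol delay components are recorded as well, otherwise the
// sample is counted in m_IncompleteTimes.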
void
Sequencer::recordMissLatency(const Cycles cycles, const RubyRequestType type,
                             const MachineType respondingMach,
                             bool isExternalHit, Cycles issuedTime,
                             Cycles initialRequestTime,
                             Cycles forwardRequestTime,
                             Cycles firstResponseTime, Cycles completionTime)
{
    m_latencyHist.sample(cycles);
    m_typeLatencyHist[type]->sample(cycles);

    if (isExternalHit) {
        m_missLatencyHist.sample(cycles);
        m_missTypeLatencyHist[type]->sample(cycles);

        if (respondingMach != MachineType_NUM) {
            m_missMachLatencyHist[respondingMach]->sample(cycles);
            m_missTypeMachLatencyHist[type][respondingMach]->sample(cycles);

            if ((issuedTime <= initialRequestTime) &&
                (initialRequestTime <= forwardRequestTime) &&
                (forwardRequestTime <= firstResponseTime) &&
                (firstResponseTime <= completionTime)) {

                m_IssueToInitialDelayHist[respondingMach]->sample(
                    initialRequestTime - issuedTime);
                m_InitialToForwardDelayHist[respondingMach]->sample(
                    forwardRequestTime - initialRequestTime);
                m_ForwardToFirstResponseDelayHist[respondingMach]->sample(
                    firstResponseTime - forwardRequestTime);
                m_FirstResponseToCompletionDelayHist[respondingMach]->sample(
                    completionTime - firstResponseTime);
            } else {
                m_IncompleteTimes[respondingMach]++;
            }
        }
    } else {
        m_hitLatencyHist.sample(cycles);
        m_hitTypeLatencyHist[type]->sample(cycles);

        if (respondingMach != MachineType_NUM) {
            m_hitMachLatencyHist[respondingMach]->sample(cycles);
            m_hitTypeMachLatencyHist[type][respondingMach]->sample(cycles);
        }
    }
}
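// Completion callback for write-class requests: retires the entry from the
// write request table, resolves LL/SC and Locked_RMW bookkeeping, and then
// funnels into hitCallback() for the common completion path.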
void
Sequencer::writeCallback(Addr address, DataBlock& data,
                         const bool externalHit, const MachineType mach,
                         const Cycles initialRequestTime,
                         const Cycles forwardRequestTime,
                         const Cycles firstResponseTime)
{
    assert(address == makeLineAddress(address));
    assert(m_writeRequestTable.count(makeLineAddress(address)));

    RequestTable::iterator i = m_writeRequestTable.find(address);
    assert(i != m_writeRequestTable.end());
    SequencerRequest* request = i->second;

    m_writeRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_ST) ||
           (request->m_type == RubyRequestType_ATOMIC) ||
           (request->m_type == RubyRequestType_RMW_Read) ||
           (request->m_type == RubyRequestType_RMW_Write) ||
           (request->m_type == RubyRequestType_Load_Linked) ||
           (request->m_type == RubyRequestType_Store_Conditional) ||
           (request->m_type == RubyRequestType_Locked_RMW_Read) ||
           (request->m_type == RubyRequestType_Locked_RMW_Write) ||
           (request->m_type == RubyRequestType_FLUSH));

    // For Alpha, properly handle LL, SC, and write requests with respect to
    // locked cache blocks.
    //
    // Not valid for the Network_test protocol.
    bool success = true;
    if (!m_usingNetworkTester)
        success = handleLlsc(address, request);

    if (request->m_type == RubyRequestType_Locked_RMW_Read) {
        m_controller->blockOnQueue(address, m_mandatory_q_ptr);
    } else if (request->m_type == RubyRequestType_Locked_RMW_Write) {
        m_controller->unblock(address);
    }

    hitCallback(request, data, success, mach, externalHit,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}
void
Sequencer::readCallback(Addr address, DataBlock& data,
                        bool externalHit, const MachineType mach,
                        Cycles initialRequestTime,
                        Cycles forwardRequestTime,
                        Cycles firstResponseTime)
{
    assert(address == makeLineAddress(address));
    assert(m_readRequestTable.count(makeLineAddress(address)));

    RequestTable::iterator i = m_readRequestTable.find(address);
    assert(i != m_readRequestTable.end());
    SequencerRequest* request = i->second;

    m_readRequestTable.erase(i);
    markRemoved();

    assert((request->m_type == RubyRequestType_LD) ||
           (request->m_type == RubyRequestType_IFETCH));

    hitCallback(request, data, true, mach, externalHit,
                initialRequestTime, forwardRequestTime, firstResponseTime);
}
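// Common completion path for reads and writes: profiles the latency, copies
// data between the packet and the cache's DataBlock, updates RubyTester
// state when the tester is in use, and finally returns the packet to the
// CPU-side port (or to the cache recorder during warmup/cooldown).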
void
Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data,
                       const bool llscSuccess,
                       const MachineType mach, const bool externalHit,
                       const Cycles initialRequestTime,
                       const Cycles forwardRequestTime,
                       const Cycles firstResponseTime)
{
    warn_once("Replacement policy updates recently became the responsibility "
              "of SLICC state machines. Make sure to setMRU() near callbacks "
              "in .sm files!");

    PacketPtr pkt = srequest->pkt;
    Addr request_address(pkt->getAddr());
    RubyRequestType type = srequest->m_type;
    Cycles issued_time = srequest->issue_time;

    assert(curCycle() >= issued_time);
    Cycles total_latency = curCycle() - issued_time;

    // Profile the latency for all demand accesses.
    recordMissLatency(total_latency, type, mach, externalHit, issued_time,
                      initialRequestTime, forwardRequestTime,
                      firstResponseTime, curCycle());

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %d cycles\n",
             curTick(), m_version, "Seq",
             llscSuccess ? "Done" : "SC_Failed", "", "",
             request_address, total_latency);

    // update the data unless it is a non-data-carrying flush
    if (RubySystem::getWarmupEnabled()) {
        data.setData(pkt->getConstPtr<uint8_t>(),
                     getOffset(request_address), pkt->getSize());
    } else if (!pkt->isFlush()) {
        if ((type == RubyRequestType_LD) ||
            (type == RubyRequestType_IFETCH) ||
            (type == RubyRequestType_RMW_Read) ||
            (type == RubyRequestType_Locked_RMW_Read) ||
            (type == RubyRequestType_Load_Linked)) {
            memcpy(pkt->getPtr<uint8_t>(),
                   data.getData(getOffset(request_address), pkt->getSize()),
                   pkt->getSize());
            DPRINTF(RubySequencer, "read data %s\n", data);
        } else {
            data.setData(pkt->getConstPtr<uint8_t>(),
                         getOffset(request_address), pkt->getSize());
            DPRINTF(RubySequencer, "set data %s\n", data);
        }
    }

    // If using the RubyTester, update the RubyTester sender state's
    // subBlock with the received data. The tester will later access
    // this state.
    if (m_usingRubyTester) {
        DPRINTF(RubySequencer, "hitCallback %s 0x%x using RubyTester\n",
                pkt->cmdString(), pkt->getAddr());
        RubyTester::SenderState* testerSenderState =
            pkt->findNextSenderState<RubyTester::SenderState>();
        assert(testerSenderState);
        testerSenderState->subBlock.mergeFrom(data);
    }

    delete srequest;

    RubySystem *rs = m_ruby_system;
    if (RubySystem::getWarmupEnabled()) {
        assert(pkt->req);
        delete pkt->req;
        delete pkt;
        rs->m_cache_recorder->enqueueNextFetchRequest();
    } else if (RubySystem::getCooldownEnabled()) {
        delete pkt;
        rs->m_cache_recorder->enqueueNextFlushRequest();
    } else {
        ruby_hit_callback(pkt);
    }
}
bool
Sequencer::empty() const
{
    return m_writeRequestTable.empty() && m_readRequestTable.empty();
}
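// Entry point for memory requests arriving from the CPU-side port: rejects
// the packet if too many requests are outstanding, classifies it into
// primary and secondary RubyRequestTypes, allocates a request-table entry,
// and issues it to the cache controller.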
RequestStatus
Sequencer::makeRequest(PacketPtr pkt)
{
    if (m_outstanding_count >= m_max_outstanding_requests) {
        return RequestStatus_BufferFull;
    }

    RubyRequestType primary_type = RubyRequestType_NULL;
    RubyRequestType secondary_type = RubyRequestType_NULL;

    if (pkt->isLLSC()) {
        // Alpha LL/SC instructions need to be handled carefully by the cache
        // coherence protocol to ensure they follow the proper semantics. In
        // particular, by identifying the operations as atomic, the protocol
        // should understand that migratory sharing optimizations should not
        // be performed (i.e. a load between the LL and SC should not steal
        // away exclusive permission).
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing SC\n");
            primary_type = RubyRequestType_Store_Conditional;
        } else {
            DPRINTF(RubySequencer, "Issuing LL\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Load_Linked;
        }
        secondary_type = RubyRequestType_ATOMIC;
    } else if (pkt->req->isLockedRMW()) {
        // x86 locked instructions are translated to store cache coherence
        // requests because these requests should always be treated as read
        // exclusive operations and should leverage any migratory sharing
        // optimization built into the protocol.
        if (pkt->isWrite()) {
            DPRINTF(RubySequencer, "Issuing Locked RMW Write\n");
            primary_type = RubyRequestType_Locked_RMW_Write;
        } else {
            DPRINTF(RubySequencer, "Issuing Locked RMW Read\n");
            assert(pkt->isRead());
            primary_type = RubyRequestType_Locked_RMW_Read;
        }
        secondary_type = RubyRequestType_ST;
    } else {
        if (pkt->isRead()) {
            if (pkt->req->isInstFetch()) {
                primary_type = secondary_type = RubyRequestType_IFETCH;
            } else {
                bool storeCheck = false;
                // only X86 needs the store check
                if (system->getArch() == Arch::X86ISA) {
                    uint32_t flags = pkt->req->getFlags();
                    storeCheck = flags &
                            (X86ISA::StoreCheck << X86ISA::FlagShift);
                }
                if (storeCheck) {
                    primary_type = RubyRequestType_RMW_Read;
                    secondary_type = RubyRequestType_ST;
                } else {
                    primary_type = secondary_type = RubyRequestType_LD;
                }
            }
        } else if (pkt->isWrite()) {
            // Note: M5 packets do not differentiate ST from RMW_Write
            primary_type = secondary_type = RubyRequestType_ST;
        } else if (pkt->isFlush()) {
            primary_type = secondary_type = RubyRequestType_FLUSH;
        } else {
            panic("Unsupported ruby packet type\n");
        }
    }

    RequestStatus status = insertRequest(pkt, primary_type);
    if (status != RequestStatus_Ready)
        return status;

    issueRequest(pkt, secondary_type);

    // TODO: issue hardware prefetches here
    return RequestStatus_Issued;
}
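// Builds the RubyRequest message for an accepted packet and enqueues it on
// the mandatory queue, charging the L1 instruction- or data-cache hit
// latency up front (see the TODO below about moving this latency onto the
// response path).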
void
Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
{
    ContextID proc_id = pkt->req->hasContextId() ?
        pkt->req->contextId() : InvalidContextID;

    // If valid, copy the pc to the ruby request
    Addr pc = 0;
    if (pkt->req->hasPC()) {
        pc = pkt->req->getPC();
    }

    // check if the packet has data as for example prefetch and flush
    // requests do not
    std::shared_ptr<RubyRequest> msg =
        std::make_shared<RubyRequest>(clockEdge(), pkt->getAddr(),
                                      pkt->isFlush() ?
                                      nullptr : pkt->getPtr<uint8_t>(),
                                      pkt->getSize(), pc, secondary_type,
                                      RubyAccessMode_Supervisor, pkt,
                                      PrefetchBit_No, proc_id);

    DPRINTFR(ProtocolTrace, "%15s %3s %10s%20s %6s>%-6s %#x %s\n",
             curTick(), m_version, "Seq", "Begin", "", "",
             msg->getPhysicalAddress(),
             RubyRequestType_to_string(secondary_type));

    // The Sequencer currently assesses instruction and data cache hit latency
    // for the top-level caches at the beginning of a memory access.
    // TODO: Eventually, this latency should be moved to represent the actual
    // cache access latency portion of the memory access. This will require
    // changing cache controller protocol files to assess the latency on the
    // access response path.
    Cycles latency(0);  // Initialize to zero to catch misconfigured latency
    if (secondary_type == RubyRequestType_IFETCH)
        latency = m_inst_cache_hit_latency;
    else
        latency = m_data_cache_hit_latency;

    // Send the message to the cache controller
    assert(latency > 0);

    assert(m_mandatory_q_ptr != NULL);
    m_mandatory_q_ptr->enqueue(msg, latency);
}
template <class KEY, class VALUE>
ostream &
operator<<(ostream &out, const m5::hash_map<KEY, VALUE> &map)
{
    typename m5::hash_map<KEY, VALUE>::const_iterator i = map.begin();
    typename m5::hash_map<KEY, VALUE>::const_iterator end = map.end();

    for (; i != end; ++i)
        out << " " << i->first << "=" << i->second;

    return out;
}
void
Sequencer::print(ostream& out) const
{
    out << "[Sequencer: " << m_version
        << ", outstanding requests: " << m_outstanding_count
        << ", read request table: " << m_readRequestTable
        << ", write request table: " << m_writeRequestTable
        << "]";
}
// This can be called from setState whenever coherence permissions are
// upgraded; when invoked, coherence violations will be checked for the
// given block.
void
Sequencer::checkCoherence(Addr addr)
{
#ifdef CHECK_COHERENCE
    m_ruby_system->checkGlobalCoherenceInvariant(addr);
#endif
}
void
Sequencer::recordRequestType(SequencerRequestType requestType) {
    DPRINTF(RubyStats, "Recorded statistic: %s\n",
            SequencerRequestType_to_string(requestType));
}
void
Sequencer::evictionCallback(Addr address)
{
    ruby_eviction_callback(address);
}
void
Sequencer::regStats()
{
    m_store_waiting_on_load
        .name(name() + ".store_waiting_on_load")
        .desc("Number of times a store aliased with a pending load")
        .flags(Stats::nozero);
    m_store_waiting_on_store
        .name(name() + ".store_waiting_on_store")
        .desc("Number of times a store aliased with a pending store")
        .flags(Stats::nozero);
    m_load_waiting_on_load
        .name(name() + ".load_waiting_on_load")
        .desc("Number of times a load aliased with a pending load")
        .flags(Stats::nozero);
    m_load_waiting_on_store
        .name(name() + ".load_waiting_on_store")
        .desc("Number of times a load aliased with a pending store")
        .flags(Stats::nozero);

    // These statistical variables are not for display.
    // The profiler will collate these across different
    // sequencers and display those collated statistics.
    m_outstandReqHist.init(10);
    m_latencyHist.init(10);
    m_hitLatencyHist.init(10);
    m_missLatencyHist.init(10);

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_typeLatencyHist.push_back(new Stats::Histogram());
        m_typeLatencyHist[i]->init(10);

        m_hitTypeLatencyHist.push_back(new Stats::Histogram());
        m_hitTypeLatencyHist[i]->init(10);

        m_missTypeLatencyHist.push_back(new Stats::Histogram());
        m_missTypeLatencyHist[i]->init(10);
    }

    for (int i = 0; i < MachineType_NUM; i++) {
        m_hitMachLatencyHist.push_back(new Stats::Histogram());
        m_hitMachLatencyHist[i]->init(10);

        m_missMachLatencyHist.push_back(new Stats::Histogram());
        m_missMachLatencyHist[i]->init(10);

        m_IssueToInitialDelayHist.push_back(new Stats::Histogram());
        m_IssueToInitialDelayHist[i]->init(10);

        m_InitialToForwardDelayHist.push_back(new Stats::Histogram());
        m_InitialToForwardDelayHist[i]->init(10);

        m_ForwardToFirstResponseDelayHist.push_back(new Stats::Histogram());
        m_ForwardToFirstResponseDelayHist[i]->init(10);

        m_FirstResponseToCompletionDelayHist.push_back(new Stats::Histogram());
        m_FirstResponseToCompletionDelayHist[i]->init(10);
    }

    for (int i = 0; i < RubyRequestType_NUM; i++) {
        m_hitTypeMachLatencyHist.push_back(std::vector<Stats::Histogram *>());
        m_missTypeMachLatencyHist.push_back(std::vector<Stats::Histogram *>());

        for (int j = 0; j < MachineType_NUM; j++) {
            m_hitTypeMachLatencyHist[i].push_back(new Stats::Histogram());
            m_hitTypeMachLatencyHist[i][j]->init(10);

            m_missTypeMachLatencyHist[i].push_back(new Stats::Histogram());
            m_missTypeMachLatencyHist[i][j]->init(10);
        }
    }
}