/*
 * Copyright (c) 2005-2006 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Korey Sewell
 */
#include <algorithm>
#include <cctype>
#include <list>
#include <string>

#include "cpu/o3/lsq.hh"
39 LSQ<Impl>::DcachePort::recvAtomic(PacketPtr pkt)
41 panic("O3CPU model does not work with atomic mode!");
47 LSQ<Impl>::DcachePort::recvFunctional(PacketPtr pkt)
49 panic("O3CPU doesn't expect recvFunctional callback!");
54 LSQ<Impl>::DcachePort::recvStatusChange(Status status)
56 if (status == RangeChange)
59 panic("O3CPU doesn't expect recvStatusChange callback!");
64 LSQ<Impl>::DcachePort::recvTiming(PacketPtr pkt)
66 lsq->thread[pkt->req->getThreadNum()].completeDataAccess(pkt);
72 LSQ<Impl>::DcachePort::recvRetry()
74 if (lsq->retryTid == -1)
76 //Squashed, so drop it
79 lsq->thread[lsq->retryTid].recvRetry();
80 // Speculatively clear the retry Tid. This will get set again if
81 // the LSQUnit was unable to complete its access.
86 LSQ<Impl>::LSQ(Params *params)
87 : dcachePort(this), LQEntries(params->LQEntries),
88 SQEntries(params->SQEntries), numThreads(params->numberOfThreads),
91 DPRINTF(LSQ, "Creating LSQ object.\n");
93 //**********************************************/
94 //************ Handle SMT Parameters ***********/
95 //**********************************************/
96 std::string policy = params->smtLSQPolicy;
98 //Convert string to lowercase
99 std::transform(policy.begin(), policy.end(), policy.begin(),
100 (int(*)(int)) tolower);
102 //Figure out fetch policy
103 if (policy == "dynamic") {
106 maxLQEntries = LQEntries;
107 maxSQEntries = SQEntries;
109 DPRINTF(LSQ, "LSQ sharing policy set to Dynamic\n");
111 } else if (policy == "partitioned") {
112 lsqPolicy = Partitioned;
114 //@todo:make work if part_amt doesnt divide evenly.
115 maxLQEntries = LQEntries / numThreads;
116 maxSQEntries = SQEntries / numThreads;
118 DPRINTF(Fetch, "LSQ sharing policy set to Partitioned: "
119 "%i entries per LQ | %i entries per SQ",
120 maxLQEntries,maxSQEntries);
122 } else if (policy == "threshold") {
123 lsqPolicy = Threshold;
125 assert(params->smtLSQThreshold > LQEntries);
126 assert(params->smtLSQThreshold > SQEntries);
128 //Divide up by threshold amount
129 //@todo: Should threads check the max and the total
131 maxLQEntries = params->smtLSQThreshold;
132 maxSQEntries = params->smtLSQThreshold;
134 DPRINTF(LSQ, "LSQ sharing policy set to Threshold: "
135 "%i entries per LQ | %i entries per SQ",
136 maxLQEntries,maxSQEntries);
139 assert(0 && "Invalid LSQ Sharing Policy.Options Are:{Dynamic,"
140 "Partitioned, Threshold}");
144 for (int tid=0; tid < numThreads; tid++) {
145 thread[tid].init(params, this, maxLQEntries, maxSQEntries, tid);
146 thread[tid].setDcachePort(&dcachePort);
153 LSQ<Impl>::name() const
155 return iewStage->name() + ".lsq";
160 LSQ<Impl>::regStats()
163 for (int tid=0; tid < numThreads; tid++) {
164 thread[tid].regStats();
170 LSQ<Impl>::regStats()
173 for (int tid=0; tid < numThreads; tid++) {
174 thread[tid].regStats();
180 LSQ<Impl>::setActiveThreads(std::list<unsigned> *at_ptr)
182 activeThreads = at_ptr;
183 assert(activeThreads != 0);
188 LSQ<Impl>::setCPU(O3CPU *cpu_ptr)
192 dcachePort.setName(name());
194 for (int tid=0; tid < numThreads; tid++) {
195 thread[tid].setCPU(cpu_ptr);
201 LSQ<Impl>::setIEW(IEW *iew_ptr)
205 for (int tid=0; tid < numThreads; tid++) {
206 thread[tid].setIEW(iew_ptr);
210 template <class Impl>
212 LSQ<Impl>::switchOut()
214 for (int tid = 0; tid < numThreads; tid++) {
215 thread[tid].switchOut();
219 template <class Impl>
221 LSQ<Impl>::takeOverFrom()
223 for (int tid = 0; tid < numThreads; tid++) {
224 thread[tid].takeOverFrom();
228 template <class Impl>
230 LSQ<Impl>::entryAmount(int num_threads)
232 if (lsqPolicy == Partitioned) {
233 return LQEntries / num_threads;
239 template <class Impl>
241 LSQ<Impl>::resetEntries()
243 if (lsqPolicy != Dynamic || numThreads > 1) {
244 int active_threads = (*activeThreads).size();
246 std::list<unsigned>::iterator threads = (*activeThreads).begin();
247 std::list<unsigned>::iterator list_end = (*activeThreads).end();
251 if (lsqPolicy == Partitioned) {
252 maxEntries = LQEntries / active_threads;
253 } else if (lsqPolicy == Threshold && active_threads == 1) {
254 maxEntries = LQEntries;
256 maxEntries = LQEntries;
259 while (threads != list_end) {
260 resizeEntries(maxEntries,*threads++);
267 LSQ<Impl>::removeEntries(unsigned tid)
269 thread[tid].clearLQ();
270 thread[tid].clearSQ();
275 LSQ<Impl>::resizeEntries(unsigned size,unsigned tid)
277 thread[tid].resizeLQ(size);
278 thread[tid].resizeSQ(size);
285 std::list<unsigned>::iterator active_threads = (*activeThreads).begin();
287 while (active_threads != (*activeThreads).end()) {
288 unsigned tid = *active_threads++;
296 LSQ<Impl>::insertLoad(DynInstPtr &load_inst)
298 unsigned tid = load_inst->threadNumber;
300 thread[tid].insertLoad(load_inst);
305 LSQ<Impl>::insertStore(DynInstPtr &store_inst)
307 unsigned tid = store_inst->threadNumber;
309 thread[tid].insertStore(store_inst);
314 LSQ<Impl>::executeLoad(DynInstPtr &inst)
316 unsigned tid = inst->threadNumber;
318 return thread[tid].executeLoad(inst);
323 LSQ<Impl>::executeStore(DynInstPtr &inst)
325 unsigned tid = inst->threadNumber;
327 return thread[tid].executeStore(inst);
332 LSQ<Impl>::writebackStores()
334 std::list<unsigned>::iterator active_threads = (*activeThreads).begin();
336 while (active_threads != (*activeThreads).end()) {
337 unsigned tid = *active_threads++;
339 if (numStoresToWB(tid) > 0) {
340 DPRINTF(Writeback,"[tid:%i] Writing back stores. %i stores "
341 "available for Writeback.\n", tid, numStoresToWB(tid));
344 thread[tid].writebackStores();
350 LSQ<Impl>::violation()
352 /* Answers: Does Anybody Have a Violation?*/
353 std::list<unsigned>::iterator active_threads = (*activeThreads).begin();
355 while (active_threads != (*activeThreads).end()) {
356 unsigned tid = *active_threads++;
357 if (thread[tid].violation())
366 LSQ<Impl>::getCount()
370 std::list<unsigned>::iterator active_threads = (*activeThreads).begin();
372 while (active_threads != (*activeThreads).end()) {
373 unsigned tid = *active_threads++;
374 total += getCount(tid);
382 LSQ<Impl>::numLoads()
386 std::list<unsigned>::iterator active_threads = (*activeThreads).begin();
388 while (active_threads != (*activeThreads).end()) {
389 unsigned tid = *active_threads++;
390 total += numLoads(tid);
398 LSQ<Impl>::numStores()
402 std::list<unsigned>::iterator active_threads = (*activeThreads).begin();
404 while (active_threads != (*activeThreads).end()) {
405 unsigned tid = *active_threads++;
406 total += thread[tid].numStores();
414 LSQ<Impl>::numLoadsReady()
418 std::list<unsigned>::iterator active_threads = (*activeThreads).begin();
420 while (active_threads != (*activeThreads).end()) {
421 unsigned tid = *active_threads++;
422 total += thread[tid].numLoadsReady();
430 LSQ<Impl>::numFreeEntries()
434 std::list<unsigned>::iterator active_threads = (*activeThreads).begin();
436 while (active_threads != (*activeThreads).end()) {
437 unsigned tid = *active_threads++;
438 total += thread[tid].numFreeEntries();
446 LSQ<Impl>::numFreeEntries(unsigned tid)
448 //if( lsqPolicy == Dynamic )
449 //return numFreeEntries();
451 return thread[tid].numFreeEntries();
458 std::list<unsigned>::iterator active_threads = (*activeThreads).begin();
460 while (active_threads != (*activeThreads).end()) {
461 unsigned tid = *active_threads++;
462 if (! (thread[tid].lqFull() || thread[tid].sqFull()) )
471 LSQ<Impl>::isFull(unsigned tid)
473 //@todo: Change to Calculate All Entries for
475 if( lsqPolicy == Dynamic )
478 return thread[tid].lqFull() || thread[tid].sqFull();
485 std::list<unsigned>::iterator active_threads = (*activeThreads).begin();
487 while (active_threads != (*activeThreads).end()) {
488 unsigned tid = *active_threads++;
489 if (!thread[tid].lqFull())
498 LSQ<Impl>::lqFull(unsigned tid)
500 //@todo: Change to Calculate All Entries for
502 if( lsqPolicy == Dynamic )
505 return thread[tid].lqFull();
512 std::list<unsigned>::iterator active_threads = (*activeThreads).begin();
514 while (active_threads != (*activeThreads).end()) {
515 unsigned tid = *active_threads++;
525 LSQ<Impl>::sqFull(unsigned tid)
527 //@todo: Change to Calculate All Entries for
529 if( lsqPolicy == Dynamic )
532 return thread[tid].sqFull();
537 LSQ<Impl>::isStalled()
539 std::list<unsigned>::iterator active_threads = (*activeThreads).begin();
541 while (active_threads != (*activeThreads).end()) {
542 unsigned tid = *active_threads++;
543 if (!thread[tid].isStalled())
552 LSQ<Impl>::isStalled(unsigned tid)
554 if( lsqPolicy == Dynamic )
557 return thread[tid].isStalled();
562 LSQ<Impl>::hasStoresToWB()
564 std::list<unsigned>::iterator active_threads = (*activeThreads).begin();
566 if ((*activeThreads).empty())
569 while (active_threads != (*activeThreads).end()) {
570 unsigned tid = *active_threads++;
571 if (!hasStoresToWB(tid))
582 std::list<unsigned>::iterator active_threads = (*activeThreads).begin();
584 while (active_threads != (*activeThreads).end()) {
585 unsigned tid = *active_threads++;
595 LSQ<Impl>::dumpInsts()
597 std::list<unsigned>::iterator active_threads = (*activeThreads).begin();
599 while (active_threads != (*activeThreads).end()) {
600 unsigned tid = *active_threads++;
601 thread[tid].dumpInsts();