2 * Copyright (c) 2011 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * Authors: Korey Sewell
35 #include "arch/isa_traits.hh"
36 #include "arch/locked_mem.hh"
37 #include "arch/predecoder.hh"
38 #include "arch/utility.hh"
39 #include "config/the_isa.hh"
40 #include "cpu/inorder/resources/cache_unit.hh"
41 #include "cpu/inorder/resources/fetch_unit.hh"
42 #include "cpu/inorder/cpu.hh"
43 #include "cpu/inorder/pipeline_traits.hh"
44 #include "cpu/inorder/resource_pool.hh"
45 #include "debug/Activity.hh"
46 #include "debug/InOrderCachePort.hh"
47 #include "debug/InOrderStall.hh"
48 #include "debug/RefCount.hh"
49 #include "debug/ThreadModel.hh"
50 #include "mem/request.hh"
// File-scope using-directives pulling in the ISA-specific and pipeline-trait
// namespaces used throughout this translation unit.
// NOTE(review): this listing is extraction-garbled — the trailing ';' of each
// statement landed on its own line and original line numbers are fused onto
// the text. Kept byte-identical.
53 using namespace TheISA
;
54 using namespace ThePipeline
;
// Constructor: forwards the resource parameters (name, id, width, latency,
// CPU back-pointer, pipeline params) to the CacheUnit base class, records the
// machine-instruction size and the configured fetch-buffer size, then
// allocates one Predecoder per hardware thread slot (0..MaxThreads-1).
// NOTE(review): the body's opening/closing braces are missing from this
// garbled listing (original-line-number gaps 60->62, 63->66); code below is
// kept byte-identical to what is visible.
56 FetchUnit::FetchUnit(string res_name
, int res_id
, int res_width
,
57 int res_latency
, InOrderCPU
*_cpu
,
58 ThePipeline::Params
*params
)
59 : CacheUnit(res_name
, res_id
, res_width
, res_latency
, _cpu
, params
),
60 instSize(sizeof(TheISA::MachInst
)), fetchBuffSize(params
->fetchBuffSize
)
// Allocate a per-thread predecoder; the NULL argument is the thread context,
// which is set later via setTC() in createMachInst().
62 for (int tid
= 0; tid
< MaxThreads
; tid
++)
63 predecoder
[tid
] = new Predecoder(NULL
);
// Destructor: releases the cache-block data buffers owned by the entries of
// both the fetchBuffer (filled blocks) and pendingFetch (in-flight blocks)
// lists. For pending entries the data pointer is only deleted when non-NULL,
// since a pending block's data may not have arrived yet.
// NOTE(review): original lines 72-77 and everything after line 82 are missing
// from this listing (presumably the iterator advances, deletion of the
// FetchBlock nodes themselves, and list cleanup) — TODO confirm against the
// full source. Visible code kept byte-identical.
66 FetchUnit::~FetchUnit()
// Drain the fetch buffer, freeing each entry's block data.
68 std::list
<FetchBlock
*>::iterator fetch_it
= fetchBuffer
.begin();
69 std::list
<FetchBlock
*>::iterator end_it
= fetchBuffer
.end();
70 while (fetch_it
!= end_it
) {
71 delete (*fetch_it
)->block
;
// Drain the pending-fetch list; block data may be NULL if the cache
// access never completed.
78 std::list
<FetchBlock
*>::iterator pend_it
= pendingFetch
.begin();
79 std::list
<FetchBlock
*>::iterator pend_end
= pendingFetch
.end();
80 while (pend_it
!= pend_end
) {
81 if ((*pend_it
)->block
) {
82 delete (*pend_it
)->block
;
// createMachInst: given an iterator to a valid, filled FetchBlock and a
// dynamic instruction, pulls the raw machine instruction for the inst's PC
// out of the cached block, runs it through the per-thread predecoder, decodes
// it to a StaticInst, and attaches the result (plus updated PC state) to the
// instruction.
// NOTE(review): several original lines are missing here (e.g. 93-95, 101-102,
// 107, 110-113 by the fused numbering), including part of the parameter list
// and the declarations of mach_inst/ext_inst. Visible code kept byte-identical.
92 FetchUnit::createMachInst(std::list
<FetchBlock
*>::iterator fetch_it
,
// Compute the instruction's offset (in instruction-sized words) within
// its cache-block-aligned fetch block.
96 Addr block_addr
= cacheBlockAlign(inst
->getMemAddr());
97 Addr fetch_addr
= inst
->getMemAddr();
98 unsigned fetch_offset
= (fetch_addr
- block_addr
) / instSize
;
99 ThreadID tid
= inst
->readTid();
100 TheISA::PCState instPC
= inst
->pcState();
103 DPRINTF(InOrderCachePort
, "Creating instruction [sn:%i] w/fetch data @"
104 "addr:%08p block:%08p\n", inst
->seqNum
, fetch_addr
, block_addr
);
// The block must have been filled by a completed cache access.
106 assert((*fetch_it
)->valid
);
// Reinterpret the raw block bytes as an array of machine instructions
// and convert the selected word from guest to host byte order.
108 TheISA::MachInst
*fetchInsts
=
109 reinterpret_cast<TheISA::MachInst
*>((*fetch_it
)->block
);
112 TheISA::gtoh(fetchInsts
[fetch_offset
]);
// Feed the raw bytes to this thread's predecoder and extract the
// ISA-level extended machine instruction once it is ready.
114 predecoder
[tid
]->setTC(cpu
->thread
[tid
]->getTC());
115 predecoder
[tid
]->moreBytes(instPC
, inst
->instAddr(), mach_inst
);
116 assert(predecoder
[tid
]->extMachInstReady());
117 ext_inst
= predecoder
[tid
]->getExtMachInst(instPC
);
// Commit the (possibly advanced) PC state and the decoded StaticInst
// back onto the dynamic instruction.
119 inst
->pcState(instPC
);
120 inst
->setStaticInst(decoder
.decode(ext_inst
, instPC
.instAddr()));
// removeAddrDependency: clears the instruction's memory-address marker so it
// no longer appears to depend on an outstanding fetch address.
// NOTE(review): surrounding lines (return type, braces) are missing from this
// garbled listing; visible code kept byte-identical.
124 FetchUnit::removeAddrDependency(DynInstPtr inst
)
126 inst
->unsetMemAddr();
// getRequest: binds an incoming fetch request to the resource slot's
// CacheRequest. Requires the instruction's memory (fetch) address to have
// been set beforehand, and only accepts the InitiateFetch schedule command;
// the underlying memory command is always a ReadReq.
// NOTE(review): parts of this function (braces, any return value) are missing
// from this garbled listing; visible code kept byte-identical.
130 FetchUnit::getRequest(DynInstPtr inst
, int stage_num
, int res_idx
,
131 int slot_num
, unsigned cmd
)
// Look up this slot's pre-allocated request object; it must be a
// CacheRequest for this resource.
133 ScheduleEntry
* sched_entry
= *inst
->curSkedEntry
;
134 CacheRequest
* cache_req
= dynamic_cast<CacheRequest
*>(reqs
[slot_num
]);
// A fetch without a target address is a programming error upstream.
136 if (!inst
->validMemAddr()) {
137 panic("Mem. Addr. must be set before requesting cache access\n");
140 assert(sched_entry
->cmd
== InitiateFetch
);
142 DPRINTF(InOrderCachePort
,
143 "[tid:%i]: Fetch request from [sn:%i] for addr %08p\n",
144 inst
->readTid(), inst
->seqNum
, inst
->getMemAddr());
// Populate the slot's request: schedule command plus a plain memory read.
146 cache_req
->setRequest(inst
, stage_num
, id
, slot_num
,
147 sched_entry
->cmd
, MemCmd::ReadReq
,
148 inst
->curSkedEntry
->idx
);
// setupMemRequest: lazily builds the low-level memory Request object for a
// fetch, targeting the cache-block-aligned address (whole-block fetch) with
// the given access size and flags. Only allocates when no Request exists yet
// on this CacheRequest.
// NOTE(review): the listing is missing lines here (e.g. the tail of the
// Request constructor call at original line 162/163 and the enclosing
// braces); visible code kept byte-identical.
154 FetchUnit::setupMemRequest(DynInstPtr inst
, CacheReqPtr cache_req
,
155 int acc_size
, int flags
)
157 ThreadID tid
= inst
->readTid();
// Fetches operate on whole, aligned cache blocks.
158 Addr aligned_addr
= cacheBlockAlign(inst
->getMemAddr());
159 if (cache_req
->memReq
== NULL
) {
161 new Request(tid
, aligned_addr
, acc_size
, flags
,
162 cpu
->instMasterId(), inst
->instAddr(), cpu
->readCpuId(),
164 DPRINTF(InOrderCachePort
, "[sn:%i] Created memReq @%x, ->%x\n",
165 inst
->seqNum
, &cache_req
->memReq
, cache_req
->memReq
);
// findBlock: linear search of a FetchBlock list for an entry matching both
// the address-space id and the block address; returns an iterator to the
// match (the visible code shows the match test; the return statements fall in
// lines missing from this listing).
// NOTE(review): the block_addr parameter declaration and the function's
// return/closing lines are missing from this garbled listing; visible code
// kept byte-identical.
169 std::list
<FetchUnit::FetchBlock
*>::iterator
170 FetchUnit::findBlock(std::list
<FetchBlock
*> &fetch_blocks
, int asid
,
173 std::list
<FetchBlock
*>::iterator fetch_it
= fetch_blocks
.begin();
174 std::list
<FetchBlock
*>::iterator end_it
= fetch_blocks
.end();
176 while (fetch_it
!= end_it
) {
// A block matches only if both its ASID and its aligned address match.
177 if ((*fetch_it
)->asid
== asid
&&
178 (*fetch_it
)->addr
== block_addr
) {
// findReplacementBlock: scans the fetch buffer for a block with no
// outstanding instructions (cnt == 0) that can be evicted to make room for a
// new fetch; logs blocks that are still in use. The return paths fall in
// lines missing from this listing (presumably returning the found iterator,
// or fetchBuffer.end() when every block is busy — TODO confirm).
188 std::list
<FetchUnit::FetchBlock
*>::iterator
189 FetchUnit::findReplacementBlock()
191 std::list
<FetchBlock
*>::iterator fetch_it
= fetchBuffer
.begin();
192 std::list
<FetchBlock
*>::iterator end_it
= fetchBuffer
.end();
194 while (fetch_it
!= end_it
) {
// cnt tracks instructions still pending on this block; 0 means evictable.
195 if ((*fetch_it
)->cnt
== 0) {
198 DPRINTF(InOrderCachePort
, "Block %08p has %i insts pending.\n",
199 (*fetch_it
)->addr
, (*fetch_it
)->cnt
);
// markBlockUsed: LRU-style touch — moves the given fetch-buffer entry to the
// back of the list, which represents the most-recently-used position (see the
// original comment below). No-op when handed the end iterator.
208 FetchUnit::markBlockUsed(std::list
<FetchBlock
*>::iterator block_it
)
210 // Move block from whatever location it is in fetch buffer
211 // to the back (represents most-recently-used location)
212 if (block_it
!= fetchBuffer
.end()) {
// erase() then push_back() relocates the node; std::list::erase only
// invalidates the erased iterator, so mru_blk stays valid.
213 FetchBlock
*mru_blk
= *block_it
;
214 fetchBuffer
.erase(block_it
);
215 fetchBuffer
.push_back(mru_blk
);
// blocksInUse: counts fetch-buffer blocks that still have instructions
// pending on them (cnt > 0). The accumulator increment and return statement
// fall in lines missing from this garbled listing — visible code kept
// byte-identical.
220 FetchUnit::blocksInUse()
222 std::list
<FetchBlock
*>::iterator fetch_it
= fetchBuffer
.begin();
223 std::list
<FetchBlock
*>::iterator end_it
= fetchBuffer
.end();
226 while (fetch_it
!= end_it
) {
227 if ((*fetch_it
)->cnt
> 0)
// clearFetchBuffer: walks the fetch buffer freeing each entry's block data
// (array delete, matching the new uint8_t[cacheBlkSize] allocations made
// elsewhere in this file). The deletion of the FetchBlock nodes themselves
// and the list clear fall in lines missing from this listing — TODO confirm.
239 std::list
237 FetchUnit::clearFetchBuffer()
239 std::list
<FetchBlock
*>::iterator fetch_it
= fetchBuffer
.begin();
240 std::list
<FetchBlock
*>::iterator end_it
= fetchBuffer
.end();
242 while (fetch_it
!= end_it
) {
243 if ((*fetch_it
)->block
) {
244 delete [] (*fetch_it
)->block
;
// execute: main per-cycle entry point for a fetch slot. Dispatches on the
// slot's CacheRequest command (the switch's case labels fall in lines missing
// from this listing, but two phases are visible: initiating a fetch and
// completing one):
//   * Initiate: if the block is already pending, piggy-back on it (cnt++);
//     if it is already in the fetch buffer, mark the request to be satisfied
//     from the buffer (fetchBufferFill); otherwise, if there is room, do the
//     ITLB access and start a cache access for the whole aligned block.
//   * Complete: either satisfy the instruction straight from the fetch
//     buffer, or — once the memory access has completed — evict a
//     replacement block if the buffer is full, decode the instruction from
//     the newly arrived pending block, and promote that block from
//     pendingFetch into fetchBuffer.
// NOTE(review): many interior lines are missing (returns, case labels,
// closing braces, several DPRINTF argument lines); visible code kept
// byte-identical.
253 FetchUnit::execute(int slot_num
)
255 CacheReqPtr cache_req
= dynamic_cast<CacheReqPtr
>(reqs
[slot_num
]);
// Cannot start a new fetch while the cache port is blocked; fail the
// request so it retries later.
258 if (cachePortBlocked
&& cache_req
->cmd
== InitiateFetch
) {
259 DPRINTF(InOrderCachePort
, "Cache Port Blocked. Cannot Access\n");
260 cache_req
->done(false);
264 DynInstPtr inst
= cache_req
->inst
;
265 ThreadID tid
= inst
->readTid();
266 Addr block_addr
= cacheBlockAlign(inst
->getMemAddr());
267 int asid
= cpu
->asid
[tid
];
// A pre-existing fault (e.g. from translation) is just forwarded to the
// next stage rather than fetched through.
269 if (inst
->fault
!= NoFault
) {
270 DPRINTF(InOrderCachePort
,
271 "[tid:%i]: [sn:%i]: Detected %s fault @ %x. Forwarding to "
272 "next stage.\n", tid
, inst
->seqNum
, inst
->fault
->name(),
273 cacheBlockAlign(inst
->getMemAddr()));
274 finishCacheUnitReq(inst
, cache_req
);
278 switch (cache_req
->cmd
)
282 // Check to see if we've already got this request buffered
283 // or pending to be buffered
284 bool do_fetch
= true;
285 int total_pending
= pendingFetch
.size() + blocksInUse();
// Case 1: the block is already being fetched — join it and bump its
// pending-instruction count.
287 std::list
<FetchBlock
*>::iterator pending_it
;
288 pending_it
= findBlock(pendingFetch
, asid
, block_addr
);
289 if (pending_it
!= pendingFetch
.end()) {
290 (*pending_it
)->cnt
++;
293 DPRINTF(InOrderCachePort
, "%08p is a pending fetch block "
294 "(pending:%i).\n", block_addr
,
// Case 2: there is still room to track a new block — check whether it
// is already resident in the fetch buffer.
296 } else if (total_pending
< fetchBuffSize
) {
297 std::list
<FetchBlock
*>::iterator buff_it
;
298 buff_it
= findBlock(fetchBuffer
, asid
, block_addr
);
299 if (buff_it
!= fetchBuffer
.end()) {
303 DPRINTF(InOrderCachePort
, "%08p is in fetch buffer "
304 "(pending:%i).\n", block_addr
, (*buff_it
)->cnt
);
// Resident block: no cache access needed — mark this request to be
// filled from the fetch buffer at completion time.
309 DPRINTF(InOrderCachePort
, "Inst. [sn:%i] marked to be filled "
310 "through fetch buffer.\n", inst
->seqNum
);
311 cache_req
->fetchBufferFill
= true;
312 cache_req
->setCompleted(true);
316 // Check to see if there is room in the fetchbuffer for this instruction.
317 // If not, block this request.
318 if (total_pending
>= fetchBuffSize
) {
319 DPRINTF(InOrderCachePort
, "No room available in fetch buffer.\n");
320 cache_req
->done(false);
// Translate the fetch address (whole cache block, instruction-fetch
// access, execute permission).
324 doTLBAccess(inst
, cache_req
, cacheBlkSize
, Request::INST_FETCH
, TheISA::TLB::Execute
);
326 if (inst
->fault
== NoFault
) {
327 DPRINTF(InOrderCachePort
,
328 "[tid:%u]: Initiating fetch access to %s for "
329 "addr:%#x (block:%#x)\n", tid
, name(),
330 cache_req
->inst
->getMemAddr(), block_addr
);
// Buffer to receive the whole cache block.
332 cache_req
->reqData
= new uint8_t[cacheBlkSize
];
334 inst
->setCurResSlot(slot_num
);
// If the access went out to memory, record the block as pending.
338 if (cache_req
->isMemAccPending()) {
339 pendingFetch
.push_back(new FetchBlock(asid
, block_addr
));
341 // mark replacement block
// TLB access faulted: forward the fault instead of fetching.
349 if (inst
->fault
!= NoFault
) {
350 DPRINTF(InOrderCachePort
,
351 "[tid:%i]: [sn:%i]: Detected %s fault @ %x. Forwarding to "
352 "next stage.\n", tid
, inst
->seqNum
, inst
->fault
->name(),
354 finishCacheUnitReq(inst
, cache_req
);
// Completion path, variant A: request was marked to be satisfied from
// the fetch buffer.
358 if (cache_req
->fetchBufferFill
) {
359 // Block request if it's depending on a previous fetch, but it hasnt made it yet
360 std::list
<FetchBlock
*>::iterator fetch_it
= findBlock(fetchBuffer
, asid
, block_addr
);
361 if (fetch_it
== fetchBuffer
.end()) {
362 DPRINTF(InOrderCachePort
, "%#x not available yet\n",
364 cache_req
->setCompleted(false);
368 // Make New Instruction
369 createMachInst(fetch_it
, inst
);
370 if (inst
->traceData
) {
371 inst
->traceData
->setStaticInst(inst
->staticInst
);
372 inst
->traceData
->setPC(inst
->pcState());
375 // FetchBuffer Book-Keeping
377 assert((*fetch_it
)->cnt
>= 0);
378 markBlockUsed(fetch_it
);
// Completion path, variant B: the cache access for this block finished.
384 if (cache_req
->isMemAccComplete()) {
385 if (fetchBuffer
.size() >= fetchBuffSize
) {
386 // If there is no replacement block, then we'll just have
387 // to wait till that gets cleared before satisfying the fetch
388 // for this instruction
389 std::list
<FetchBlock
*>::iterator repl_it
=
390 findReplacementBlock();
391 if (repl_it
== fetchBuffer
.end()) {
392 DPRINTF(InOrderCachePort
, "Unable to find replacement block"
393 " and complete fetch.\n");
394 cache_req
->setCompleted(false);
// Evict the chosen replacement block, freeing its data array.
398 delete [] (*repl_it
)->block
;
400 fetchBuffer
.erase(repl_it
);
403 DPRINTF(InOrderCachePort
,
404 "[tid:%i]: Completing Fetch Access for [sn:%i]\n",
407 // Make New Instruction
408 std::list
<FetchBlock
*>::iterator fetch_it
=
409 findBlock(pendingFetch
, asid
, block_addr
);
411 assert(fetch_it
!= pendingFetch
.end());
412 assert((*fetch_it
)->valid
);
414 createMachInst(fetch_it
, inst
);
415 if (inst
->traceData
) {
416 inst
->traceData
->setStaticInst(inst
->staticInst
);
417 inst
->traceData
->setPC(inst
->pcState());
421 // Update instructions waiting on new fetch block
422 FetchBlock
*new_block
= (*fetch_it
);
424 assert(new_block
->cnt
>= 0);
426 // Finally, update FetchBuffer w/Pending Block into the
// Promote the now-filled block from pendingFetch into the buffer.
428 pendingFetch
.erase(fetch_it
);
429 fetchBuffer
.push_back(new_block
);
431 DPRINTF(InOrderCachePort
, "[tid:%i]: Instruction [sn:%i] is: %s\n",
433 inst
->staticInst
->disassemble(inst
->instAddr()));
435 inst
->unsetMemAddr();
// Memory access not complete yet: stall this request.
439 DPRINTF(InOrderCachePort
,
440 "[tid:%i]: [sn:%i]: Unable to Complete Fetch Access\n",
442 DPRINTF(InOrderStall
,
443 "STALL: [tid:%i]: Fetch miss from %08p\n",
444 tid
, cache_req
->inst
->instAddr());
445 cache_req
->setCompleted(false);
446 // NOTE: For SwitchOnCacheMiss ThreadModel, we *don't* switch on
447 // fetch miss, but we could ...
448 // cache_req->setMemStall(true);
// Default case: unknown schedule command is a fatal simulator error.
453 fatal("Unrecognized command to %s", resName
);
// processCacheCompletion: callback invoked when a fetch packet returns from
// the memory system. Drops squashed packets, locates the matching slot
// request, copies the returned cache block into the corresponding
// pendingFetch entry (marking it valid), flags the request's memory access as
// complete, and wakes the CPU/pipeline stage (including reactivating a thread
// that was switched out on a cache miss under the SwitchOnCacheMiss model).
// NOTE(review): interior lines are missing (e.g. the null-check around
// cache_req before the panic at original line 486, several DPRINTF argument
// lines); visible code kept byte-identical.
458 FetchUnit::processCacheCompletion(PacketPtr pkt
)
460 // Cast to correct packet type
461 // @todo: use pkt Sender state here to be consistent with other
463 CacheReqPacket
* cache_pkt
= dynamic_cast<CacheReqPacket
*>(pkt
);
466 DPRINTF(InOrderCachePort
, "Finished request for %x\n",
467 cache_pkt
->getAddr());
// Squashed accesses are handled (and the packet consumed) elsewhere.
469 if (processSquash(cache_pkt
))
472 Addr block_addr
= cacheBlockAlign(cache_pkt
->cacheReq
->
473 getInst()->getMemAddr());
475 DPRINTF(InOrderCachePort
,
476 "[tid:%u]: [sn:%i]: Waking from fetch access to addr:%#x(phys:%#x), size:%i\n",
477 cache_pkt
->cacheReq
->getInst()->readTid(),
478 cache_pkt
->cacheReq
->getInst()->seqNum
,
479 block_addr
, cache_pkt
->getAddr(), cache_pkt
->getSize());
481 // Cast to correct request type
482 CacheRequest
*cache_req
= dynamic_cast<CacheReqPtr
>(
483 findRequest(cache_pkt
->cacheReq
->getInst(), cache_pkt
->instIdx
));
// No matching slot means bookkeeping is broken — fatal.
486 panic("[tid:%u]: [sn:%i]: Can't find slot for fetch access to "
487 "addr. %08p\n", cache_pkt
->cacheReq
->getInst()->readTid(),
488 cache_pkt
->cacheReq
->getInst()->seqNum
,
492 // Get resource request info
493 unsigned stage_num
= cache_req
->getStageNum();
494 DynInstPtr inst
= cache_req
->inst
;
495 ThreadID tid
= cache_req
->inst
->readTid();
496 short asid
= cpu
->asid
[tid
];
498 assert(!cache_req
->isSquashed());
499 assert(inst
->curSkedEntry
->cmd
== CompleteFetch
);
501 DPRINTF(InOrderCachePort
,
502 "[tid:%u]: [sn:%i]: Processing fetch access for block %#x\n",
503 tid
, inst
->seqNum
, block_addr
);
// The block must have been registered as pending when the access began.
505 std::list
<FetchBlock
*>::iterator pend_it
= findBlock(pendingFetch
, asid
,
507 assert(pend_it
!= pendingFetch
.end());
509 // Copy Data to pendingFetch queue...
510 (*pend_it
)->block
= new uint8_t[cacheBlkSize
];
511 memcpy((*pend_it
)->block
, cache_pkt
->getPtr
<uint8_t>(), cacheBlkSize
);
512 (*pend_it
)->valid
= true;
514 cache_req
->setMemAccPending(false);
515 cache_req
->setMemAccCompleted();
// SwitchOnCacheMiss model: a thread switched out on this miss can now be
// reactivated.
517 if (cache_req
->isMemStall() &&
518 cpu
->threadModel
== InOrderCPU::SwitchOnCacheMiss
) {
519 DPRINTF(InOrderCachePort
, "[tid:%u] Waking up from Cache Miss.\n",
522 cpu
->activateContext(tid
);
524 DPRINTF(ThreadModel
, "Activating [tid:%i] after return from cache"
528 // Wake up the CPU (if it went to sleep and was waiting on this
529 // completion event).
532 DPRINTF(Activity
, "[tid:%u] Activating %s due to cache completion\n",
533 tid
, cpu
->pipelineStage
[stage_num
]->name());
535 cpu
->switchToActive(stage_num
);
// squashCacheRequest: undoes the fetch-buffer bookkeeping for a squashed
// request. Decrements the pending-instruction count on the matching block in
// either the fetch buffer or the pending-fetch list (the decrements
// themselves fall in lines missing from this listing — TODO confirm); a
// pending block whose count reaches zero has its data freed and is removed
// from pendingFetch. Finally delegates to the base-class squash handling.
539 FetchUnit::squashCacheRequest(CacheReqPtr req_ptr
)
541 DynInstPtr inst
= req_ptr
->getInst();
542 ThreadID tid
= inst
->readTid();
543 Addr block_addr
= cacheBlockAlign(inst
->getMemAddr());
544 int asid
= cpu
->asid
[tid
];
546 // Check Fetch Buffer (or pending fetch) for this block and
547 // update pending counts
548 std::list
<FetchBlock
*>::iterator buff_it
= findBlock(fetchBuffer
,
551 if (buff_it
!= fetchBuffer
.end()) {
553 DPRINTF(InOrderCachePort
, "[sn:%i] Removing Pending Access "
554 "for Fetch Buffer block %08p (cnt=%i)\n", inst
->seqNum
,
555 block_addr
, (*buff_it
)->cnt
);
556 assert((*buff_it
)->cnt
>= 0);
// Not in the fetch buffer: look in the pending-fetch list instead.
558 std::list
<FetchBlock
*>::iterator block_it
= findBlock(pendingFetch
,
561 if (block_it
!= pendingFetch
.end()) {
563 DPRINTF(InOrderCachePort
, "[sn:%i] Removing Pending Access "
564 "for Pending Buffer Block %08p (cnt=%i)\n",
566 block_addr
, (*block_it
)->cnt
);
567 assert((*block_it
)->cnt
>= 0);
// Last instruction waiting on this pending block: free its data (if
// any arrived) and drop the entry entirely.
568 if ((*block_it
)->cnt
== 0) {
569 if ((*block_it
)->block
) {
570 delete [] (*block_it
)->block
;
573 pendingFetch
.erase(block_it
);
// Let the generic cache-unit squash logic clean up the request itself.
578 CacheUnit::squashCacheRequest(req_ptr
);
// trap: fault/trap entry point for the fetch unit. Resets the faulting
// thread's predecoder state, squashes all in-flight fetch work for that
// thread across every pipeline stage (NumStages - 1 downward), then asserts
// that no fetch blocks remain in use and no fetches are pending.
// NOTE(review): this function may continue past the end of the visible chunk
// (the listing ends at the original-line-594 comment); visible code kept
// byte-identical.
582 FetchUnit::trap(Fault fault
, ThreadID tid
, DynInstPtr inst
)
585 predecoder
[tid
]->reset();
587 //@todo: squash using dummy inst seq num
588 squash(NULL
, NumStages
- 1, 0, tid
);
590 //@todo: make sure no blocks are in use
591 assert(blocksInUse() == 0);
592 assert(pendingFetch
.size() == 0);
594 //@todo: clear pendingFetch and fetchBuffer