2 * Copyright (c) 2011 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * Authors: Korey Sewell
35 #include "arch/isa_traits.hh"
36 #include "arch/locked_mem.hh"
37 #include "arch/utility.hh"
38 #include "arch/predecoder.hh"
39 #include "config/the_isa.hh"
40 #include "cpu/inorder/resources/cache_unit.hh"
41 #include "cpu/inorder/resources/fetch_unit.hh"
42 #include "cpu/inorder/pipeline_traits.hh"
43 #include "cpu/inorder/cpu.hh"
44 #include "cpu/inorder/resource_pool.hh"
45 #include "mem/request.hh"
48 using namespace TheISA
;
49 using namespace ThePipeline
;
// Constructor: forwards all resource parameters to the CacheUnit base class,
// then records the size in bytes of one machine instruction (instSize) and
// the configured fetch-buffer depth (fetchBuffSize) from the CPU params.
// NOTE(review): the constructor body and remaining initializers are missing
// from this extraction (original lines 56-59) — verify against upstream.
51 FetchUnit::FetchUnit(string res_name
, int res_id
, int res_width
,
52 int res_latency
, InOrderCPU
*_cpu
,
53 ThePipeline::Params
*params
)
54 : CacheUnit(res_name
, res_id
, res_width
, res_latency
, _cpu
, params
),
// instSize = sizeof(TheISA::MachInst): used later to convert a byte offset
// within a cache block into an instruction index.
55 instSize(sizeof(TheISA::MachInst
)), fetchBuffSize(params
->fetchBuffSize
),
// createMachInst: decode the raw machine instruction for 'inst' out of the
// fetched cache block pointed to by 'fetch_it'. Computes the instruction's
// offset inside the block, runs it through the predecoder, and stores the
// resulting extended machine instruction and updated PC state back on the
// dynamic instruction. (Signature is truncated in this extraction; the
// 'inst' parameter is implied by the body — confirm against upstream.)
60 FetchUnit::createMachInst(std::list
<FetchBlock
*>::iterator fetch_it
,
// block_addr: cache-block-aligned base of the fetch address; fetch_offset
// converts the byte distance into an index into an array of MachInsts.
64 Addr block_addr
= cacheBlockAlign(inst
->getMemAddr());
65 Addr fetch_addr
= inst
->getMemAddr();
66 unsigned fetch_offset
= (fetch_addr
- block_addr
) / instSize
;
67 ThreadID tid
= inst
->readTid();
68 TheISA::PCState instPC
= inst
->pcState();
71 DPRINTF(InOrderCachePort
, "Creating instruction [sn:%i] w/fetch data @"
72 "addr:%08p block:%08p\n", inst
->seqNum
, fetch_addr
, block_addr
);
// The block must have been filled (valid) before decoding from it.
74 assert((*fetch_it
)->valid
);
// Reinterpret the raw byte buffer of the fetch block as an array of
// fixed-width machine instructions and pick out the one at fetch_offset.
76 TheISA::MachInst
*fetchInsts
=
77 reinterpret_cast<TheISA::MachInst
*>((*fetch_it
)->block
);
// gtoh: guest-to-host byte-order conversion of the fetched word.
80 TheISA::gtoh(fetchInsts
[fetch_offset
]);
// Predecode: bind the thread context, feed the raw bytes, and obtain the
// extended machine instruction (ISA-dependent predecoding, e.g. for x86).
82 predecoder
.setTC(cpu
->thread
[tid
]->getTC());
83 predecoder
.moreBytes(instPC
, inst
->instAddr(), mach_inst
);
84 ext_inst
= predecoder
.getExtMachInst(instPC
);
// Write the (possibly advanced) PC state and decoded inst back onto inst.
86 inst
->pcState(instPC
);
87 inst
->setMachInst(ext_inst
);
// getSlot: acquire a resource slot for a fetch request. Refuses while the
// thread's TLB access is blocked, and requires that the instruction's memory
// address was set beforehand; otherwise defers to the generic
// Resource::getSlot and timestamps the memory access start.
91 FetchUnit::getSlot(DynInstPtr inst
)
// A blocked TLB for this thread means no new fetch may be started
// (the early-return path is missing from this extraction).
93 if (tlbBlocked
[inst
->threadNumber
]) {
97 if (!inst
->validMemAddr()) {
98 panic("[tid:%i][sn:%i] Mem. Addr. must be set before requesting "
99 "cache access\n", inst
->readTid(), inst
->seqNum
);
// Fall back to the base-class slot allocation.
102 int new_slot
= Resource::getSlot(inst
);
// Record when this memory access began (curTick = current sim time).
107 inst
->memTime
= curTick();
// removeAddrDependency: clear the instruction's memory-address marker so it
// no longer blocks slot allocation that depends on a valid fetch address.
112 FetchUnit::removeAddrDependency(DynInstPtr inst
)
114 inst
->unsetMemAddr();
// getRequest: build a CacheRequest for this instruction's currently
// scheduled fetch command. Only fetch-type schedule entries are expected;
// anything else panics. The memory address must already be set on the inst.
118 FetchUnit::getRequest(DynInstPtr inst
, int stage_num
, int res_idx
,
119 int slot_num
, unsigned cmd
)
// Look at the instruction's current resource-schedule entry to decide
// what kind of memory command to issue.
121 ScheduleEntry
* sched_entry
= inst
->resSched
.top();
123 if (!inst
->validMemAddr()) {
124 panic("Mem. Addr. must be set before requesting cache access\n");
127 MemCmd::Command pkt_cmd
;
129 switch (sched_entry
->cmd
)
// Fetch is a read of the instruction memory (case label for the fetch
// command is missing from this extraction).
132 pkt_cmd
= MemCmd::ReadReq
;
134 DPRINTF(InOrderCachePort
,
135 "[tid:%i]: Fetch request from [sn:%i] for addr %08p\n",
136 inst
->readTid(), inst
->seqNum
, inst
->getMemAddr());
// default: any non-fetch command reaching this resource is a bug.
140 panic("%i: Unexpected request type (%i) to %s", curTick(),
141 sched_entry
->cmd
, name());
// Construct the request; flags are 0 and the schedule-entry index is
// passed through so completion can find the right entry later.
144 return new CacheRequest(this, inst
, stage_num
, id
, slot_num
,
145 sched_entry
->cmd
, 0, pkt_cmd
,
146 0/*flags*/, this->cpu
->readCpuId(),
147 inst
->resSched
.top()->idx
);
// setupMemRequest: allocate the low-level memory Request for a fetch.
// The request address is cache-block aligned (whole-block fetch), and the
// new Request is stored on the instruction's fetchMemReq and mirrored into
// the cache request.
151 FetchUnit::setupMemRequest(DynInstPtr inst
, CacheReqPtr cache_req
,
152 int acc_size
, int flags
)
154 ThreadID tid
= inst
->readTid();
// Fetches always target the aligned block, not the exact inst address.
155 Addr aligned_addr
= cacheBlockAlign(inst
->getMemAddr());
// NOTE(review): the assignment target of this 'new Request' (presumably
// inst->fetchMemReq) is on a line missing from this extraction — confirm.
158 new Request(tid
, aligned_addr
, acc_size
, flags
,
159 inst
->instAddr(), cpu
->readCpuId(), tid
);
161 cache_req
->memReq
= inst
->fetchMemReq
;
// findBlock: linear search of a fetch-block list for an entry matching both
// the address-space id (asid) and the block-aligned address. Returns an
// iterator to the match, or the list's end() if absent (the return
// statements are missing from this extraction).
164 std::list
<FetchUnit::FetchBlock
*>::iterator
165 FetchUnit::findBlock(std::list
<FetchBlock
*> &fetch_blocks
, int asid
,
168 std::list
<FetchBlock
*>::iterator fetch_it
= fetch_blocks
.begin();
169 std::list
<FetchBlock
*>::iterator end_it
= fetch_blocks
.end();
// Walk the list; a hit requires both asid and block address to match.
171 while (fetch_it
!= end_it
) {
172 if ((*fetch_it
)->asid
== asid
&&
173 (*fetch_it
)->addr
== block_addr
) {
// findReplacementBlock: scan the fetch buffer for a block with no pending
// instructions (cnt == 0) that can be evicted to make room for a new fill.
// Returns end() when every block still has instructions waiting on it.
183 std::list
<FetchUnit::FetchBlock
*>::iterator
184 FetchUnit::findReplacementBlock()
186 std::list
<FetchBlock
*>::iterator fetch_it
= fetchBuffer
.begin();
187 std::list
<FetchBlock
*>::iterator end_it
= fetchBuffer
.end();
189 while (fetch_it
!= end_it
) {
// cnt tracks instructions still waiting on this block; only an unused
// block (cnt == 0) is a replacement candidate.
190 if ((*fetch_it
)->cnt
== 0) {
193 DPRINTF(InOrderCachePort
, "Block %08p has %i insts pending.\n",
194 (*fetch_it
)->addr
, (*fetch_it
)->cnt
);
// markBlockUsed: LRU maintenance for the fetch buffer. Re-inserts the given
// block at the back of the list, which represents the most-recently-used
// position; findReplacementBlock scans from the front (least recent) first.
203 FetchUnit::markBlockUsed(std::list
<FetchBlock
*>::iterator block_it
)
205 // Move block from whatever location it is in fetch buffer
206 // to the back (represents most-recently-used location)
207 if (block_it
!= fetchBuffer
.end()) {
208 FetchBlock
*mru_blk
= *block_it
;
// erase + push_back preserves the pointer, only its position changes.
209 fetchBuffer
.erase(block_it
);
210 fetchBuffer
.push_back(mru_blk
);
// execute: main state machine for a fetch access occupying 'slot_num'.
// Dispatches on the cache request's scheduled command (switch below; the
// case labels — presumably InitiateFetch / CompleteFetch — are missing from
// this extraction). Initiation checks the pending/fetch buffers before
// issuing a real cache access; completion decodes the instruction out of
// the filled block and retires the buffer bookkeeping.
215 FetchUnit::execute(int slot_num
)
217 CacheReqPtr cache_req
= dynamic_cast<CacheReqPtr
>(reqMap
[slot_num
]);
// If the cache port is blocked nothing can be issued this cycle;
// mark the request incomplete so it retries.
220 if (cachePortBlocked
) {
221 DPRINTF(InOrderCachePort
, "Cache Port Blocked. Cannot Access\n");
222 cache_req
->setCompleted(false);
226 DynInstPtr inst
= cache_req
->inst
;
227 ThreadID tid
= inst
->readTid();
228 Addr block_addr
= cacheBlockAlign(inst
->getMemAddr());
229 int asid
= cpu
->asid
[tid
];
230 cache_req
->fault
= NoFault
;
232 switch (cache_req
->cmd
)
// --- Initiation path (case label missing from extraction) ---
236 // Check to see if we've already got this request buffered
237 // or pending to be buffered
238 bool do_fetch
= true;
239 std::list
<FetchBlock
*>::iterator pending_it
;
240 pending_it
= findBlock(pendingFetch
, asid
, block_addr
);
// Already being fetched: just bump the waiter count, no new access.
241 if (pending_it
!= pendingFetch
.end()) {
242 (*pending_it
)->cnt
++;
245 DPRINTF(InOrderCachePort
, "%08p is a pending fetch block "
246 "(pending:%i).\n", block_addr
,
// Not pending: if there is room, check whether the block is already
// resident in the fetch buffer (then no cache access is needed).
248 } else if (pendingFetch
.size() < fetchBuffSize
) {
249 std::list
<FetchBlock
*>::iterator buff_it
;
250 buff_it
= findBlock(fetchBuffer
, asid
, block_addr
);
251 if (buff_it
!= fetchBuffer
.end()) {
255 DPRINTF(InOrderCachePort
, "%08p is in fetch buffer"
256 "(pending:%i).\n", block_addr
, (*buff_it
)->cnt
);
// The block is (or will be) available via the fetch buffer, so this
// instruction can be satisfied without its own memory access.
261 DPRINTF(InOrderCachePort
, "Inst. [sn:%i] marked to be filled "
262 "through fetch buffer.\n", inst
->seqNum
);
263 cache_req
->fetchBufferFill
= true;
264 cache_req
->setCompleted(true);
268 // Check to see if there is room in the fetchbuffer for this instruction.
269 // If not, block this request.
270 if (pendingFetch
.size() >= fetchBuffSize
) {
271 DPRINTF(InOrderCachePort
, "No room available in fetch buffer.\n");
272 cache_req
->setCompleted(false);
// Translate the (block-aligned) fetch address; an instruction-side
// TLB access of a full cache block.
276 doTLBAccess(inst
, cache_req
, cacheBlkSize
, 0, TheISA::TLB::Execute
);
278 if (cache_req
->fault
== NoFault
) {
279 DPRINTF(InOrderCachePort
,
280 "[tid:%u]: Initiating fetch access to %s for "
281 "addr:%#x (block:%#x)\n", tid
, name(),
282 cache_req
->inst
->getMemAddr(), block_addr
);
// Buffer that will receive the whole fetched cache block.
284 cache_req
->reqData
= new uint8_t[cacheBlkSize
];
286 inst
->setCurResSlot(slot_num
);
// If the access actually went out to memory, track the block as a
// pending fetch so later instructions can piggyback on it.
290 if (cache_req
->isMemAccPending()) {
291 pendingFetch
.push_back(new FetchBlock(asid
, block_addr
));
// --- Completion path (case label missing from extraction) ---
// Case 1: instruction was marked to be filled straight from the buffer.
299 if (cache_req
->fetchBufferFill
) {
300 // Block request if it's depending on a previous fetch, but it hasnt made it yet
301 std::list
<FetchBlock
*>::iterator fetch_it
= findBlock(fetchBuffer
, asid
, block_addr
);
302 if (fetch_it
== fetchBuffer
.end()) {
303 DPRINTF(InOrderCachePort
, "%#x not available yet\n",
305 cache_req
->setCompleted(false);
309 // Make New Instruction
310 createMachInst(fetch_it
, inst
);
// Propagate the decoded static inst / PC into the trace record if on.
311 if (inst
->traceData
) {
312 inst
->traceData
->setStaticInst(inst
->staticInst
);
313 inst
->traceData
->setPC(inst
->pcState());
316 // FetchBuffer Book-Keeping
318 assert((*fetch_it
)->cnt
>= 0);
319 markBlockUsed(fetch_it
);
// Case 2: this instruction's own memory access has completed.
325 if (cache_req
->isMemAccComplete()) {
326 if (fetchBuffer
.size() >= fetchBuffSize
) {
327 // If there is no replacement block, then we'll just have
328 // to wait till that gets cleared before satisfying the fetch
329 // for this instruction
330 std::list
<FetchBlock
*>::iterator repl_it
=
331 findReplacementBlock();
332 if (repl_it
== fetchBuffer
.end()) {
333 DPRINTF(InOrderCachePort
, "Unable to find replacement block"
334 " and complete fetch.\n");
335 cache_req
->setCompleted(false);
// Evict the chosen victim to make room for the new block.
// NOTE(review): deletion of the victim's storage is on lines missing
// from this extraction — confirm there is no leak upstream.
339 fetchBuffer
.erase(repl_it
);
342 DPRINTF(InOrderCachePort
,
343 "[tid:%i]: Completing Fetch Access for [sn:%i]\n",
346 // Make New Instruction
347 std::list
<FetchBlock
*>::iterator fetch_it
=
348 findBlock(pendingFetch
, asid
, block_addr
);
350 assert(fetch_it
!= pendingFetch
.end());
351 assert((*fetch_it
)->valid
);
353 createMachInst(fetch_it
, inst
);
354 if (inst
->traceData
) {
355 inst
->traceData
->setStaticInst(inst
->staticInst
);
356 inst
->traceData
->setPC(inst
->pcState());
360 // Update instructions waiting on new fetch block
361 FetchBlock
*new_block
= (*fetch_it
);
363 assert(new_block
->cnt
>= 0);
365 // Finally, update FetchBuffer w/Pending Block into the
// Promote the now-valid pending block into the resident fetch buffer.
367 pendingFetch
.erase(fetch_it
);
368 fetchBuffer
.push_back(new_block
);
370 DPRINTF(InOrderCachePort
, "[tid:%i]: Instruction [sn:%i] is: %s\n",
372 inst
->staticInst
->disassemble(inst
->instAddr()));
// Fetch satisfied: clear the address marker and free the data packet.
374 inst
->unsetMemAddr();
376 delete cache_req
->dataPkt
;
// Memory access not complete yet: stall this request.
380 DPRINTF(InOrderCachePort
,
381 "[tid:%i]: [sn:%i]: Unable to Complete Fetch Access\n",
383 DPRINTF(InOrderStall
,
384 "STALL: [tid:%i]: Fetch miss from %08p\n",
385 tid
, cache_req
->inst
->instAddr());
386 cache_req
->setCompleted(false);
387 // NOTE: For SwitchOnCacheMiss ThreadModel, we *don't* switch on
388 // fetch miss, but we could ...
389 // cache_req->setMemStall(true);
// default: unknown command is fatal.
394 fatal("Unrecognized command to %s", resName
);
// processCacheCompletion: callback invoked when the cache returns a fetch
// packet. Squashed requests are dropped; otherwise the returned cache block
// is copied into the matching pendingFetch entry, the request is marked
// complete, and the owning pipeline stage / thread is woken up.
399 FetchUnit::processCacheCompletion(PacketPtr pkt
)
401 // Cast to correct packet type
402 CacheReqPacket
* cache_pkt
= dynamic_cast<CacheReqPacket
*>(pkt
);
// Squashed while in flight: log and discard without processing data.
405 if (cache_pkt
->cacheReq
->isSquashed()) {
406 DPRINTF(InOrderCachePort
,
407 "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
408 cache_pkt
->cacheReq
->getInst()->readTid(),
409 cache_pkt
->cacheReq
->getInst()->seqNum
);
// NOTE(review): this second "Ignoring completion..." format string and
// argument list appears to duplicate the DPRINTF above (using getTid()/
// seqNum on the request instead of the inst); the control flow joining
// them is missing from this extraction — confirm against upstream.
411 "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
412 cache_pkt
->cacheReq
->getTid(),
413 cache_pkt
->cacheReq
->seqNum
);
// Release the squashed request's slot.
415 cache_pkt
->cacheReq
->done();
422 Addr block_addr
= cacheBlockAlign(cache_pkt
->cacheReq
->
423 getInst()->getMemAddr());
425 DPRINTF(InOrderCachePort
,
426 "[tid:%u]: [sn:%i]: Waking from fetch access to addr:%#x(phys:%#x), size:%i\n",
427 cache_pkt
->cacheReq
->getInst()->readTid(),
428 cache_pkt
->cacheReq
->getInst()->seqNum
,
429 block_addr
, cache_pkt
->getAddr(), cache_pkt
->getSize());
431 // Cast to correct request type
// Re-find the live resource request for this inst/schedule index; the
// packet's own cacheReq pointer may refer to a stale slot.
432 CacheRequest
*cache_req
= dynamic_cast<CacheReqPtr
>(
433 findRequest(cache_pkt
->cacheReq
->getInst(), cache_pkt
->instIdx
));
// A completion with no matching slot is an invariant violation.
436 panic("[tid:%u]: [sn:%i]: Can't find slot for fetch access to "
437 "addr. %08p\n", cache_pkt
->cacheReq
->getInst()->readTid(),
438 cache_pkt
->cacheReq
->getInst()->seqNum
,
442 // Get resource request info
443 unsigned stage_num
= cache_req
->getStageNum();
444 DynInstPtr inst
= cache_req
->inst
;
445 ThreadID tid
= cache_req
->inst
->readTid();
446 short asid
= cpu
->asid
[tid
];
448 assert(!cache_req
->isSquashed());
// The inst must currently be scheduled on the CompleteFetch command.
449 assert(inst
->resSched
.top()->cmd
== CompleteFetch
);
451 DPRINTF(InOrderCachePort
,
452 "[tid:%u]: [sn:%i]: Processing fetch access for block %#x\n",
453 tid
, inst
->seqNum
, block_addr
);
// Locate the pendingFetch entry this completion fills.
455 std::list
<FetchBlock
*>::iterator pend_it
= findBlock(pendingFetch
, asid
,
457 assert(pend_it
!= pendingFetch
.end());
459 // Copy Data to pendingFetch queue...
// Allocate storage and copy the whole returned cache block, then mark
// the block valid so waiting instructions can decode from it.
460 (*pend_it
)->block
= new uint8_t[cacheBlkSize
];
461 memcpy((*pend_it
)->block
, cache_pkt
->getPtr
<uint8_t>(), cacheBlkSize
);
462 (*pend_it
)->valid
= true;
464 cache_req
->setMemAccPending(false);
465 cache_req
->setMemAccCompleted();
// SwitchOnCacheMiss thread model: a thread that was switched out on
// this miss gets reactivated now that the data has returned.
467 if (cache_req
->isMemStall() &&
468 cpu
->threadModel
== InOrderCPU::SwitchOnCacheMiss
) {
469 DPRINTF(InOrderCachePort
, "[tid:%u] Waking up from Cache Miss.\n",
472 cpu
->activateContext(tid
);
474 DPRINTF(ThreadModel
, "Activating [tid:%i] after return from cache"
478 // Wake up the CPU (if it went to sleep and was waiting on this
479 // completion event).
482 DPRINTF(Activity
, "[tid:%u] Activating %s due to cache completion\n",
483 tid
, cpu
->pipelineStage
[stage_num
]->name());
485 cpu
->switchToActive(stage_num
);
// squashCacheRequest: undo the fetch-buffer bookkeeping for a squashed
// fetch. Decrements the waiter count on the matching resident or pending
// block (decrement lines are missing from this extraction), removes a
// pending block whose count reaches zero, then defers to the base class.
489 FetchUnit::squashCacheRequest(CacheReqPtr req_ptr
)
491 DynInstPtr inst
= req_ptr
->getInst();
492 ThreadID tid
= inst
->readTid();
493 Addr block_addr
= cacheBlockAlign(inst
->getMemAddr());
494 int asid
= cpu
->asid
[tid
];
496 // Check Fetch Buffer (or pending fetch) for this block and
497 // update pending counts
498 std::list
<FetchBlock
*>::iterator buff_it
= findBlock(fetchBuffer
,
// Resident block: drop this instruction's claim on it.
501 if (buff_it
!= fetchBuffer
.end()) {
503 DPRINTF(InOrderCachePort
, "[sn:%i] Removing Pending Fetch "
504 "for Buffer block %08p (cnt=%i)\n", inst
->seqNum
,
505 block_addr
, (*buff_it
)->cnt
);
// Otherwise check the in-flight (pending) fetch list.
507 std::list
<FetchBlock
*>::iterator block_it
= findBlock(pendingFetch
,
510 if (block_it
!= pendingFetch
.end()) {
// No instruction waits on this pending block anymore: remove it.
512 if ((*block_it
)->cnt
== 0) {
513 DPRINTF(InOrderCachePort
, "[sn:%i] Removing Pending Fetch "
514 "for block %08p (cnt=%i)\n", inst
->seqNum
,
515 block_addr
, (*block_it
)->cnt
);
516 pendingFetch
.erase(block_it
);
// Generic squash handling (slot release etc.) in the base class.
521 CacheUnit::squashCacheRequest(req_ptr
);