inorder: cleanup dprintfs in cache unit
[gem5.git] / src / cpu / inorder / resources / cache_unit.cc
1 /*
2 * Copyright (c) 2007 MIPS Technologies, Inc.
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 *
28 * Authors: Korey Sewell
29 *
30 */
31
32 #include <list>
33 #include <vector>
34
35 #include "arch/isa_traits.hh"
36 #include "arch/locked_mem.hh"
37 #include "arch/predecoder.hh"
38 #include "arch/utility.hh"
39 #include "config/the_isa.hh"
40 #include "cpu/inorder/resources/cache_unit.hh"
41 #include "cpu/inorder/cpu.hh"
42 #include "cpu/inorder/pipeline_traits.hh"
43 #include "cpu/inorder/resource_pool.hh"
44 #include "debug/Activity.hh"
45 #include "debug/AddrDep.hh"
46 #include "debug/InOrderCachePort.hh"
47 #include "debug/InOrderStall.hh"
48 #include "debug/InOrderTLB.hh"
49 #include "debug/LLSC.hh"
50 #include "debug/RefCount.hh"
51 #include "debug/ThreadModel.hh"
52 #include "mem/request.hh"
53
54 using namespace std;
55 using namespace TheISA;
56 using namespace ThePipeline;
57
#if TRACING_ON
/// Render a buffer of raw memory bytes as one contiguous hex string,
/// for use in trace (DPRINTF) output only.
static std::string
printMemData(uint8_t *data, unsigned size)
{
    std::stringstream hex_str;
    unsigned pos = 0;
    while (pos < size) {
        ccprintf(hex_str, "%02x", data[pos]);
        ++pos;
    }
    return hex_str.str();
}
#endif
69
Tick
CacheUnit::CachePort::recvAtomic(PacketPtr pkt)
{
    // The in-order CPU issues only timing-mode accesses, so an atomic
    // callback indicates a misconfigured memory system.
    panic("%s doesn't expect recvAtomic callback!", cachePortUnit->name());
    return curTick();
}
76
77 void
78 CacheUnit::CachePort::recvFunctional(PacketPtr pkt)
79 {
80 DPRINTF(InOrderCachePort, "Doesn't update state on a recvFunctional."
81 "Ignoring packet for %x.\n", pkt->getAddr());
82 }
83
84 void
85 CacheUnit::CachePort::recvStatusChange(Status status)
86 {
87 if (status == RangeChange) {
88 if (!snoopRangeSent) {
89 snoopRangeSent = true;
90 sendStatusChange(Port::RangeChange);
91 }
92 return;
93 }
94
95 panic("CacheUnit::CachePort doesn't expect recvStatusChange callback!");
96 }
97
98 bool
99 CacheUnit::CachePort::recvTiming(Packet *pkt)
100 {
101 if (pkt->isError())
102 DPRINTF(InOrderCachePort, "Got error packet back for address: %x\n",
103 pkt->getAddr());
104 else if (pkt->isResponse())
105 cachePortUnit->processCacheCompletion(pkt);
106 else {
107 //@note: depending on consistency model, update here
108 DPRINTF(InOrderCachePort, "Received snoop pkt %x,Ignoring\n", pkt->getAddr());
109 }
110
111 return true;
112 }
113
void
CacheUnit::CachePort::recvRetry()
{
    // The cache port became available again; let the owning unit re-send
    // the packet it previously failed to send.
    cachePortUnit->recvRetry();
}
119
120 CacheUnit::CacheUnit(string res_name, int res_id, int res_width,
121 int res_latency, InOrderCPU *_cpu, ThePipeline::Params *params)
122 : Resource(res_name, res_id, res_width, res_latency, _cpu),
123 cachePortBlocked(false)
124 {
125 cachePort = new CachePort(this);
126
127 // Hard-Code Selection For Now
128 if (res_name == "icache_port")
129 _tlb = params->itb;
130 else if (res_name == "dcache_port")
131 _tlb = params->dtb;
132 else
133 fatal("Unrecognized TLB name passed by user");
134
135 for (int i=0; i < MaxThreads; i++) {
136 tlbBlocked[i] = false;
137 tlbBlockSeqNum[i] = 0;
138 }
139 }
140
141 TheISA::TLB*
142 CacheUnit::tlb()
143 {
144 return _tlb;
145
146 }
147
void
CacheUnit::CachePort::setPeer(Port *port)
{
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual
    // Ports). Only the data port feeds those, so only react when this
    // unit is the dcache port.
    if (cachePortUnit->resName == "dcache_port") {
        cachePortUnit->cpu->updateMemPorts();
    }

#endif
}
162
163 Port *
164 CacheUnit::getPort(const string &if_name, int idx)
165 {
166 if (if_name == resName)
167 return cachePort;
168 else
169 return NULL;
170 }
171
172 void
173 CacheUnit::init()
174 {
175 for (int i = 0; i < width; i++) {
176 reqs[i] = new CacheRequest(this);
177 }
178
179 cacheBlkSize = this->cachePort->peerBlockSize();
180 cacheBlkMask = cacheBlkSize - 1;
181
182 initSlots();
183 }
184
185 int
186 CacheUnit::getSlot(DynInstPtr inst)
187 {
188 ThreadID tid = inst->readTid();
189 if (tlbBlocked[tid]) {
190 return -1;
191 }
192
193 // For a Split-Load, the instruction would have processed once already
194 // causing the address to be unset.
195 if (!inst->validMemAddr() && !inst->splitInst) {
196 panic("[tid:%i][sn:%i] Mem. Addr. must be set before requesting "
197 "cache access\n", inst->readTid(), inst->seqNum);
198 }
199
200 int new_slot = Resource::getSlot(inst);
201 inst->memTime = curTick();
202 //@note: add back in if you want speculative loads/store capability
203 //setAddrDependency(inst);
204 return new_slot;
205 }
206
207 void
208 CacheUnit::setAddrDependency(DynInstPtr inst)
209 {
210 Addr req_addr = inst->getMemAddr();
211 ThreadID tid = inst->readTid();
212
213 addrList[tid].push_back(req_addr);
214 addrMap[tid][req_addr] = inst->seqNum;
215
216 DPRINTF(AddrDep,
217 "[tid:%i]: [sn:%i]: Address %08p added to dependency list (size=%i)\n",
218 inst->readTid(), inst->seqNum, req_addr, addrList[tid].size());
219
220 //@NOTE: 10 is an arbitrarily "high" number, but to be exact
221 // we would need to know the # of outstanding accesses
222 // a priori. Information like fetch width, stage width,
223 // fetch buffer, and the branch resolution stage would be
224 // useful for the icache_port. For the dcache port, the #
225 // of outstanding cache accesses (mshrs) would be a good
226 // sanity check here.
227 //assert(addrList[tid].size() < 10);
228 }
229
230 void
231 CacheUnit::removeAddrDependency(DynInstPtr inst)
232 {
233 ThreadID tid = inst->readTid();
234
235 Addr mem_addr = inst->getMemAddr();
236
237 inst->unsetMemAddr();
238
239 // Erase from Address List
240 std::list<Addr>::iterator list_it = find(addrList[tid].begin(),
241 addrList[tid].end(),
242 mem_addr);
243 assert(list_it != addrList[tid].end() || inst->splitInst);
244
245 if (list_it != addrList[tid].end()) {
246 DPRINTF(AddrDep,
247 "[tid:%i]: [sn:%i] Address %08p removed from dependency "
248 "list\n", inst->readTid(), inst->seqNum, (*list_it));
249
250 addrList[tid].erase(list_it);
251
252 // Erase From Address Map (Used for Debugging)
253 addrMap[tid].erase(addrMap[tid].find(mem_addr));
254 }
255
256
257 }
258
259 ResReqPtr
260 CacheUnit::findRequest(DynInstPtr inst)
261 {
262 for (int i = 0; i < width; i++) {
263 CacheRequest* cache_req =
264 dynamic_cast<CacheRequest*>(reqs[i]);
265 assert(cache_req);
266
267 if (cache_req->valid &&
268 cache_req->getInst() == inst &&
269 cache_req->instIdx == inst->curSkedEntry->idx) {
270 return cache_req;
271 }
272 }
273
274 return NULL;
275 }
276
277 ResReqPtr
278 CacheUnit::findRequest(DynInstPtr inst, int idx)
279 {
280 for (int i = 0; i < width; i++) {
281 CacheRequest* cache_req =
282 dynamic_cast<CacheRequest*>(reqs[i]);
283 assert(cache_req);
284
285 if (cache_req->valid &&
286 cache_req->getInst() == inst &&
287 cache_req->instIdx == idx) {
288 return cache_req;
289 }
290 }
291
292 return NULL;
293 }
294
295
// Populate the slot's pre-allocated CacheRequest for this instruction,
// mapping the schedule-entry command to a memory-system packet command.
ResReqPtr
CacheUnit::getRequest(DynInstPtr inst, int stage_num, int res_idx,
                      int slot_num, unsigned cmd)
{
    ScheduleEntry* sched_entry = *inst->curSkedEntry;
    CacheRequest* cache_req = dynamic_cast<CacheRequest*>(reqs[slot_num]);

    if (!inst->validMemAddr()) {
        panic("Mem. Addr. must be set before requesting cache access\n");
    }

    MemCmd::Command pkt_cmd;

    // Both halves of a split read/write use the same packet command; only
    // the address reported in the trace differs.
    switch (sched_entry->cmd)
    {
      case InitSecondSplitRead:
        pkt_cmd = MemCmd::ReadReq;

        DPRINTF(InOrderCachePort,
                "[tid:%i]: Read request from [sn:%i] for addr %08p\n",
                inst->readTid(), inst->seqNum, inst->split2ndAddr);
        break;

      case InitiateReadData:
        pkt_cmd = MemCmd::ReadReq;

        DPRINTF(InOrderCachePort,
                "[tid:%i]: Read request from [sn:%i] for addr %08p\n",
                inst->readTid(), inst->seqNum, inst->getMemAddr());
        break;

      case InitSecondSplitWrite:
        pkt_cmd = MemCmd::WriteReq;

        DPRINTF(InOrderCachePort,
                "[tid:%i]: Write request from [sn:%i] for addr %08p\n",
                inst->readTid(), inst->seqNum, inst->split2ndAddr);
        break;

      case InitiateWriteData:
        pkt_cmd = MemCmd::WriteReq;

        DPRINTF(InOrderCachePort,
                "[tid:%i]: Write request from [sn:%i] for addr %08p\n",
                inst->readTid(), inst->seqNum, inst->getMemAddr());
        break;

      default:
        panic("%i: Unexpected request type (%i) to %s", curTick(),
              sched_entry->cmd, name());
    }

    cache_req->setRequest(inst, stage_num, id, slot_num,
                          sched_entry->cmd, pkt_cmd,
                          inst->curSkedEntry->idx);
    return cache_req;
}
353
// Decide whether an instruction re-entering this stage needs the resource
// serviced again, either because its schedule command changed or because
// the same command must be re-checked for completion.
void
CacheUnit::requestAgain(DynInstPtr inst, bool &service_request)
{
    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(findRequest(inst));
    assert(cache_req);

    // Check to see if this instruction is requesting the same command
    // or a different one
    if (cache_req->cmd != inst->curSkedEntry->cmd &&
        cache_req->instIdx == inst->curSkedEntry->idx) {
        // If different, then update command in the request
        cache_req->cmd = inst->curSkedEntry->cmd;
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: Updating the command for this "
                "instruction\n", inst->readTid(), inst->seqNum);

        service_request = true;
    // NOTE(review): this compares the schedule-entry *idx* against the
    // InitSecondSplit* *command* enumerators — idx and cmd appear to be
    // different namespaces elsewhere in this file; confirm this is
    // intentional and not an idx/cmd mix-up.
    } else if (inst->curSkedEntry->idx != CacheUnit::InitSecondSplitRead &&
               inst->curSkedEntry->idx != CacheUnit::InitSecondSplitWrite) {
        // If same command, just check to see if memory access was completed
        // but dont try to re-execute
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: requesting this resource again\n",
                inst->readTid(), inst->seqNum);

        service_request = true;
    }
}
382
// Lazily build the memory-system Request object for this access. The
// second half of a split access gets its Request cached on the
// instruction itself so retries can reuse it.
void
CacheUnit::setupMemRequest(DynInstPtr inst, CacheReqPtr cache_req,
                           int acc_size, int flags)
{
    ThreadID tid = inst->readTid();
    // NOTE(review): despite the name, this is simply the instruction's
    // effective address — no alignment is applied here. Confirm the name.
    Addr aligned_addr = inst->getMemAddr();

    if (!cache_req->is2ndSplit()) {
        // First (or only) fragment: allocate the Request on first use.
        if (cache_req->memReq == NULL) {
            cache_req->memReq =
                new Request(cpu->asid[tid], aligned_addr, acc_size, flags,
                            inst->instAddr(),
                            cpu->readCpuId(), //@todo: use context id
                            tid);
        }
    } else {
        assert(inst->splitInst);

        if (inst->splitMemReq == NULL) {
            inst->splitMemReq = new Request(cpu->asid[tid],
                                            inst->split2ndAddr,
                                            acc_size,
                                            flags,
                                            inst->instAddr(),
                                            cpu->readCpuId(),
                                            tid);
        }

        cache_req->memReq = inst->splitMemReq;
    }
}
414
415 void
416 CacheUnit::doTLBAccess(DynInstPtr inst, CacheReqPtr cache_req, int acc_size,
417 int flags, TheISA::TLB::Mode tlb_mode)
418 {
419 ThreadID tid = inst->readTid();
420
421 setupMemRequest(inst, cache_req, acc_size, flags);
422
423 //@todo: HACK: the DTB expects the correct PC in the ThreadContext
424 // but how if the memory accesses are speculative? Shouldn't
425 // we send along the requestor's PC to the translate functions?
426 ThreadContext *tc = cpu->thread[tid]->getTC();
427 PCState old_pc = tc->pcState();
428 tc->pcState() = inst->pcState();
429
430 inst->fault =
431 _tlb->translateAtomic(cache_req->memReq, tc, tlb_mode);
432 tc->pcState() = old_pc;
433
434 if (inst->fault != NoFault) {
435 DPRINTF(InOrderTLB, "[tid:%i]: %s encountered while translating "
436 "addr:%08p for [sn:%i].\n", tid, tlb_fault->name(),
437 cache_req->memReq->getVaddr(), inst->seqNum);
438
439 tlbBlocked[tid] = true;
440 tlbBlockSeqNum[tid] = inst->seqNum;
441
442 // Make sure nothing gets executed until after this faulting
443 // instruction gets handled.
444 inst->setSerializeAfter();
445
446 // Mark it as complete so it can pass through next stage.
447 // Fault Handling will happen at commit/graduation
448 cache_req->setCompleted();
449 } else {
450 DPRINTF(InOrderTLB, "[tid:%i]: [sn:%i] virt. addr %08p translated "
451 "to phys. addr:%08p.\n", tid, inst->seqNum,
452 cache_req->memReq->getVaddr(),
453 cache_req->memReq->getPaddr());
454 }
455 }
456
#if !FULL_SYSTEM
// A trap for this thread has been handled; unblock the TLB so the thread
// may issue memory accesses again.
void
CacheUnit::trap(Fault fault, ThreadID tid, DynInstPtr inst)
{
    tlbBlocked[tid] = false;
}
#endif
464
// Perform (or set up) a data read for 'inst'. Accesses that cross a
// cache-line boundary are turned into two-part "split" accesses: the
// first pass rebuilds the instruction's schedule with split read/complete
// entries, and the second pass (split2ndAccess) issues the second
// fragment. Returns the instruction's fault state.
Fault
CacheUnit::read(DynInstPtr inst, Addr addr,
                uint8_t *data, unsigned size, unsigned flags)
{
    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(findRequest(inst));
    assert(cache_req && "Can't Find Instruction for Read!");

    // The block size of our peer
    unsigned blockSize = this->cachePort->peerBlockSize();

    //The size of the data we're trying to read.
    int fullSize = size;
    inst->totalSize = size;

    if (inst->traceData) {
        inst->traceData->setAddr(addr);
    }

    if (inst->split2ndAccess) {
        // Second fragment of a split read: shrink to the second size.
        size = inst->split2ndSize;
        cache_req->splitAccess = true;
        cache_req->split2ndAccess = true;

        DPRINTF(InOrderCachePort, "[sn:%i] Split Read Access (2 of 2) for "
                "(%#x, %#x).\n", inst->seqNum, inst->getMemAddr(),
                inst->split2ndAddr);
    }


    //The address of the second part of this access if it needs to be split
    //across a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, blockSize);


    if (secondAddr > addr && !inst->split2ndAccess) {
        // The access crosses a line boundary and this is the first
        // fragment: set up (or retry) the split access.

        if (!inst->splitInst) {
            DPRINTF(InOrderCachePort, "%i: sn[%i] Split Read Access (1 of 2) for "
                    "(%#x, %#x).\n", curTick(), inst->seqNum, addr, secondAddr);

            unsigned stage_num = cache_req->getStageNum();
            unsigned cmd = inst->curSkedEntry->cmd;

            // 1. Make A New Inst. Schedule w/Split Read/Complete Entered on
            // the schedule
            // ==============================
            // 2. Reassign curSkedPtr to current command (InitiateRead) on new
            // schedule
            // ==============================
            inst->splitInst = true;
            inst->setBackSked(cpu->createBackEndSked(inst));
            inst->curSkedEntry = inst->backSked->find(stage_num, cmd);
        } else {
            DPRINTF(InOrderCachePort, "[tid:%i] [sn:%i] Retrying Split Read "
                    "Access (1 of 2) for (%#x, %#x).\n", inst->readTid(),
                    inst->seqNum, addr, secondAddr);
        }

        // Save All "Total" Split Information
        // ==============================
        // (allocated with the full access size; 'size' is shrunk below)
        inst->splitMemData = new uint8_t[size];

        // Split Information for First Access
        // ==============================
        size = secondAddr - addr;
        cache_req->splitAccess = true;

        // Split Information for Second Access
        // ==============================
        inst->split2ndSize = addr + fullSize - secondAddr;
        inst->split2ndAddr = secondAddr;
        inst->split2ndDataPtr = inst->splitMemData + size;
        inst->split2ndFlags = flags;
    }

    // Translate the (possibly shrunken) fragment; a fault suppresses the
    // cache access below.
    doTLBAccess(inst, cache_req, size, flags, TheISA::TLB::Read);

    if (inst->fault == NoFault) {
        if (!cache_req->splitAccess) {
            cache_req->reqData = new uint8_t[size];
            doCacheAccess(inst, NULL);
        } else {
            // Split fragments read directly into the shared split buffer.
            if (!inst->split2ndAccess) {
                cache_req->reqData = inst->splitMemData;
            } else {
                cache_req->reqData = inst->split2ndDataPtr;
            }

            doCacheAccess(inst, NULL, cache_req);
        }
    }

    return inst->fault;
}
559
560 Fault
561 CacheUnit::write(DynInstPtr inst, uint8_t *data, unsigned size,
562 Addr addr, unsigned flags, uint64_t *write_res)
563 {
564 CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(findRequest(inst));
565 assert(cache_req && "Can't Find Instruction for Write!");
566
567 // The block size of our peer
568 unsigned blockSize = this->cachePort->peerBlockSize();
569
570 //The size of the data we're trying to write.
571 int fullSize = size;
572 inst->totalSize = size;
573
574 if (inst->traceData) {
575 inst->traceData->setAddr(addr);
576 }
577
578 if (inst->split2ndAccess) {
579 size = inst->split2ndSize;
580 cache_req->splitAccess = true;
581 cache_req->split2ndAccess = true;
582
583 DPRINTF(InOrderCachePort, "[sn:%i] Split Write Access (2 of 2) for "
584 "(%#x, %#x).\n", inst->seqNum, inst->getMemAddr(),
585 inst->split2ndAddr);
586 }
587
588 //The address of the second part of this access if it needs to be split
589 //across a cache line boundary.
590 Addr secondAddr = roundDown(addr + size - 1, blockSize);
591
592 if (secondAddr > addr && !inst->split2ndAccess) {
593
594 DPRINTF(InOrderCachePort, "[sn:%i] Split Write Access (1 of 2) for "
595 "(%#x, %#x).\n", inst->seqNum, addr, secondAddr);
596
597 // Save All "Total" Split Information
598 // ==============================
599 inst->splitInst = true;
600
601 if (!inst->splitInstSked) {
602 assert(0 && "Split Requests Not Supported for Now...");
603
604 // Schedule Split Read/Complete for Instruction
605 // ==============================
606 int stage_num = cache_req->getStageNum();
607 RSkedPtr inst_sked = (stage_num >= ThePipeline::BackEndStartStage) ?
608 inst->backSked : inst->frontSked;
609
610 // this is just an arbitrarily high priority to ensure that this
611 // gets pushed to the back of the list
612 int stage_pri = 20;
613
614 int isplit_cmd = CacheUnit::InitSecondSplitWrite;
615 inst_sked->push(new
616 ScheduleEntry(stage_num,
617 stage_pri,
618 cpu->resPool->getResIdx(DCache),
619 isplit_cmd,
620 1));
621
622 int csplit_cmd = CacheUnit::CompleteSecondSplitWrite;
623 inst_sked->push(new
624 ScheduleEntry(stage_num + 1,
625 1/*stage_pri*/,
626 cpu->resPool->getResIdx(DCache),
627 csplit_cmd,
628 1));
629 inst->splitInstSked = true;
630 } else {
631 DPRINTF(InOrderCachePort, "[tid:%i] sn:%i] Retrying Split Read "
632 "Access (1 of 2) for (%#x, %#x).\n",
633 inst->readTid(), inst->seqNum, addr, secondAddr);
634 }
635
636
637
638 // Split Information for First Access
639 // ==============================
640 size = secondAddr - addr;
641 cache_req->splitAccess = true;
642
643 // Split Information for Second Access
644 // ==============================
645 inst->split2ndSize = addr + fullSize - secondAddr;
646 inst->split2ndAddr = secondAddr;
647 inst->split2ndFlags = flags;
648 inst->splitInstSked = true;
649 }
650
651 doTLBAccess(inst, cache_req, size, flags, TheISA::TLB::Write);
652
653 if (inst->fault == NoFault) {
654 if (!cache_req->splitAccess) {
655 cache_req->reqData = new uint8_t[size];
656 memcpy(cache_req->reqData, data, size);
657
658 //inst->split2ndStoreDataPtr = cache_req->reqData;
659 //inst->split2ndStoreDataPtr += size;
660
661 doCacheAccess(inst, write_res);
662 } else {
663 doCacheAccess(inst, write_res, cache_req);
664 }
665
666 }
667
668 return inst->fault;
669 }
670
671
// Main per-cycle entry point for a slot: dispatch on the request's
// current schedule command — initiating an access (first or second split
// fragment) or completing one — handling port-blocked, faulted, and
// squashed instructions up front.
void
CacheUnit::execute(int slot_num)
{
    CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(reqs[slot_num]);
    assert(cache_req);

    // Any command that would send a new packet must wait while the port
    // is blocked; retry next cycle.
    if (cachePortBlocked &&
        (cache_req->cmd == InitiateReadData ||
         cache_req->cmd == InitiateWriteData ||
         cache_req->cmd == InitSecondSplitRead ||
         cache_req->cmd == InitSecondSplitWrite)) {
        DPRINTF(InOrderCachePort, "Cache Port Blocked. Cannot Access\n");
        cache_req->done(false);
        return;
    }

    DynInstPtr inst = cache_req->inst;
    if (inst->fault != NoFault) {
        // Faulted instructions pass straight through; the fault is
        // handled at commit/graduation.
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: Detected %s fault @ %x. Forwarding to "
                "next stage.\n", inst->readTid(), inst->seqNum, inst->fault->name(),
                inst->getMemAddr());
        finishCacheUnitReq(inst, cache_req);
        return;
    }

    if (inst->isSquashed()) {
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: Detected squashed instruction "
                "next stage.\n", inst->readTid(), inst->seqNum);
        finishCacheUnitReq(inst, cache_req);
        return;
    }

#if TRACING_ON
    ThreadID tid = inst->readTid();
    std::string acc_type = "write";
#endif

    switch (cache_req->cmd)
    {

      case InitiateReadData:
#if TRACING_ON
        acc_type = "read";
#endif
        // Deliberate fall-through: reads and writes share the initiation
        // path; only the trace label differs.
      case InitiateWriteData:
        if (cachePortBlocked) {
            DPRINTF(InOrderCachePort, "Cache Port Blocked. Cannot Access\n");
            cache_req->done(false);
            return;
        }

        DPRINTF(InOrderCachePort,
                "[tid:%u]: [sn:%i] Initiating data %s access to %s for "
                "addr. %08p\n", tid, inst->seqNum, acc_type, name(),
                cache_req->inst->getMemAddr());

        inst->setCurResSlot(slot_num);

        // Prefetches execute directly; normal accesses go through
        // initiateAcc(), which ends up calling read()/write() above.
        if (inst->isDataPrefetch() || inst->isInstPrefetch()) {
            inst->execute();
        } else {
            inst->initiateAcc();
        }

        break;

      case InitSecondSplitRead:
        DPRINTF(InOrderCachePort,
                "[tid:%u]: [sn:%i] Initiating split data read access to %s "
                "for addr. %08p\n", tid, inst->seqNum, name(),
                cache_req->inst->split2ndAddr);
        inst->split2ndAccess = true;
        assert(inst->split2ndAddr != 0);
        read(inst, inst->split2ndAddr, &inst->split2ndData,
             inst->totalSize, inst->split2ndFlags);
        break;

      case InitSecondSplitWrite:
        DPRINTF(InOrderCachePort,
                "[tid:%u]: [sn:%i] Initiating split data write access to %s "
                "for addr. %08p\n", tid, inst->seqNum, name(),
                cache_req->inst->getMemAddr());

        inst->split2ndAccess = true;
        assert(inst->split2ndAddr != 0);
        write(inst, &inst->split2ndData, inst->totalSize,
              inst->split2ndAddr, inst->split2ndFlags, NULL);
        break;

      case CompleteReadData:
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: Trying to Complete Data Read Access\n",
                tid, inst->seqNum);


        //@todo: timing translations need to check here...
        assert(!inst->isInstPrefetch() && "Can't Handle Inst. Prefecthes");
        if (cache_req->isMemAccComplete() || inst->isDataPrefetch()) {
            finishCacheUnitReq(inst, cache_req);
        } else {
            // Still waiting on the memory system: stall this request.
            DPRINTF(InOrderStall, "STALL: [tid:%i]: Data miss from %08p\n",
                    tid, cache_req->inst->getMemAddr());
            cache_req->setCompleted(false);
            cache_req->setMemStall(true);
        }
        break;

      case CompleteWriteData:
        {
            DPRINTF(InOrderCachePort,
                    "[tid:%i]: [sn:%i]: Trying to Complete Data Write Access\n",
                    tid, inst->seqNum);


            //@todo: check that timing translation is finished here
            // Conditional/atomic stores must wait for the memory system's
            // answer before completing.
            RequestPtr mem_req = cache_req->memReq;
            if (mem_req->isCondSwap() || mem_req->isLLSC() || mem_req->isSwap()) {
                DPRINTF(InOrderCachePort, "Detected Conditional Store Inst.\n");

                if (!cache_req->isMemAccComplete()) {
                    DPRINTF(InOrderStall, "STALL: [tid:%i]: Data miss from %08p\n",
                            tid, cache_req->inst->getMemAddr());
                    cache_req->setCompleted(false);
                    cache_req->setMemStall(true);
                    return;
                } else {
                    DPRINTF(InOrderStall, "Mem Acc Completed\n");
                }
            }

            if (cache_req->isMemAccPending()) {
                // Hand buffer/request ownership to the in-flight packet so
                // they survive this request being recycled.
                DPRINTF(InOrderCachePort, "Store Instruction Pending Completion.\n");
                cache_req->dataPkt->reqData = cache_req->reqData;
                cache_req->dataPkt->memReq = cache_req->memReq;
            } else
                DPRINTF(InOrderCachePort, "Store Instruction Finished Completion.\n");

            //@todo: if split inst save data
            finishCacheUnitReq(inst, cache_req);
        }
        break;

      case CompleteSecondSplitRead:
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: Trying to Complete Split Data Read "
                "Access\n", tid, inst->seqNum);

        //@todo: check that timing translation is finished here
        assert(!inst->isInstPrefetch() && "Can't Handle Inst. Prefecthes");
        if (cache_req->isMemAccComplete() || inst->isDataPrefetch()) {
            finishCacheUnitReq(inst, cache_req);
        } else {
            DPRINTF(InOrderStall, "STALL: [tid:%i]: Data miss from %08p\n",
                    tid, cache_req->inst->split2ndAddr);
            cache_req->setCompleted(false);
            cache_req->setMemStall(true);
        }
        break;

      case CompleteSecondSplitWrite:
        DPRINTF(InOrderCachePort,
                "[tid:%i]: [sn:%i]: Trying to Complete Split Data Write "
                "Access\n", tid, inst->seqNum);
        //@todo: illegal to have a unaligned cond.swap or llsc?
        assert(!cache_req->memReq->isSwap() && !cache_req->memReq->isCondSwap()
               && !cache_req->memReq->isLLSC());

        if (cache_req->isMemAccPending()) {
            cache_req->dataPkt->reqData = cache_req->reqData;
            cache_req->dataPkt->memReq = cache_req->memReq;
        }

        //@todo: check that timing translation is finished here
        finishCacheUnitReq(inst, cache_req);
        break;

      default:
        fatal("Unrecognized command to %s", resName);
    }
}
854
855 void
856 CacheUnit::finishCacheUnitReq(DynInstPtr inst, CacheRequest *cache_req)
857 {
858 //@note: add back in for speculative load/store capability
859 //removeAddrDependency(inst);
860 cache_req->setMemStall(false);
861 cache_req->done();
862 }
863
864 void
865 CacheUnit::buildDataPacket(CacheRequest *cache_req)
866 {
867 // Check for LL/SC and if so change command
868 if (cache_req->memReq->isLLSC() && cache_req->pktCmd == MemCmd::ReadReq) {
869 cache_req->pktCmd = MemCmd::LoadLockedReq;
870 }
871
872 if (cache_req->pktCmd == MemCmd::WriteReq) {
873 cache_req->pktCmd =
874 cache_req->memReq->isSwap() ? MemCmd::SwapReq :
875 (cache_req->memReq->isLLSC() ? MemCmd::StoreCondReq
876 : MemCmd::WriteReq);
877 }
878
879 cache_req->dataPkt = new CacheReqPacket(cache_req,
880 cache_req->pktCmd,
881 Packet::Broadcast,
882 cache_req->instIdx);
883 DPRINTF(InOrderCachePort, "[slot:%i]: Slot marked for %x\n",
884 cache_req->getSlot(),
885 cache_req->dataPkt->getAddr());
886
887 cache_req->dataPkt->hasSlot = true;
888 cache_req->dataPkt->dataStatic(cache_req->reqData);
889 }
890
// Build the packet for this request and attempt to send it to the cache.
// Handles the LL/SC and conditional-swap special cases, port-blocked
// retry, and the "failed store conditional still completes" path.
void
CacheUnit::doCacheAccess(DynInstPtr inst, uint64_t *write_res,
                         CacheReqPtr split_req)
{
    // NOTE(review): 'fault' is never used in this function — confirm it
    // can be removed.
    Fault fault = NoFault;
#if TRACING_ON
    ThreadID tid = inst->readTid();
#endif
    bool do_access = true;  // flag to suppress cache access

    // Special Handling if this is a split request
    CacheReqPtr cache_req;
    if (split_req == NULL)
        cache_req = dynamic_cast<CacheReqPtr>(reqs[inst->getCurResSlot()]);
    else {
        cache_req = split_req;
        assert(0);
    }

    // Make a new packet inside the CacheRequest object
    assert(cache_req);
    buildDataPacket(cache_req);

    // Special Handling for LL/SC or Compare/Swap
    bool is_write = cache_req->dataPkt->isWrite();
    RequestPtr mem_req = cache_req->dataPkt->req;
    if (is_write) {
        DPRINTF(InOrderCachePort,
                "[tid:%u]: [sn:%i]: Storing data: %s\n",
                tid, inst->seqNum,
                printMemData(cache_req->dataPkt->getPtr<uint8_t>(),
                             cache_req->dataPkt->getSize()));

        if (mem_req->isCondSwap()) {
            // Conditional swaps carry the comparison value as extra data.
            assert(write_res);
            cache_req->memReq->setExtraData(*write_res);
        }
        if (mem_req->isLLSC()) {
            // A failed store-conditional suppresses the actual access.
            assert(cache_req->inst->isStoreConditional());
            DPRINTF(InOrderCachePort, "Evaluating Store Conditional access\n");
            do_access = TheISA::handleLockedWrite(inst.get(), mem_req);
        }
    }

    // Finally, go ahead and make the access if we can...
    DPRINTF(InOrderCachePort,
            "[tid:%i] [sn:%i] attempting to access cache for addr %08p\n",
            tid, inst->seqNum, cache_req->dataPkt->getAddr());

    if (do_access) {
        if (!cachePort->sendTiming(cache_req->dataPkt)) {
            // Port refused the packet: free packet and request, mark the
            // port blocked, and have this request retried later.
            DPRINTF(InOrderCachePort,
                    "[tid:%i] [sn:%i] cannot access cache, because port "
                    "is blocked. now waiting to retry request\n", tid,
                    inst->seqNum);
            delete cache_req->dataPkt;
            cache_req->dataPkt = NULL;

            delete cache_req->memReq;
            cache_req->memReq = NULL;

            cache_req->done(false);
            cachePortBlocked = true;
        } else {
            DPRINTF(InOrderCachePort,
                    "[tid:%i] [sn:%i] is now waiting for cache response\n",
                    tid, inst->seqNum);
            cache_req->setCompleted();
            cache_req->setMemAccPending();
            cachePortBlocked = false;
        }
    } else if (mem_req->isLLSC()){
        // Store-Conditional instructions complete even if they "failed"
        assert(cache_req->inst->isStoreConditional());
        cache_req->setCompleted(true);

        DPRINTF(LLSC,
                "[tid:%i]: T%i Ignoring Failed Store Conditional Access\n",
                tid, tid);

        // Complete locally since no packet was actually sent.
        processCacheCompletion(cache_req->dataPkt);
    } else {
        delete cache_req->dataPkt;
        cache_req->dataPkt = NULL;

        delete cache_req->memReq;
        cache_req->memReq = NULL;

        // Make cache request again since access due to
        // inability to access
        DPRINTF(InOrderStall, "STALL: \n");
        cache_req->done(false);
    }

}
986
// Handle completion packets for accesses that have been squashed or that
// no longer own a slot. Returns true when the packet was consumed here
// (freed, nothing further to do), false when normal completion should
// proceed.
bool
CacheUnit::processSquash(CacheReqPacket *cache_pkt)
{
    // The resource may no longer be actively servicing this
    // packet. Scenarios like a store that has been sent to the
    // memory system or access that's been squashed. If that's
    // the case, we can't access the request slot because it
    // will be either invalid or servicing another request.
    if (!cache_pkt->hasSlot) {
        DPRINTF(InOrderCachePort,
                "%x does not have a slot in unit, ignoring.\n",
                cache_pkt->getAddr());

        // The packet took ownership of the data buffer and the memory
        // request; free them with the packet.
        if (cache_pkt->reqData) {
            delete [] cache_pkt->reqData;
            cache_pkt->reqData = NULL;
        }

        if (cache_pkt->memReq) {
            delete cache_pkt->memReq;
            cache_pkt->memReq = NULL;
        }

        delete cache_pkt;
        cache_pkt = NULL;
        cpu->wakeCPU();
        return true;
    } else {
        DPRINTF(InOrderCachePort, "%x has slot %i\n",
                cache_pkt->getAddr(), cache_pkt->cacheReq->getSlot());
    }


    // It's possible that the request is squashed but the
    // packet is still acknowledged by the resource. Squashes
    // should happen at the end of the cycles and trigger the
    // code above, but if not, this would handle any timing
    // variations due to diff. user parameters.
    if (cache_pkt->cacheReq->isSquashed()) {
        DPRINTF(InOrderCachePort,
                "Ignoring completion of squashed access, [tid:%i] [sn:%i]\n",
                cache_pkt->cacheReq->getInst()->readTid(),
                cache_pkt->cacheReq->getInst()->seqNum);

        cache_pkt->cacheReq->setMemAccPending(false);
        cache_pkt->cacheReq->freeSlot();
        delete cache_pkt;
        cache_pkt = NULL;
        cpu->wakeCPU();
        return true;
    }


    return false;
}
1042
// Handle the memory system's response to an earlier cache access:
// complete the access on the owning instruction, update the resource
// request's bookkeeping, and wake up the pipeline stage (and possibly
// the thread/CPU) that was waiting on this access.
void
CacheUnit::processCacheCompletion(PacketPtr pkt)
{
    //@todo: use packet sender state instead of deriving from packet class to
    //       get special state
    CacheReqPacket* cache_pkt = dynamic_cast<CacheReqPacket*>(pkt);
    assert(cache_pkt);

    DPRINTF(InOrderCachePort, "Finished request for %x\n", pkt->getAddr());

    // Squashed accesses are acknowledged and cleaned up inside
    // processSquash(); nothing further to do for them here.
    if (processSquash(cache_pkt))
        return;

    // Re-find the live resource request for this instruction; it must
    // match the one carried in the packet, otherwise the slot tracking
    // is corrupt.
    CacheRequest *cache_req = dynamic_cast<CacheReqPtr>(
        findRequest(cache_pkt->cacheReq->getInst(), cache_pkt->instIdx));

    if (!cache_req) {
        panic("[tid:%u]: [sn:%i]: Can't find slot for cache access to "
              "addr. %08p\n", cache_pkt->cacheReq->getInst()->readTid(),
              cache_pkt->cacheReq->getInst()->seqNum,
              cache_pkt->cacheReq->getInst()->getMemAddr());
    }

    assert(cache_req);
    assert(cache_req == cache_pkt->cacheReq);

    DPRINTF(InOrderCachePort,
            "[tid:%u]: [sn:%i]: [slot:%i] Waking from cache access (vaddr.%08p, paddr:%08p)\n",
            cache_pkt->cacheReq->getInst()->readTid(),
            cache_pkt->cacheReq->getInst()->seqNum,
            cache_req->getSlot(),
            cache_pkt->req->getVaddr(),
            cache_pkt->req->getPaddr());

    // Get resource request info
    unsigned stage_num = cache_req->getStageNum();
    DynInstPtr inst = cache_req->inst;
    ThreadID tid = cache_req->inst->readTid();

    // processSquash() above should have filtered out squashed requests.
    assert(!cache_req->isSquashed());
    assert(inst->staticInst && inst->isMemRef());


    DPRINTF(InOrderCachePort,
            "[tid:%u]: [sn:%i]: Processing cache access\n",
            tid, inst->seqNum);

    // For a split (unaligned) access, only complete the instruction once
    // BOTH halves have returned; the combined data lives in
    // inst->splitMemData, so a fresh packet spanning the full access is
    // built just to hand completeAcc() the stitched-together bytes.
    PacketPtr split_pkt = NULL;
    if (inst->splitInst) {
        inst->splitFinishCnt++;

        if (inst->splitFinishCnt == 2) {
            // Rewrite the (reused) memory request to describe the whole
            // access rather than the second half.
            cache_req->memReq->setVirt(0/*inst->tid*/,
                                       inst->getMemAddr(),
                                       inst->totalSize,
                                       0,
                                       0);

            split_pkt = new Packet(cache_req->memReq, cache_req->pktCmd,
                                   Packet::Broadcast);
            split_pkt->dataStatic(inst->splitMemData);

            DPRINTF(InOrderCachePort, "Completing Split Access.\n");
            inst->completeAcc(split_pkt);
        }
    } else {
        inst->completeAcc(cache_pkt);
    }

    inst->setExecuted();

    if (inst->isLoad()) {
        assert(cache_pkt->isRead());

        // Load-linked: record the locked address/flag for a later
        // store-conditional to check.
        if (cache_pkt->req->isLLSC()) {
            DPRINTF(InOrderCachePort,
                    "[tid:%u]: Handling Load-Linked for [sn:%u]\n",
                    tid, inst->seqNum);
            TheISA::handleLockedRead(inst.get(), cache_pkt->req);
        }

        DPRINTF(InOrderCachePort,
                "[tid:%u]: [sn:%i]: Bytes loaded were: %s\n",
                tid, inst->seqNum,
                (split_pkt) ? printMemData(split_pkt->getPtr<uint8_t>(),
                                           split_pkt->getSize()) :
                              printMemData(cache_pkt->getPtr<uint8_t>(),
                                           cache_pkt->getSize()));
    } else if(inst->isStore()) {
        assert(cache_pkt->isWrite());

        DPRINTF(InOrderCachePort,
                "[tid:%u]: [sn:%i]: Bytes stored were: %s\n",
                tid, inst->seqNum,
                (split_pkt) ? printMemData(split_pkt->getPtr<uint8_t>(),
                                           split_pkt->getSize()) :
                              printMemData(cache_pkt->getPtr<uint8_t>(),
                                           cache_pkt->getSize()));
    }


    // The temporary full-size packet was only needed for completeAcc();
    // its data buffer is owned by the instruction (dataStatic above).
    // NOTE(review): cache_pkt itself is not freed here — presumably the
    // slot cleanup path (CacheRequest::clearRequest) owns it; confirm.
    if (split_pkt) {
        delete split_pkt;
        split_pkt = NULL;
    }

    cache_req->setMemAccPending(false);
    cache_req->setMemAccCompleted();

    // Under the switch-on-cache-miss thread model the thread was
    // deactivated while the miss was outstanding; bring it back now.
    if (cache_req->isMemStall() &&
        cpu->threadModel == InOrderCPU::SwitchOnCacheMiss) {
        DPRINTF(InOrderCachePort, "[tid:%u] Waking up from Cache Miss.\n",
                tid);

        cpu->activateContext(tid);

        DPRINTF(ThreadModel, "Activating [tid:%i] after return from cache"
                "miss.\n", tid);
    }

    // Wake up the CPU (if it went to sleep and was waiting on this
    // completion event).
    cpu->wakeCPU();

    DPRINTF(Activity, "[tid:%u] Activating %s due to cache completion\n",
            tid, cpu->pipelineStage[stage_num]->name());

    cpu->switchToActive(stage_num);
}
1172
// Called by the memory system when the cache port can accept another
// request after an earlier sendTiming() was rejected.
void
CacheUnit::recvRetry()
{
    DPRINTF(InOrderCachePort, "Unblocking Cache Port. \n");

    // We should only ever get a retry while the port is marked blocked.
    assert(cachePortBlocked);

    // Clear the cache port for use again
    cachePortBlocked = false;

    // A stage stalled on the blocked port may now make progress.
    cpu->wakeCPU();
}
1185
// Default-construct a cache unit event; the slot index and owning
// resource are supplied through the ResourceEvent base class.
CacheUnitEvent::CacheUnitEvent()
    : ResourceEvent()
{ }
1189
1190 void
1191 CacheUnitEvent::process()
1192 {
1193 DynInstPtr inst = resource->reqs[slotIdx]->inst;
1194 int stage_num = resource->reqs[slotIdx]->getStageNum();
1195 ThreadID tid = inst->threadNumber;
1196 CacheReqPtr req_ptr = dynamic_cast<CacheReqPtr>(resource->reqs[slotIdx]);
1197
1198 DPRINTF(InOrderTLB, "Waking up from TLB Miss caused by [sn:%i].\n",
1199 inst->seqNum);
1200
1201 CacheUnit* tlb_res = dynamic_cast<CacheUnit*>(resource);
1202 assert(tlb_res);
1203
1204 //@todo: eventually, we should do a timing translation w/
1205 // hw page table walk on tlb miss
1206 DPRINTF(InOrderTLB, "Handling Fault %s : [sn:%i] %x\n", inst->fault->name(), inst->seqNum, inst->getMemAddr());
1207 inst->fault->invoke(tlb_res->cpu->tcBase(tid), inst->staticInst);
1208
1209 tlb_res->tlbBlocked[tid] = false;
1210
1211 tlb_res->cpu->pipelineStage[stage_num]->
1212 unsetResStall(tlb_res->reqs[slotIdx], tid);
1213
1214 req_ptr->tlbStall = false;
1215
1216 //@todo: timing translation needs to have some type of independent
1217 // info regarding if it's squashed or not so we can
1218 // free up the resource if a request gets squashed in the middle
1219 // of a table walk
1220 if (req_ptr->isSquashed()) {
1221 req_ptr->freeSlot();
1222 }
1223
1224 tlb_res->cpu->wakeCPU();
1225 }
1226
1227 void
1228 CacheUnit::squashDueToMemStall(DynInstPtr inst, int stage_num,
1229 InstSeqNum squash_seq_num, ThreadID tid)
1230 {
1231 // If squashing due to memory stall, then we do NOT want to
1232 // squash the instruction that caused the stall so we
1233 // increment the sequence number here to prevent that.
1234 //
1235 // NOTE: This is only for the SwitchOnCacheMiss Model
1236 // NOTE: If you have multiple outstanding misses from the same
1237 // thread then you need to reevaluate this code
1238 // NOTE: squash should originate from
1239 // pipeline_stage.cc:processInstSchedule
1240 DPRINTF(InOrderCachePort, "Squashing above [sn:%u]\n",
1241 squash_seq_num + 1);
1242
1243 squash(inst, stage_num, squash_seq_num + 1, tid);
1244 }
1245
1246 void
1247 CacheUnit::squashCacheRequest(CacheReqPtr req_ptr)
1248 {
1249 DynInstPtr inst = req_ptr->getInst();
1250 req_ptr->setSquashed();
1251 inst->setSquashed();
1252
1253 //@note: add back in for speculative load/store capability
1254 /*if (inst->validMemAddr()) {
1255 DPRINTF(AddrDep, "Squash of [tid:%i] [sn:%i], attempting to "
1256 "remove addr. %08p dependencies.\n",
1257 inst->readTid(),
1258 inst->seqNum,
1259 inst->getMemAddr());
1260
1261 removeAddrDependency(inst);
1262 }*/
1263 }
1264
1265
1266 void
1267 CacheUnit::squash(DynInstPtr inst, int stage_num,
1268 InstSeqNum squash_seq_num, ThreadID tid)
1269 {
1270 if (tlbBlocked[tid] &&
1271 tlbBlockSeqNum[tid] > squash_seq_num) {
1272 DPRINTF(InOrderCachePort, "Releasing TLB Block due to "
1273 " squash after [sn:%i].\n", squash_seq_num);
1274 tlbBlocked[tid] = false;
1275 }
1276
1277 for (int i = 0; i < width; i++) {
1278 ResReqPtr req_ptr = reqs[i];
1279
1280 if (req_ptr->valid &&
1281 req_ptr->getInst()->readTid() == tid &&
1282 req_ptr->getInst()->seqNum > squash_seq_num) {
1283
1284 DPRINTF(InOrderCachePort,
1285 "[tid:%i] Squashing request from [sn:%i]\n",
1286 req_ptr->getInst()->readTid(), req_ptr->getInst()->seqNum);
1287
1288 if (req_ptr->isSquashed()) {
1289 DPRINTF(AddrDep, "Request for [tid:%i] [sn:%i] already "
1290 "squashed, ignoring squash process.\n",
1291 req_ptr->getInst()->readTid(),
1292 req_ptr->getInst()->seqNum);
1293 continue;
1294 }
1295
1296 CacheReqPtr cache_req = dynamic_cast<CacheReqPtr>(req_ptr);
1297 assert(cache_req);
1298
1299 squashCacheRequest(cache_req);
1300
1301 int req_slot_num = req_ptr->getSlot();
1302
1303 if (cache_req->tlbStall) {
1304 tlbBlocked[tid] = false;
1305
1306 int stall_stage = reqs[req_slot_num]->getStageNum();
1307
1308 cpu->pipelineStage[stall_stage]->
1309 unsetResStall(reqs[req_slot_num], tid);
1310 }
1311
1312 if (cache_req->isMemAccPending()) {
1313 cache_req->dataPkt->reqData = cache_req->reqData;
1314 cache_req->dataPkt->memReq = cache_req->memReq;
1315 }
1316
1317 if (!cache_req->tlbStall)
1318 freeSlot(req_slot_num);
1319 }
1320 }
1321
1322 }
1323
// Reset this request so its slot can be reused, freeing any resources
// the request still owns.
void
CacheRequest::clearRequest()
{
    if (!memAccPending) {
        // No access in flight: this request still owns its buffers.
        // Split accesses share the data buffer with the instruction, so
        // it is deliberately NOT freed here for them.
        if (reqData && !splitAccess)
            delete [] reqData;

        if (memReq)
            delete memReq;

        if (dataPkt)
            delete dataPkt;
    } else {
        // An access is still outstanding; the packet will come back
        // after this slot is recycled, so flag it as slot-less.
        // NOTE(review): presumably the completion path checks hasSlot
        // to know it must free the packet itself — confirm against
        // processSquash/processCacheCompletion.
        if (dataPkt)
            dataPkt->hasSlot = false;
    }

    // Null out ownership and reset all per-access state for reuse.
    memReq = NULL;
    reqData = NULL;
    dataPkt = NULL;
    memAccComplete = false;
    memAccPending = false;
    tlbStall = false;
    splitAccess = false;
    splitAccessNum = -1;
    split2ndAccess = false;
    instIdx = 0;
    fetchBufferFill = false;

    // Let the base class clear its generic request state too.
    ResourceRequest::clearRequest();
}