cpu: Unify the serialization code for all of the CPU models
[gem5.git] / src / cpu / simple / timing.cc
1 /*
2 * Copyright (c) 2010-2012 ARM Limited
3 * All rights reserved
4 *
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
13 *
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * All rights reserved.
16 *
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
39 *
40 * Authors: Steve Reinhardt
41 */
42
43 #include "arch/locked_mem.hh"
44 #include "arch/mmapped_ipr.hh"
45 #include "arch/utility.hh"
46 #include "base/bigint.hh"
47 #include "config/the_isa.hh"
48 #include "cpu/simple/timing.hh"
49 #include "cpu/exetrace.hh"
50 #include "debug/Config.hh"
51 #include "debug/Drain.hh"
52 #include "debug/ExecFaulting.hh"
53 #include "debug/SimpleCPU.hh"
54 #include "mem/packet.hh"
55 #include "mem/packet_access.hh"
56 #include "params/TimingSimpleCPU.hh"
57 #include "sim/faults.hh"
58 #include "sim/full_system.hh"
59 #include "sim/system.hh"
60
61 using namespace std;
62 using namespace TheISA;
63
64 void
65 TimingSimpleCPU::init()
66 {
67 BaseCPU::init();
68
69 if (!params()->switched_out &&
70 system->getMemoryMode() != Enums::timing) {
71 fatal("The timing CPU requires the memory system to be in "
72 "'timing' mode.\n");
73 }
74
75 // Initialise the ThreadContext's memory proxies
76 tcBase()->initMemProxies(tcBase());
77
78 if (FullSystem && !params()->switched_out) {
79 for (int i = 0; i < threadContexts.size(); ++i) {
80 ThreadContext *tc = threadContexts[i];
81 // initialize CPU, including PC
82 TheISA::initCPU(tc, _cpuId);
83 }
84 }
85 }
86
void
TimingSimpleCPU::TimingCPUPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
{
    // Stash the packet to be delivered when this event fires, then
    // schedule the event on the CPU's event queue at tick t.
    pkt = _pkt;
    cpu->schedule(this, t);
}
93
TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
    : BaseSimpleCPU(p), fetchTranslation(this), icachePort(this),
      dcachePort(this), ifetch_pkt(NULL), dcache_pkt(NULL), previousCycle(0),
      fetchEvent(this), drainManager(NULL)
{
    // A freshly constructed CPU is idle until a context is activated.
    _status = Idle;

    system->totalNumInsts = 0;
}
103
104
105 TimingSimpleCPU::~TimingSimpleCPU()
106 {
107 }
108
109 unsigned int
110 TimingSimpleCPU::drain(DrainManager *drain_manager)
111 {
112 assert(!drainManager);
113 if (switchedOut())
114 return 0;
115
116 if (_status == Idle ||
117 (_status == BaseSimpleCPU::Running && isDrained())) {
118 assert(!fetchEvent.scheduled());
119 DPRINTF(Drain, "No need to drain.\n");
120 return 0;
121 } else {
122 drainManager = drain_manager;
123 DPRINTF(Drain, "Requesting drain: %s\n", pcState());
124
125 // The fetch event can become descheduled if a drain didn't
126 // succeed on the first attempt. We need to reschedule it if
127 // the CPU is waiting for a microcode routine to complete.
128 if (_status == BaseSimpleCPU::Running && !fetchEvent.scheduled())
129 schedule(fetchEvent, nextCycle());
130
131 return 1;
132 }
133 }
134
135 void
136 TimingSimpleCPU::drainResume()
137 {
138 assert(!fetchEvent.scheduled());
139 assert(!drainManager);
140 if (switchedOut())
141 return;
142
143 DPRINTF(SimpleCPU, "Resume\n");
144 if (system->getMemoryMode() != Enums::timing) {
145 fatal("The timing CPU requires the memory system to be in "
146 "'timing' mode.\n");
147 }
148
149 assert(!threadContexts.empty());
150 if (threadContexts.size() > 1)
151 fatal("The timing CPU only supports one thread.\n");
152
153 if (thread->status() == ThreadContext::Active) {
154 schedule(fetchEvent, nextCycle());
155 _status = BaseSimpleCPU::Running;
156 } else {
157 _status = BaseSimpleCPU::Idle;
158 }
159 }
160
bool
TimingSimpleCPU::tryCompleteDrain()
{
    // No drain request outstanding: nothing to complete.
    if (!drainManager)
        return false;

    DPRINTF(Drain, "tryCompleteDrain: %s\n", pcState());
    // Still have in-flight activity; try again later.
    if (!isDrained())
        return false;

    // Drained: notify the manager and clear the pending request.
    DPRINTF(Drain, "CPU done draining, processing drain event\n");
    drainManager->signalDrainDone();
    drainManager = NULL;

    return true;
}
177
void
TimingSimpleCPU::switchOut()
{
    BaseSimpleCPU::switchOut();

    // The CPU is expected to be quiescent at this point: no fetch
    // event pending and not in the middle of a macro-op.
    assert(!fetchEvent.scheduled());
    assert(_status == BaseSimpleCPU::Running || _status == Idle);
    assert(!stayAtPC);
    assert(microPC() == 0);

    // Account for the cycles elapsed since the last accounting point.
    numCycles += curCycle() - previousCycle;
}
190
191
void
TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseSimpleCPU::takeOverFrom(oldCPU);

    // Restart cycle accounting from the take-over point.
    previousCycle = curCycle();
}
199
200
void
TimingSimpleCPU::activateContext(ThreadID thread_num, Cycles delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    // This model supports a single thread only.
    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);

    notIdleFraction++;
    _status = BaseSimpleCPU::Running;

    // kick things off by initiating the fetch of the next instruction
    schedule(fetchEvent, clockEdge(delay));
}
217
218
void
TimingSimpleCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    // This model supports a single thread only.
    assert(thread_num == 0);
    assert(thread);

    // Already idle: nothing to do.
    if (_status == Idle)
        return;

    assert(_status == BaseSimpleCPU::Running);

    // just change status to Idle... if status != Running,
    // completeInst() will not initiate fetch of next instruction.

    notIdleFraction--;
    _status = Idle;
}
238
239 bool
240 TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
241 {
242 RequestPtr req = pkt->req;
243 if (req->isMmappedIpr()) {
244 Cycles delay = TheISA::handleIprRead(thread->getTC(), pkt);
245 new IprEvent(pkt, this, clockEdge(delay));
246 _status = DcacheWaitResponse;
247 dcache_pkt = NULL;
248 } else if (!dcachePort.sendTimingReq(pkt)) {
249 _status = DcacheRetry;
250 dcache_pkt = pkt;
251 } else {
252 _status = DcacheWaitResponse;
253 // memory system takes ownership of packet
254 dcache_pkt = NULL;
255 }
256 return dcache_pkt == NULL;
257 }
258
259 void
260 TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
261 bool read)
262 {
263 PacketPtr pkt;
264 buildPacket(pkt, req, read);
265 pkt->dataDynamicArray<uint8_t>(data);
266 if (req->getFlags().isSet(Request::NO_ACCESS)) {
267 assert(!dcache_pkt);
268 pkt->makeResponse();
269 completeDataAccess(pkt);
270 } else if (read) {
271 handleReadPacket(pkt);
272 } else {
273 bool do_access = true; // flag to suppress cache access
274
275 if (req->isLLSC()) {
276 do_access = TheISA::handleLockedWrite(thread, req);
277 } else if (req->isCondSwap()) {
278 assert(res);
279 req->setExtraData(*res);
280 }
281
282 if (do_access) {
283 dcache_pkt = pkt;
284 handleWritePacket();
285 } else {
286 _status = DcacheWaitResponse;
287 completeDataAccess(pkt);
288 }
289 }
290 }
291
void
TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
                               RequestPtr req, uint8_t *data, bool read)
{
    // Issue both halves of a split (cache-line-crossing) access.
    // clearFromParent() marks a fragment as no longer pending so a
    // later retry will not resend it.
    PacketPtr pkt1, pkt2;
    buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
    if (req->getFlags().isSet(Request::NO_ACCESS)) {
        // No memory access required: fabricate a response locally.
        assert(!dcache_pkt);
        pkt1->makeResponse();
        completeDataAccess(pkt1);
    } else if (read) {
        // Send the first fragment; only if it was accepted do we
        // attempt the second.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleReadPacket(pkt1)) {
            send_state->clearFromParent();
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleReadPacket(pkt2)) {
                send_state->clearFromParent();
            }
        }
    } else {
        // Writes go out via dcache_pkt, one fragment at a time.
        dcache_pkt = pkt1;
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt1->senderState);
        if (handleWritePacket()) {
            send_state->clearFromParent();
            dcache_pkt = pkt2;
            send_state = dynamic_cast<SplitFragmentSenderState *>(
                    pkt2->senderState);
            if (handleWritePacket()) {
                send_state->clearFromParent();
            }
        }
    }
}
328
329 void
330 TimingSimpleCPU::translationFault(Fault fault)
331 {
332 // fault may be NoFault in cases where a fault is suppressed,
333 // for instance prefetches.
334 numCycles += curCycle() - previousCycle;
335 previousCycle = curCycle();
336
337 if (traceData) {
338 // Since there was a fault, we shouldn't trace this instruction.
339 delete traceData;
340 traceData = NULL;
341 }
342
343 postExecute();
344
345 advanceInst(fault);
346 }
347
348 void
349 TimingSimpleCPU::buildPacket(PacketPtr &pkt, RequestPtr req, bool read)
350 {
351 MemCmd cmd;
352 if (read) {
353 cmd = MemCmd::ReadReq;
354 if (req->isLLSC())
355 cmd = MemCmd::LoadLockedReq;
356 } else {
357 cmd = MemCmd::WriteReq;
358 if (req->isLLSC()) {
359 cmd = MemCmd::StoreCondReq;
360 } else if (req->isSwap()) {
361 cmd = MemCmd::SwapReq;
362 }
363 }
364 pkt = new Packet(req, cmd);
365 }
366
367 void
368 TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
369 RequestPtr req1, RequestPtr req2, RequestPtr req,
370 uint8_t *data, bool read)
371 {
372 pkt1 = pkt2 = NULL;
373
374 assert(!req1->isMmappedIpr() && !req2->isMmappedIpr());
375
376 if (req->getFlags().isSet(Request::NO_ACCESS)) {
377 buildPacket(pkt1, req, read);
378 return;
379 }
380
381 buildPacket(pkt1, req1, read);
382 buildPacket(pkt2, req2, read);
383
384 req->setPhys(req1->getPaddr(), req->getSize(), req1->getFlags(), dataMasterId());
385 PacketPtr pkt = new Packet(req, pkt1->cmd.responseCommand());
386
387 pkt->dataDynamicArray<uint8_t>(data);
388 pkt1->dataStatic<uint8_t>(data);
389 pkt2->dataStatic<uint8_t>(data + req1->getSize());
390
391 SplitMainSenderState * main_send_state = new SplitMainSenderState;
392 pkt->senderState = main_send_state;
393 main_send_state->fragments[0] = pkt1;
394 main_send_state->fragments[1] = pkt2;
395 main_send_state->outstanding = 2;
396 pkt1->senderState = new SplitFragmentSenderState(pkt, 0);
397 pkt2->senderState = new SplitFragmentSenderState(pkt, 1);
398 }
399
400 Fault
401 TimingSimpleCPU::readMem(Addr addr, uint8_t *data,
402 unsigned size, unsigned flags)
403 {
404 Fault fault;
405 const int asid = 0;
406 const ThreadID tid = 0;
407 const Addr pc = thread->instAddr();
408 unsigned block_size = dcachePort.peerBlockSize();
409 BaseTLB::Mode mode = BaseTLB::Read;
410
411 if (traceData) {
412 traceData->setAddr(addr);
413 }
414
415 RequestPtr req = new Request(asid, addr, size,
416 flags, dataMasterId(), pc, _cpuId, tid);
417
418 Addr split_addr = roundDown(addr + size - 1, block_size);
419 assert(split_addr <= addr || split_addr - addr < block_size);
420
421 _status = DTBWaitResponse;
422 if (split_addr > addr) {
423 RequestPtr req1, req2;
424 assert(!req->isLLSC() && !req->isSwap());
425 req->splitOnVaddr(split_addr, req1, req2);
426
427 WholeTranslationState *state =
428 new WholeTranslationState(req, req1, req2, new uint8_t[size],
429 NULL, mode);
430 DataTranslation<TimingSimpleCPU *> *trans1 =
431 new DataTranslation<TimingSimpleCPU *>(this, state, 0);
432 DataTranslation<TimingSimpleCPU *> *trans2 =
433 new DataTranslation<TimingSimpleCPU *>(this, state, 1);
434
435 thread->dtb->translateTiming(req1, tc, trans1, mode);
436 thread->dtb->translateTiming(req2, tc, trans2, mode);
437 } else {
438 WholeTranslationState *state =
439 new WholeTranslationState(req, new uint8_t[size], NULL, mode);
440 DataTranslation<TimingSimpleCPU *> *translation
441 = new DataTranslation<TimingSimpleCPU *>(this, state);
442 thread->dtb->translateTiming(req, tc, translation, mode);
443 }
444
445 return NoFault;
446 }
447
448 bool
449 TimingSimpleCPU::handleWritePacket()
450 {
451 RequestPtr req = dcache_pkt->req;
452 if (req->isMmappedIpr()) {
453 Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
454 new IprEvent(dcache_pkt, this, clockEdge(delay));
455 _status = DcacheWaitResponse;
456 dcache_pkt = NULL;
457 } else if (!dcachePort.sendTimingReq(dcache_pkt)) {
458 _status = DcacheRetry;
459 } else {
460 _status = DcacheWaitResponse;
461 // memory system takes ownership of packet
462 dcache_pkt = NULL;
463 }
464 return dcache_pkt == NULL;
465 }
466
467 Fault
468 TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
469 Addr addr, unsigned flags, uint64_t *res)
470 {
471 uint8_t *newData = new uint8_t[size];
472 memcpy(newData, data, size);
473
474 const int asid = 0;
475 const ThreadID tid = 0;
476 const Addr pc = thread->instAddr();
477 unsigned block_size = dcachePort.peerBlockSize();
478 BaseTLB::Mode mode = BaseTLB::Write;
479
480 if (traceData) {
481 traceData->setAddr(addr);
482 }
483
484 RequestPtr req = new Request(asid, addr, size,
485 flags, dataMasterId(), pc, _cpuId, tid);
486
487 Addr split_addr = roundDown(addr + size - 1, block_size);
488 assert(split_addr <= addr || split_addr - addr < block_size);
489
490 _status = DTBWaitResponse;
491 if (split_addr > addr) {
492 RequestPtr req1, req2;
493 assert(!req->isLLSC() && !req->isSwap());
494 req->splitOnVaddr(split_addr, req1, req2);
495
496 WholeTranslationState *state =
497 new WholeTranslationState(req, req1, req2, newData, res, mode);
498 DataTranslation<TimingSimpleCPU *> *trans1 =
499 new DataTranslation<TimingSimpleCPU *>(this, state, 0);
500 DataTranslation<TimingSimpleCPU *> *trans2 =
501 new DataTranslation<TimingSimpleCPU *>(this, state, 1);
502
503 thread->dtb->translateTiming(req1, tc, trans1, mode);
504 thread->dtb->translateTiming(req2, tc, trans2, mode);
505 } else {
506 WholeTranslationState *state =
507 new WholeTranslationState(req, newData, res, mode);
508 DataTranslation<TimingSimpleCPU *> *translation =
509 new DataTranslation<TimingSimpleCPU *>(this, state);
510 thread->dtb->translateTiming(req, tc, translation, mode);
511 }
512
513 // Translation faults will be returned via finishTranslation()
514 return NoFault;
515 }
516
517
518 void
519 TimingSimpleCPU::finishTranslation(WholeTranslationState *state)
520 {
521 _status = BaseSimpleCPU::Running;
522
523 if (state->getFault() != NoFault) {
524 if (state->isPrefetch()) {
525 state->setNoFault();
526 }
527 delete [] state->data;
528 state->deleteReqs();
529 translationFault(state->getFault());
530 } else {
531 if (!state->isSplit) {
532 sendData(state->mainReq, state->data, state->res,
533 state->mode == BaseTLB::Read);
534 } else {
535 sendSplitData(state->sreqLow, state->sreqHigh, state->mainReq,
536 state->data, state->mode == BaseTLB::Read);
537 }
538 }
539
540 delete state;
541 }
542
543
544 void
545 TimingSimpleCPU::fetch()
546 {
547 DPRINTF(SimpleCPU, "Fetch\n");
548
549 if (!curStaticInst || !curStaticInst->isDelayedCommit())
550 checkForInterrupts();
551
552 checkPcEventQueue();
553
554 // We must have just got suspended by a PC event
555 if (_status == Idle)
556 return;
557
558 TheISA::PCState pcState = thread->pcState();
559 bool needToFetch = !isRomMicroPC(pcState.microPC()) && !curMacroStaticInst;
560
561 if (needToFetch) {
562 _status = BaseSimpleCPU::Running;
563 Request *ifetch_req = new Request();
564 ifetch_req->setThreadContext(_cpuId, /* thread ID */ 0);
565 setupFetchRequest(ifetch_req);
566 DPRINTF(SimpleCPU, "Translating address %#x\n", ifetch_req->getVaddr());
567 thread->itb->translateTiming(ifetch_req, tc, &fetchTranslation,
568 BaseTLB::Execute);
569 } else {
570 _status = IcacheWaitResponse;
571 completeIfetch(NULL);
572
573 numCycles += curCycle() - previousCycle;
574 previousCycle = curCycle();
575 }
576 }
577
578
579 void
580 TimingSimpleCPU::sendFetch(Fault fault, RequestPtr req, ThreadContext *tc)
581 {
582 if (fault == NoFault) {
583 DPRINTF(SimpleCPU, "Sending fetch for addr %#x(pa: %#x)\n",
584 req->getVaddr(), req->getPaddr());
585 ifetch_pkt = new Packet(req, MemCmd::ReadReq);
586 ifetch_pkt->dataStatic(&inst);
587 DPRINTF(SimpleCPU, " -- pkt addr: %#x\n", ifetch_pkt->getAddr());
588
589 if (!icachePort.sendTimingReq(ifetch_pkt)) {
590 // Need to wait for retry
591 _status = IcacheRetry;
592 } else {
593 // Need to wait for cache to respond
594 _status = IcacheWaitResponse;
595 // ownership of packet transferred to memory system
596 ifetch_pkt = NULL;
597 }
598 } else {
599 DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr());
600 delete req;
601 // fetch fault: advance directly to next instruction (fault handler)
602 _status = BaseSimpleCPU::Running;
603 advanceInst(fault);
604 }
605
606 numCycles += curCycle() - previousCycle;
607 previousCycle = curCycle();
608 }
609
610
611 void
612 TimingSimpleCPU::advanceInst(Fault fault)
613 {
614 if (_status == Faulting)
615 return;
616
617 if (fault != NoFault) {
618 advancePC(fault);
619 DPRINTF(SimpleCPU, "Fault occured, scheduling fetch event\n");
620 reschedule(fetchEvent, nextCycle(), true);
621 _status = Faulting;
622 return;
623 }
624
625
626 if (!stayAtPC)
627 advancePC(fault);
628
629 if (tryCompleteDrain())
630 return;
631
632 if (_status == BaseSimpleCPU::Running) {
633 // kick off fetch of next instruction... callback from icache
634 // response will cause that instruction to be executed,
635 // keeping the CPU running.
636 fetch();
637 }
638 }
639
640
void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    // Instruction fetch completed: pkt is the icache response, or
    // NULL when fetch() short-circuited (microcode ROM / macro-op).
    DPRINTF(SimpleCPU, "Complete ICache Fetch for addr %#x\n", pkt ?
            pkt->getAddr() : 0);

    // received a response from the icache: execute the received
    // instruction

    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = BaseSimpleCPU::Running;

    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();

    preExecute();
    if (curStaticInst && curStaticInst->isMemRef()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);

        // If we're not running now the instruction will complete in a dcache
        // response callback or the instruction faulted and has started an
        // ifetch
        if (_status == BaseSimpleCPU::Running) {
            if (fault != NoFault && traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else if (curStaticInst) {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData && !DTRACE(ExecFaulting)) {
            // Drop the trace record on a fault unless faulting
            // execution tracing is enabled.
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    } else {
        // No instruction decoded yet (e.g. still assembling a
        // macro-op); just advance.
        advanceInst(NoFault);
    }

    // The fetch packet and its request are no longer needed.
    if (pkt) {
        delete pkt->req;
        delete pkt;
    }
}
707
void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    // Deliver the delayed icache response to the CPU.
    cpu->completeIfetch(pkt);
}
713
714 bool
715 TimingSimpleCPU::IcachePort::recvTimingResp(PacketPtr pkt)
716 {
717 DPRINTF(SimpleCPU, "Received timing response %#x\n", pkt->getAddr());
718 // delay processing of returned data until next CPU clock edge
719 Tick next_tick = cpu->nextCycle();
720
721 if (next_tick == curTick())
722 cpu->completeIfetch(pkt);
723 else
724 tickEvent.schedule(pkt, next_tick);
725
726 return true;
727 }
728
729 void
730 TimingSimpleCPU::IcachePort::recvRetry()
731 {
732 // we shouldn't get a retry unless we have a packet that we're
733 // waiting to transmit
734 assert(cpu->ifetch_pkt != NULL);
735 assert(cpu->_status == IcacheRetry);
736 PacketPtr tmp = cpu->ifetch_pkt;
737 if (sendTimingReq(tmp)) {
738 cpu->_status = IcacheWaitResponse;
739 cpu->ifetch_pkt = NULL;
740 }
741 }
742
void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());
    assert(_status == DcacheWaitResponse || _status == DTBWaitResponse ||
           pkt->req->getFlags().isSet(Request::NO_ACCESS));

    numCycles += curCycle() - previousCycle;
    previousCycle = curCycle();

    if (pkt->senderState) {
        // This response is one fragment of a split access: free the
        // fragment and only continue once both halves have responded.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
        assert(send_state);
        delete pkt->req;
        delete pkt;
        PacketPtr big_pkt = send_state->bigPkt;
        delete send_state;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);
        // Record the fact that this packet is no longer outstanding.
        assert(main_send_state->outstanding != 0);
        main_send_state->outstanding--;

        if (main_send_state->outstanding) {
            // Still waiting for the other fragment.
            return;
        } else {
            // Both fragments done: finish using the main packet that
            // spans the whole access.
            delete main_send_state;
            big_pkt->senderState = NULL;
            pkt = big_pkt;
        }
    }

    _status = BaseSimpleCPU::Running;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    // the locked flag may be cleared on the response packet, so check
    // pkt->req and not pkt to see if it was a load-locked
    if (pkt->isRead() && pkt->req->isLLSC()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    advanceInst(fault);
}
806
807 bool
808 TimingSimpleCPU::DcachePort::recvTimingResp(PacketPtr pkt)
809 {
810 // delay processing of returned data until next CPU clock edge
811 Tick next_tick = cpu->nextCycle();
812
813 if (next_tick == curTick()) {
814 cpu->completeDataAccess(pkt);
815 } else {
816 if (!tickEvent.scheduled()) {
817 tickEvent.schedule(pkt, next_tick);
818 } else {
819 // In the case of a split transaction and a cache that is
820 // faster than a CPU we could get two responses before
821 // next_tick expires
822 if (!retryEvent.scheduled())
823 cpu->schedule(retryEvent, next_tick);
824 return false;
825 }
826 }
827
828 return true;
829 }
830
void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    // Deliver the delayed dcache response to the CPU.
    cpu->completeDataAccess(pkt);
}
836
void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (tmp->senderState) {
        // This is a packet from a split access.
        SplitFragmentSenderState * send_state =
            dynamic_cast<SplitFragmentSenderState *>(tmp->senderState);
        assert(send_state);
        PacketPtr big_pkt = send_state->bigPkt;

        SplitMainSenderState * main_send_state =
            dynamic_cast<SplitMainSenderState *>(big_pkt->senderState);
        assert(main_send_state);

        if (sendTimingReq(tmp)) {
            // If we were able to send without retrying, record that fact
            // and try sending the other fragment.
            send_state->clearFromParent();
            int other_index = main_send_state->getPendingFragment();
            if (other_index > 0) {
                // Hand the remaining fragment to the matching read or
                // write handler; clear it from the list on success.
                tmp = main_send_state->fragments[other_index];
                cpu->dcache_pkt = tmp;
                if ((big_pkt->isRead() && cpu->handleReadPacket(tmp)) ||
                        (big_pkt->isWrite() && cpu->handleWritePacket())) {
                    main_send_state->fragments[other_index] = NULL;
                }
            } else {
                cpu->_status = DcacheWaitResponse;
                // memory system takes ownership of packet
                cpu->dcache_pkt = NULL;
            }
        }
    } else if (sendTimingReq(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}
880
TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
                                    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    // Schedule completion of the memory-mapped IPR access at tick t.
    cpu->schedule(this, t);
}
887
void
TimingSimpleCPU::IprEvent::process()
{
    // The modelled IPR access delay has elapsed; complete the access.
    cpu->completeDataAccess(pkt);
}
893
const char *
TimingSimpleCPU::IprEvent::description() const
{
    // Human-readable event name used in event-queue debug output.
    return "Timing Simple CPU Delay IPR event";
}
899
900
void
TimingSimpleCPU::printAddr(Addr a)
{
    // Delegate address printing to the data-cache port.
    dcachePort.printAddr(a);
}
906
907
908 ////////////////////////////////////////////////////////////////////////
909 //
910 // TimingSimpleCPU Simulation Object
911 //
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    // The timing simple CPU is strictly single-threaded.
    numThreads = 1;
    if (!FullSystem && workload.size() != 1)
        panic("only one workload allowed");
    return new TimingSimpleCPU(this);
}