CPU: The API change to EventWrapper did not get propagated to the entirety of Timing...
[gem5.git] src/cpu/simple/timing.cc
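Throughout this file, events are scheduled through their owning CPU rather than on themselves: cpu->schedule(this, t) in CpuPort::TickEvent::schedule and in the IprEvent constructor, and schedule(fetchEvent, ...) / deschedule(fetchEvent) elsewhere. The sketch below is purely illustrative of that pattern, not the commit's exact change; ExampleEvent and kick() are hypothetical names, and the Event/Tick types are assumed to come from gem5's sim/eventq.hh as used by the code that follows.

    class ExampleEvent : public Event
    {
      private:
        TimingSimpleCPU *cpu;   // owning CPU, used as the event's manager

      public:
        ExampleEvent(TimingSimpleCPU *_cpu) : cpu(_cpu) {}

        // Work performed when the event fires.
        void process() { }

        // Older code scheduled an event on itself; the pattern used in this
        // file has the owner (the CPU) schedule it instead.
        void kick(Tick t) { cpu->schedule(this, t); }
    };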
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmaped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "cpu/exetrace.hh"
#include "cpu/simple/timing.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/TimingSimpleCPU.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

Port *
TimingSimpleCPU::getPort(const std::string &if_name, int idx)
{
    if (if_name == "dcache_port")
        return &dcachePort;
    else if (if_name == "icache_port")
        return &icachePort;
    else
        panic("No Such Port\n");
}

void
TimingSimpleCPU::init()
{
    BaseCPU::init();
    cpuId = tc->readCpuId();
#if FULL_SYSTEM
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(tc, cpuId);
    }
#endif
}

Tick
TimingSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
{
    panic("TimingSimpleCPU doesn't expect recvAtomic callback!");
    return curTick;
}

void
TimingSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
{
    // No internal storage to update, just return
    return;
}

void
TimingSimpleCPU::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange) {
        if (!snoopRangeSent) {
            snoopRangeSent = true;
            sendStatusChange(Port::RangeChange);
        }
        return;
    }

    panic("TimingSimpleCPU doesn't expect recvStatusChange callback!");
}

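// Stash the packet and have the owning CPU (the event manager) schedule this
// tick event at time t; the derived event's process() then hands pkt back to
// the CPU.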
void
TimingSimpleCPU::CpuPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
{
    pkt = _pkt;
    cpu->schedule(this, t);
}

TimingSimpleCPU::TimingSimpleCPU(TimingSimpleCPUParams *p)
    : BaseSimpleCPU(p), icachePort(this, p->clock),
      dcachePort(this, p->clock), fetchEvent(this)
{
    _status = Idle;

    icachePort.snoopRangeSent = false;
    dcachePort.snoopRangeSent = false;

    ifetch_pkt = dcache_pkt = NULL;
    drainEvent = NULL;
    previousTick = 0;
    changeState(SimObject::Running);
}


TimingSimpleCPU::~TimingSimpleCPU()
{
}

void
TimingSimpleCPU::serialize(ostream &os)
{
    SimObject::State so_state = SimObject::getState();
    SERIALIZE_ENUM(so_state);
    BaseSimpleCPU::serialize(os);
}

void
TimingSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    SimObject::State so_state;
    UNSERIALIZE_ENUM(so_state);
    BaseSimpleCPU::unserialize(cp, section);
}

unsigned int
TimingSimpleCPU::drain(Event *drain_event)
{
    // TimingSimpleCPU is ready to drain if it's not waiting for
    // an access to complete.
    if (_status == Idle || _status == Running || _status == SwitchedOut) {
        changeState(SimObject::Drained);
        return 0;
    } else {
        changeState(SimObject::Draining);
        drainEvent = drain_event;
        return 1;
    }
}

void
TimingSimpleCPU::resume()
{
    DPRINTF(SimpleCPU, "Resume\n");
    if (_status != SwitchedOut && _status != Idle) {
        assert(system->getMemoryMode() == Enums::timing);

        if (fetchEvent.scheduled())
            deschedule(fetchEvent);

        schedule(fetchEvent, nextCycle());
    }

    changeState(SimObject::Running);
}

void
TimingSimpleCPU::switchOut()
{
    assert(_status == Running || _status == Idle);
    _status = SwitchedOut;
    numCycles += tickToCycles(curTick - previousTick);

    // If we've been scheduled to resume but are then told to switch out,
    // we'll need to cancel it.
    if (fetchEvent.scheduled())
        deschedule(fetchEvent);
}


void
TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            break;
        }
    }

    if (_status != Running) {
        _status = Idle;
    }
    assert(threadContexts.size() == 1);
    cpuId = tc->readCpuId();
    previousTick = curTick;
}


void
TimingSimpleCPU::activateContext(int thread_num, int delay)
{
    DPRINTF(SimpleCPU, "ActivateContext %d (%d cycles)\n", thread_num, delay);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);

    notIdleFraction++;
    _status = Running;

    // kick things off by initiating the fetch of the next instruction
    schedule(fetchEvent, nextCycle(curTick + ticks(delay)));
}


void
TimingSimpleCPU::suspendContext(int thread_num)
{
    DPRINTF(SimpleCPU, "SuspendContext %d\n", thread_num);

    assert(thread_num == 0);
    assert(thread);

    assert(_status == Running);

    // just change status to Idle... if status != Running,
    // completeInst() will not initiate fetch of next instruction.

    notIdleFraction--;
    _status = Idle;
}

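// Initiate a timing-mode load: translate the virtual address, build the
// request and packet, then either hand the packet to the dcache port or
// stall in DcacheRetry until the port can accept it.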
template <class T>
Fault
TimingSimpleCPU::read(Addr addr, T &data, unsigned flags)
{
    Request *req =
        new Request(/* asid */ 0, addr, sizeof(T), flags, thread->readPC(),
                    cpuId, /* thread ID */ 0);

    if (traceData) {
        traceData->setAddr(req->getVaddr());
    }

    // translate to physical address
    Fault fault = thread->translateDataReadReq(req);

    // Now do the access.
    if (fault == NoFault) {
        PacketPtr pkt =
            new Packet(req,
                       (req->isLocked() ?
                        MemCmd::LoadLockedReq : MemCmd::ReadReq),
                       Packet::Broadcast);
        pkt->dataDynamic<T>(new T);

        if (req->isMmapedIpr()) {
            Tick delay;
            delay = TheISA::handleIprRead(thread->getTC(), pkt);
            new IprEvent(pkt, this, nextCycle(curTick + delay));
            _status = DcacheWaitResponse;
            dcache_pkt = NULL;
        } else if (!dcachePort.sendTiming(pkt)) {
            _status = DcacheRetry;
            dcache_pkt = pkt;
        } else {
            _status = DcacheWaitResponse;
            // memory system takes ownership of packet
            dcache_pkt = NULL;
        }

        // This will need a new way to tell if it has a dcache attached.
        if (req->isUncacheable())
            recordEvent("Uncached Read");
    } else {
        delete req;
    }

    if (traceData) {
        traceData->setData(data);
    }
    return fault;
}

Fault
TimingSimpleCPU::translateDataReadAddr(Addr vaddr, Addr &paddr,
                                       int size, unsigned flags)
{
    Request *req =
        new Request(0, vaddr, size, flags, thread->readPC(), cpuId, 0);

    if (traceData) {
        traceData->setAddr(vaddr);
    }

    // translate as a read (this is the read-address helper)
    Fault fault = thread->translateDataReadReq(req);

    if (fault == NoFault)
        paddr = req->getPaddr();

    delete req;
    return fault;
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
TimingSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
TimingSimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault
TimingSimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}


template<>
Fault
TimingSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}

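// Initiate a timing-mode store, store-conditional, or swap: translate the
// address, build dcache_pkt, and send it to the dcache unless a failed
// store-conditional (handleLockedWrite) suppresses the access.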
template <class T>
Fault
TimingSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    Request *req =
        new Request(/* asid */ 0, addr, sizeof(T), flags, thread->readPC(),
                    cpuId, /* thread ID */ 0);

    if (traceData) {
        traceData->setAddr(req->getVaddr());
    }

    // translate to physical address
    Fault fault = thread->translateDataWriteReq(req);

    // Now do the access.
    if (fault == NoFault) {
        MemCmd cmd = MemCmd::WriteReq; // default
        bool do_access = true;  // flag to suppress cache access

        if (req->isLocked()) {
            cmd = MemCmd::StoreCondReq;
            do_access = TheISA::handleLockedWrite(thread, req);
        } else if (req->isSwap()) {
            cmd = MemCmd::SwapReq;
            if (req->isCondSwap()) {
                assert(res);
                req->setExtraData(*res);
            }
        }

        // Note: need to allocate dcache_pkt even if do_access is
        // false, as it's used unconditionally to call completeAcc().
        assert(dcache_pkt == NULL);
        dcache_pkt = new Packet(req, cmd, Packet::Broadcast);
        dcache_pkt->allocate();
        dcache_pkt->set(data);

        if (do_access) {
            if (req->isMmapedIpr()) {
                Tick delay;
                dcache_pkt->set(htog(data));
                delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
                new IprEvent(dcache_pkt, this, nextCycle(curTick + delay));
                _status = DcacheWaitResponse;
                dcache_pkt = NULL;
            } else if (!dcachePort.sendTiming(dcache_pkt)) {
                _status = DcacheRetry;
            } else {
                _status = DcacheWaitResponse;
                // memory system takes ownership of packet
                dcache_pkt = NULL;
            }
        }

        // This will need a new way to tell if it's hooked up to a cache or not.
        if (req->isUncacheable())
            recordEvent("Uncached Write");
    } else {
        delete req;
    }

    if (traceData) {
        traceData->setData(data);
    }

    // If the write needs to have a fault on the access, consider calling
    // changeStatus() and changing it to "bad addr write" or something.
    return fault;
}

Fault
TimingSimpleCPU::translateDataWriteAddr(Addr vaddr, Addr &paddr,
                                        int size, unsigned flags)
{
    Request *req =
        new Request(0, vaddr, size, flags, thread->readPC(), cpuId, 0);

    if (traceData) {
        traceData->setAddr(vaddr);
    }

    Fault fault = thread->translateDataWriteReq(req);

    if (fault == NoFault)
        paddr = req->getPaddr();

    delete req;
    return fault;
}


#ifndef DOXYGEN_SHOULD_SKIP_THIS
template
Fault
TimingSimpleCPU::write(Twin32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(Twin64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint16_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint8_t data, Addr addr,
                       unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
TimingSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
TimingSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}


template<>
Fault
TimingSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}

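// Begin fetching the next instruction: check for interrupts (unless the
// previous instruction delays commit) and PC events, then either issue a
// timing read to the icache or, for microcode from the ROM, complete the
// fetch immediately.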
void
TimingSimpleCPU::fetch()
{
    DPRINTF(SimpleCPU, "Fetch\n");

    if (!curStaticInst || !curStaticInst->isDelayedCommit())
        checkForInterrupts();

    checkPcEventQueue();

    bool fromRom = isRomMicroPC(thread->readMicroPC());

    if (!fromRom) {
        Request *ifetch_req = new Request();
        ifetch_req->setThreadContext(cpuId, /* thread ID */ 0);
        Fault fault = setupFetchRequest(ifetch_req);

        ifetch_pkt = new Packet(ifetch_req, MemCmd::ReadReq, Packet::Broadcast);
        ifetch_pkt->dataStatic(&inst);

        if (fault == NoFault) {
            if (!icachePort.sendTiming(ifetch_pkt)) {
                // Need to wait for retry
                _status = IcacheRetry;
            } else {
                // Need to wait for cache to respond
                _status = IcacheWaitResponse;
                // ownership of packet transferred to memory system
                ifetch_pkt = NULL;
            }
        } else {
            delete ifetch_req;
            delete ifetch_pkt;
            // fetch fault: advance directly to next instruction (fault handler)
            advanceInst(fault);
        }
    } else {
        _status = IcacheWaitResponse;
        completeIfetch(NULL);
    }

    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;
}


void
TimingSimpleCPU::advanceInst(Fault fault)
{
    advancePC(fault);

    if (_status == Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}


void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    DPRINTF(SimpleCPU, "Complete ICache Fetch\n");

    // received a response from the icache: execute the received
    // instruction

    assert(!pkt || !pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = Running;

    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;

    if (getState() == SimObject::Draining) {
        if (pkt) {
            delete pkt->req;
            delete pkt;
        }

        completeDrain();
        return;
    }

    preExecute();
    if (curStaticInst->isMemRef() && !curStaticInst->isDataPrefetch()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);
        if (_status != Running) {
            // instruction will complete in dcache response callback
            assert(_status == DcacheWaitResponse || _status == DcacheRetry);
            assert(fault == NoFault);
        } else {
            if (fault == NoFault) {
                // Note that ARM can have NULL packets if the instruction gets
                // squashed due to predication
                // early fail on store conditional: complete now
                assert(dcache_pkt != NULL || THE_ISA == ARM_ISA);

                fault = curStaticInst->completeAcc(dcache_pkt, this,
                                                   traceData);
                if (dcache_pkt != NULL) {
                    delete dcache_pkt->req;
                    delete dcache_pkt;
                    dcache_pkt = NULL;
                }

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
            } else if (traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                                  curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData) {
            // If there was a fault, we shouldn't trace this instruction.
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                              curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    }

    if (pkt) {
        delete pkt->req;
        delete pkt;
    }
}

void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    cpu->completeIfetch(pkt);
}

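// Incoming packet on the icache port: responses are processed on the next CPU
// clock edge (immediately if already on one), nacked packets are resent, and
// snooped coherence requests are simply acknowledged.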
bool
TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick);

        if (next_tick == curTick)
            cpu->completeIfetch(pkt);
        else
            tickEvent.schedule(pkt, next_tick);

        return true;
    }
    else if (pkt->wasNacked()) {
        assert(cpu->_status == IcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = IcacheRetry;
            cpu->ifetch_pkt = pkt;
        }
    }
    // Snooping a Coherence Request, do nothing
    return true;
}

void
TimingSimpleCPU::IcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTiming(tmp)) {
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}

void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());
    assert(_status == DcacheWaitResponse);
    _status = Running;

    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    // the locked flag may be cleared on the response packet, so check
    // pkt->req and not pkt to see if it was a load-locked
    if (pkt->isRead() && pkt->req->isLocked()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    if (getState() == SimObject::Draining) {
        advancePC(fault);
        completeDrain();

        return;
    }

    advanceInst(fault);
}


void
TimingSimpleCPU::completeDrain()
{
    DPRINTF(Config, "Done draining\n");
    changeState(SimObject::Drained);
    drainEvent->process();
}

void
TimingSimpleCPU::DcachePort::setPeer(Port *port)
{
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual
    // Ports)
    cpu->tcBase()->connectMemPorts(cpu->tcBase());
#endif
}

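// Incoming packet on the dcache port: same structure as the icache handler
// above, with responses delayed to the next CPU clock edge, nacked packets
// resent, and snooped coherence requests acknowledged.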
bool
TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick);

        if (next_tick == curTick)
            cpu->completeDataAccess(pkt);
        else
            tickEvent.schedule(pkt, next_tick);

        return true;
    }
    else if (pkt->wasNacked()) {
        assert(cpu->_status == DcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = DcacheRetry;
            cpu->dcache_pkt = pkt;
        }
    }
    // Snooping a Coherence Request, do nothing
    return true;
}

void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    cpu->completeDataAccess(pkt);
}

void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (sendTiming(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}

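// IprEvent delays completion of a memory-mapped IPR access by the latency
// returned from handleIprRead/handleIprWrite; the CPU schedules it in the
// constructor and process() finishes the access.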
TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu,
                                    Tick t)
    : pkt(_pkt), cpu(_cpu)
{
    cpu->schedule(this, t);
}

void
TimingSimpleCPU::IprEvent::process()
{
    cpu->completeDataAccess(pkt);
}

const char *
TimingSimpleCPU::IprEvent::description() const
{
    return "Timing Simple CPU Delay IPR event";
}


void
TimingSimpleCPU::printAddr(Addr a)
{
    dcachePort.printAddr(a);
}


////////////////////////////////////////////////////////////////////////
//
//  TimingSimpleCPU Simulation Object
//
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    numThreads = 1;
#if !FULL_SYSTEM
    if (workload.size() != 1)
        panic("only one workload allowed");
#endif
    return new TimingSimpleCPU(this);
}