src/cpu/simple/timing.cc (gem5.git)
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "arch/locked_mem.hh"
#include "arch/mmaped_ipr.hh"
#include "arch/utility.hh"
#include "base/bigint.hh"
#include "cpu/exetrace.hh"
#include "cpu/simple/timing.hh"
#include "mem/packet.hh"
#include "mem/packet_access.hh"
#include "params/TimingSimpleCPU.hh"
#include "sim/system.hh"

using namespace std;
using namespace TheISA;

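// Look up one of the CPU's two ports by name; any other name is fatal.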
Port *
TimingSimpleCPU::getPort(const std::string &if_name, int idx)
{
    if (if_name == "dcache_port")
        return &dcachePort;
    else if (if_name == "icache_port")
        return &icachePort;
    else
        panic("No Such Port\n");
}

void
TimingSimpleCPU::init()
{
    BaseCPU::init();
#if FULL_SYSTEM
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(tc, tc->readCpuId());
    }
#endif
}

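// CpuPort callbacks. This CPU drives memory purely in timing mode, so an
// atomic callback is unexpected; functional accesses need no local state
// update, and the only status change handled is the initial RangeChange.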
Tick
TimingSimpleCPU::CpuPort::recvAtomic(PacketPtr pkt)
{
    panic("TimingSimpleCPU doesn't expect recvAtomic callback!");
    return curTick;
}

void
TimingSimpleCPU::CpuPort::recvFunctional(PacketPtr pkt)
{
    // No internal storage to update, just return
    return;
}

void
TimingSimpleCPU::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange) {
        if (!snoopRangeSent) {
            snoopRangeSent = true;
            sendStatusChange(Port::RangeChange);
        }
        return;
    }

    panic("TimingSimpleCPU doesn't expect recvStatusChange callback!");
}


void
TimingSimpleCPU::CpuPort::TickEvent::schedule(PacketPtr _pkt, Tick t)
{
    pkt = _pkt;
    Event::schedule(t);
}

TimingSimpleCPU::TimingSimpleCPU(Params *p)
    : BaseSimpleCPU(p), icachePort(this, p->clock), dcachePort(this, p->clock)
{
    _status = Idle;

    icachePort.snoopRangeSent = false;
    dcachePort.snoopRangeSent = false;

    ifetch_pkt = dcache_pkt = NULL;
    drainEvent = NULL;
    fetchEvent = NULL;
    previousTick = 0;
    changeState(SimObject::Running);
}


TimingSimpleCPU::~TimingSimpleCPU()
{
}

void
TimingSimpleCPU::serialize(ostream &os)
{
    SimObject::State so_state = SimObject::getState();
    SERIALIZE_ENUM(so_state);
    BaseSimpleCPU::serialize(os);
}

void
TimingSimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    SimObject::State so_state;
    UNSERIALIZE_ENUM(so_state);
    BaseSimpleCPU::unserialize(cp, section);
}

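// Draining: the CPU can drain immediately unless it is waiting on an
// outstanding cache access, in which case the drain completes later from
// the response path via completeDrain().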
unsigned int
TimingSimpleCPU::drain(Event *drain_event)
{
    // TimingSimpleCPU is ready to drain if it's not waiting for
    // an access to complete.
    if (status() == Idle || status() == Running || status() == SwitchedOut) {
        changeState(SimObject::Drained);
        return 0;
    } else {
        changeState(SimObject::Draining);
        drainEvent = drain_event;
        return 1;
    }
}

void
TimingSimpleCPU::resume()
{
    if (_status != SwitchedOut && _status != Idle) {
        assert(system->getMemoryMode() == Enums::timing);

        // Delete the old event if it existed.
        if (fetchEvent) {
            if (fetchEvent->scheduled())
                fetchEvent->deschedule();

            delete fetchEvent;
        }

        fetchEvent = new FetchEvent(this, nextCycle());
    }

    changeState(SimObject::Running);
}

void
TimingSimpleCPU::switchOut()
{
    assert(status() == Running || status() == Idle);
    _status = SwitchedOut;
    numCycles += tickToCycles(curTick - previousTick);

    // If we've been scheduled to resume but are then told to switch out,
    // we'll need to cancel it.
    if (fetchEvent && fetchEvent->scheduled())
        fetchEvent->deschedule();
}


void
TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU, &icachePort, &dcachePort);

    // if any of this CPU's ThreadContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < threadContexts.size(); ++i) {
        ThreadContext *tc = threadContexts[i];
        if (tc->status() == ThreadContext::Active && _status != Running) {
            _status = Running;
            break;
        }
    }

    if (_status != Running) {
        _status = Idle;
    }
    assert(threadContexts.size() == 1);
    cpuId = tc->readCpuId();
    previousTick = curTick;
}


void
TimingSimpleCPU::activateContext(int thread_num, int delay)
{
    assert(thread_num == 0);
    assert(thread);

    assert(_status == Idle);

    notIdleFraction++;
    _status = Running;

    // kick things off by initiating the fetch of the next instruction
    fetchEvent = new FetchEvent(this, nextCycle(curTick + ticks(delay)));
}


void
TimingSimpleCPU::suspendContext(int thread_num)
{
    assert(thread_num == 0);
    assert(thread);

    assert(_status == Running);

    // just change status to Idle... if status != Running,
    // completeInst() will not initiate fetch of next instruction.

    notIdleFraction--;
    _status = Idle;
}


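// Timing read: build a Request, translate it, and either hand the packet to
// the dcache port, park it for retry, or (for memory-mapped IPRs) schedule a
// delayed IprEvent. The access finishes later in completeDataAccess().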
template <class T>
Fault
TimingSimpleCPU::read(Addr addr, T &data, unsigned flags)
{
    Request *req =
        new Request(/* asid */ 0, addr, sizeof(T), flags, thread->readPC(),
                    cpuId, /* thread ID */ 0);

    if (traceData) {
        traceData->setAddr(req->getVaddr());
    }

    // translate to physical address
    Fault fault = thread->translateDataReadReq(req);

    // Now do the access.
    if (fault == NoFault) {
        PacketPtr pkt =
            new Packet(req,
                       (req->isLocked() ?
                        MemCmd::LoadLockedReq : MemCmd::ReadReq),
                       Packet::Broadcast);
        pkt->dataDynamic<T>(new T);

        if (req->isMmapedIpr()) {
            Tick delay;
            delay = TheISA::handleIprRead(thread->getTC(), pkt);
            new IprEvent(pkt, this, nextCycle(curTick + delay));
            _status = DcacheWaitResponse;
            dcache_pkt = NULL;
        } else if (!dcachePort.sendTiming(pkt)) {
            _status = DcacheRetry;
            dcache_pkt = pkt;
        } else {
            _status = DcacheWaitResponse;
            // memory system takes ownership of packet
            dcache_pkt = NULL;
        }

        // This will need a new way to tell if it has a dcache attached.
        if (req->isUncacheable())
            recordEvent("Uncached Read");
    } else {
        delete req;
    }

    return fault;
}

Fault
TimingSimpleCPU::translateDataReadAddr(Addr vaddr, Addr &paddr,
        int size, unsigned flags)
{
    Request *req =
        new Request(0, vaddr, size, flags, thread->readPC(), cpuId, 0);

    if (traceData) {
        traceData->setAddr(vaddr);
    }

    Fault fault = thread->translateDataReadReq(req);

    if (fault == NoFault)
        paddr = req->getPaddr();

    delete req;
    return fault;
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
TimingSimpleCPU::read(Addr addr, Twin64_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, Twin32_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
TimingSimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
TimingSimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault
TimingSimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}


template<>
Fault
TimingSimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}


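// Timing write: like read(), but also handles store-conditional, (conditional)
// swap, and memory-mapped IPR requests. dcache_pkt is always allocated so that
// completeAcc() has a packet to work with, even when the cache access itself
// is suppressed (e.g. an early-failing store conditional).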
template <class T>
Fault
TimingSimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    Request *req =
        new Request(/* asid */ 0, addr, sizeof(T), flags, thread->readPC(),
                    cpuId, /* thread ID */ 0);

    if (traceData) {
        traceData->setAddr(req->getVaddr());
    }

    // translate to physical address
    Fault fault = thread->translateDataWriteReq(req);

    // Now do the access.
    if (fault == NoFault) {
        MemCmd cmd = MemCmd::WriteReq; // default
        bool do_access = true;  // flag to suppress cache access

        if (req->isLocked()) {
            cmd = MemCmd::StoreCondReq;
            do_access = TheISA::handleLockedWrite(thread, req);
        } else if (req->isSwap()) {
            cmd = MemCmd::SwapReq;
            if (req->isCondSwap()) {
                assert(res);
                req->setExtraData(*res);
            }
        }

        // Note: need to allocate dcache_pkt even if do_access is
        // false, as it's used unconditionally to call completeAcc().
        assert(dcache_pkt == NULL);
        dcache_pkt = new Packet(req, cmd, Packet::Broadcast);
        dcache_pkt->allocate();
        dcache_pkt->set(data);

        if (do_access) {
            if (req->isMmapedIpr()) {
                Tick delay;
                dcache_pkt->set(htog(data));
                delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
                new IprEvent(dcache_pkt, this, nextCycle(curTick + delay));
                _status = DcacheWaitResponse;
                dcache_pkt = NULL;
            } else if (!dcachePort.sendTiming(dcache_pkt)) {
                _status = DcacheRetry;
            } else {
                _status = DcacheWaitResponse;
                // memory system takes ownership of packet
                dcache_pkt = NULL;
            }
        }
        // This will need a new way to tell if it's hooked up to a cache or not.
        if (req->isUncacheable())
            recordEvent("Uncached Write");
    } else {
        delete req;
    }


    // If the write needs to have a fault on the access, consider calling
    // changeStatus() and changing it to "bad addr write" or something.
    return fault;
}

Fault
TimingSimpleCPU::translateDataWriteAddr(Addr vaddr, Addr &paddr,
        int size, unsigned flags)
{
    Request *req =
        new Request(0, vaddr, size, flags, thread->readPC(), cpuId, 0);

    if (traceData) {
        traceData->setAddr(vaddr);
    }

    Fault fault = thread->translateDataWriteReq(req);

    if (fault == NoFault)
        paddr = req->getPaddr();

    delete req;
    return fault;
}


#ifndef DOXYGEN_SHOULD_SKIP_THIS
template
Fault
TimingSimpleCPU::write(Twin32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(Twin64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint64_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint32_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint16_t data, Addr addr,
                       unsigned flags, uint64_t *res);

template
Fault
TimingSimpleCPU::write(uint8_t data, Addr addr,
                       unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
TimingSimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
TimingSimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}


template<>
Fault
TimingSimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}


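// Initiate a timing instruction fetch. On a translation fault the icache
// access is skipped and the CPU advances straight to the fault handler.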
void
TimingSimpleCPU::fetch()
{
    if (!curStaticInst || !curStaticInst->isDelayedCommit())
        checkForInterrupts();

    Request *ifetch_req = new Request();
    ifetch_req->setThreadContext(cpuId, /* thread ID */ 0);
    Fault fault = setupFetchRequest(ifetch_req);

    ifetch_pkt = new Packet(ifetch_req, MemCmd::ReadReq, Packet::Broadcast);
    ifetch_pkt->dataStatic(&inst);

    if (fault == NoFault) {
        if (!icachePort.sendTiming(ifetch_pkt)) {
            // Need to wait for retry
            _status = IcacheRetry;
        } else {
            // Need to wait for cache to respond
            _status = IcacheWaitResponse;
            // ownership of packet transferred to memory system
            ifetch_pkt = NULL;
        }
    } else {
        delete ifetch_req;
        delete ifetch_pkt;
        // fetch fault: advance directly to next instruction (fault handler)
        advanceInst(fault);
    }

    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;
}


void
TimingSimpleCPU::advanceInst(Fault fault)
{
    advancePC(fault);

    if (_status == Running) {
        // kick off fetch of next instruction... callback from icache
        // response will cause that instruction to be executed,
        // keeping the CPU running.
        fetch();
    }
}


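// Icache response callback: execute the fetched instruction. Memory
// references only initiate their dcache access here; all other instructions
// execute and retire immediately.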
void
TimingSimpleCPU::completeIfetch(PacketPtr pkt)
{
    // received a response from the icache: execute the received
    // instruction
    assert(!pkt->isError());
    assert(_status == IcacheWaitResponse);

    _status = Running;

    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;

    if (getState() == SimObject::Draining) {
        delete pkt->req;
        delete pkt;

        completeDrain();
        return;
    }

    preExecute();
    if (curStaticInst->isMemRef() && !curStaticInst->isDataPrefetch()) {
        // load or store: just send to dcache
        Fault fault = curStaticInst->initiateAcc(this, traceData);
        if (_status != Running) {
            // instruction will complete in dcache response callback
            assert(_status == DcacheWaitResponse || _status == DcacheRetry);
            assert(fault == NoFault);
        } else {
            if (fault == NoFault) {
                // early fail on store conditional: complete now
                assert(dcache_pkt != NULL);
                fault = curStaticInst->completeAcc(dcache_pkt, this,
                                                   traceData);
                delete dcache_pkt->req;
                delete dcache_pkt;
                dcache_pkt = NULL;

                // keep an instruction count
                if (fault == NoFault)
                    countInst();
            } else if (traceData) {
                // If there was a fault, we shouldn't trace this instruction.
                delete traceData;
                traceData = NULL;
            }

            postExecute();
            // @todo remove me after debugging with legion done
            if (curStaticInst && (!curStaticInst->isMicroop() ||
                        curStaticInst->isFirstMicroop()))
                instCnt++;
            advanceInst(fault);
        }
    } else {
        // non-memory instruction: execute completely now
        Fault fault = curStaticInst->execute(this, traceData);

        // keep an instruction count
        if (fault == NoFault)
            countInst();
        else if (traceData) {
            // If there was a fault, we shouldn't trace this instruction.
            delete traceData;
            traceData = NULL;
        }

        postExecute();
        // @todo remove me after debugging with legion done
        if (curStaticInst && (!curStaticInst->isMicroop() ||
                    curStaticInst->isFirstMicroop()))
            instCnt++;
        advanceInst(fault);
    }

    delete pkt->req;
    delete pkt;
}

void
TimingSimpleCPU::IcachePort::ITickEvent::process()
{
    cpu->completeIfetch(pkt);
}

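// Icache port: responses are deferred to the next CPU clock edge via the
// tick event; nacked packets are retransmitted or parked until recvRetry().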
bool
TimingSimpleCPU::IcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick);

        if (next_tick == curTick)
            cpu->completeIfetch(pkt);
        else
            tickEvent.schedule(pkt, next_tick);

        return true;
    }
    else if (pkt->wasNacked()) {
        assert(cpu->_status == IcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = IcacheRetry;
            cpu->ifetch_pkt = pkt;
        }
    }
    // Snooping a coherence request, do nothing
    return true;
}

void
TimingSimpleCPU::IcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->ifetch_pkt != NULL);
    assert(cpu->_status == IcacheRetry);
    PacketPtr tmp = cpu->ifetch_pkt;
    if (sendTiming(tmp)) {
        cpu->_status = IcacheWaitResponse;
        cpu->ifetch_pkt = NULL;
    }
}

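// Dcache response callback: finish the outstanding load or store, update
// LL/SC state for locked reads, then fetch the next instruction (or finish
// draining if a drain is pending).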
void
TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
{
    // received a response from the dcache: complete the load or store
    // instruction
    assert(!pkt->isError());
    assert(_status == DcacheWaitResponse);
    _status = Running;

    numCycles += tickToCycles(curTick - previousTick);
    previousTick = curTick;

    Fault fault = curStaticInst->completeAcc(pkt, this, traceData);

    // keep an instruction count
    if (fault == NoFault)
        countInst();
    else if (traceData) {
        // If there was a fault, we shouldn't trace this instruction.
        delete traceData;
        traceData = NULL;
    }

    if (pkt->isRead() && pkt->isLocked()) {
        TheISA::handleLockedRead(thread, pkt->req);
    }

    delete pkt->req;
    delete pkt;

    postExecute();

    if (getState() == SimObject::Draining) {
        advancePC(fault);
        completeDrain();

        return;
    }

    advanceInst(fault);
}


void
TimingSimpleCPU::completeDrain()
{
    DPRINTF(Config, "Done draining\n");
    changeState(SimObject::Drained);
    drainEvent->process();
}

void
TimingSimpleCPU::DcachePort::setPeer(Port *port)
{
    Port::setPeer(port);

#if FULL_SYSTEM
    // Update the ThreadContext's memory ports (Functional/Virtual
    // Ports)
    cpu->tcBase()->connectMemPorts();
#endif
}

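// Dcache port: same response/nack handling as the icache port, but
// completion goes through completeDataAccess().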
bool
TimingSimpleCPU::DcachePort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse() && !pkt->wasNacked()) {
        // delay processing of returned data until next CPU clock edge
        Tick next_tick = cpu->nextCycle(curTick);

        if (next_tick == curTick)
            cpu->completeDataAccess(pkt);
        else
            tickEvent.schedule(pkt, next_tick);

        return true;
    }
    else if (pkt->wasNacked()) {
        assert(cpu->_status == DcacheWaitResponse);
        pkt->reinitNacked();
        if (!sendTiming(pkt)) {
            cpu->_status = DcacheRetry;
            cpu->dcache_pkt = pkt;
        }
    }
    // Snooping a coherence request, do nothing
    return true;
}

void
TimingSimpleCPU::DcachePort::DTickEvent::process()
{
    cpu->completeDataAccess(pkt);
}

void
TimingSimpleCPU::DcachePort::recvRetry()
{
    // we shouldn't get a retry unless we have a packet that we're
    // waiting to transmit
    assert(cpu->dcache_pkt != NULL);
    assert(cpu->_status == DcacheRetry);
    PacketPtr tmp = cpu->dcache_pkt;
    if (sendTiming(tmp)) {
        cpu->_status = DcacheWaitResponse;
        // memory system takes ownership of packet
        cpu->dcache_pkt = NULL;
    }
}

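// IprEvent models the latency of a memory-mapped IPR access; when it fires,
// the access completes just like a normal dcache response.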
TimingSimpleCPU::IprEvent::IprEvent(Packet *_pkt, TimingSimpleCPU *_cpu, Tick t)
    : Event(&mainEventQueue), pkt(_pkt), cpu(_cpu)
{
    schedule(t);
}

void
TimingSimpleCPU::IprEvent::process()
{
    cpu->completeDataAccess(pkt);
}

const char *
TimingSimpleCPU::IprEvent::description()
{
    return "Timing Simple CPU Delay IPR event";
}


////////////////////////////////////////////////////////////////////////
//
//  TimingSimpleCPU Simulation Object
//
TimingSimpleCPU *
TimingSimpleCPUParams::create()
{
    TimingSimpleCPU::Params *params = new TimingSimpleCPU::Params();
    params->name = name;
    params->numberOfThreads = 1;
    params->max_insts_any_thread = max_insts_any_thread;
    params->max_insts_all_threads = max_insts_all_threads;
    params->max_loads_any_thread = max_loads_any_thread;
    params->max_loads_all_threads = max_loads_all_threads;
    params->progress_interval = progress_interval;
    params->deferRegistration = defer_registration;
    params->clock = clock;
    params->phase = phase;
    params->functionTrace = function_trace;
    params->functionTraceStart = function_trace_start;
    params->system = system;
    params->cpu_id = cpu_id;
    params->tracer = tracer;

    params->itb = itb;
    params->dtb = dtb;
#if FULL_SYSTEM
    params->profile = profile;
    params->do_quiesce = do_quiesce;
    params->do_checkpoint_insts = do_checkpoint_insts;
    params->do_statistics_insts = do_statistics_insts;
#else
    if (workload.size() != 1)
        panic("only one workload allowed");
    params->process = workload[0];
#endif

    TimingSimpleCPU *cpu = new TimingSimpleCPU(params);
    return cpu;
}