Code update for CPU models.
[gem5.git] / cpu / simple / cpu.cc
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <iomanip>
#include <list>
#include <sstream>
#include <string>

#include "base/cprintf.hh"
#include "base/inifile.hh"
#include "base/loader/symtab.hh"
#include "base/misc.hh"
#include "base/pollevent.hh"
#include "base/range.hh"
#include "base/stats/events.hh"
#include "base/trace.hh"
#include "cpu/base.hh"
#include "cpu/cpu_exec_context.hh"
#include "cpu/exec_context.hh"
#include "cpu/exetrace.hh"
#include "cpu/profile.hh"
#include "cpu/sampler/sampler.hh"
#include "cpu/simple/cpu.hh"
#include "cpu/smt.hh"
#include "cpu/static_inst.hh"
#include "kern/kernel_stats.hh"
#include "mem/base_mem.hh"
#include "mem/mem_interface.hh"
#include "sim/byteswap.hh"
#include "sim/builder.hh"
#include "sim/debug.hh"
#include "sim/host.hh"
#include "sim/sim_events.hh"
#include "sim/sim_object.hh"
#include "sim/stats.hh"

#if FULL_SYSTEM
#include "base/remote_gdb.hh"
#include "mem/functional/memory_control.hh"
#include "mem/functional/physical.hh"
#include "sim/system.hh"
#include "arch/tlb.hh"
#include "arch/stacktrace.hh"
#include "arch/vtophys.hh"
#else // !FULL_SYSTEM
#include "mem/functional/functional.hh"
#endif // FULL_SYSTEM

using namespace std;
// The SimpleCPU does Alpha only
using namespace AlphaISA;


SimpleCPU::TickEvent::TickEvent(SimpleCPU *c, int w)
    : Event(&mainEventQueue, CPU_Tick_Pri), cpu(c), width(w)
{
}


void
SimpleCPU::init()
{
    BaseCPU::init();
#if FULL_SYSTEM
    for (int i = 0; i < execContexts.size(); ++i) {
        ExecContext *xc = execContexts[i];

        // initialize CPU, including PC
        TheISA::initCPU(xc, xc->readCpuId());
    }
#endif
}

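// The tick event's 'width' parameter is a crude superscalar knob: each
// time the event fires, the CPU attempts up to 'width' instructions in
// the same cycle, stopping early if it leaves the Running state (for
// example, on a cache-miss stall).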
void
SimpleCPU::TickEvent::process()
{
    int count = width;
    do {
        cpu->tick();
    } while (--count > 0 && cpu->status() == Running);
}

const char *
SimpleCPU::TickEvent::description()
{
    return "SimpleCPU tick event";
}


SimpleCPU::CacheCompletionEvent::CacheCompletionEvent(SimpleCPU *_cpu)
    : Event(&mainEventQueue), cpu(_cpu)
{
}

void
SimpleCPU::CacheCompletionEvent::process()
{
    cpu->processCacheCompletion();
}

const char *
SimpleCPU::CacheCompletionEvent::description()
{
    return "SimpleCPU cache completion event";
}

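// The constructor allocates a single MemReq that is reused for every
// instruction fetch, load, store, and block copy this CPU issues; its
// 64-byte data buffer matches the one cache block size the model
// supports (see the asserts in the copy routines below).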
SimpleCPU::SimpleCPU(Params *p)
    : BaseCPU(p), tickEvent(this, p->width), cpuXC(NULL),
      cacheCompletionEvent(this)
{
    _status = Idle;
#if FULL_SYSTEM
    cpuXC = new CPUExecContext(this, 0, p->system, p->itb, p->dtb, p->mem);

#else
    cpuXC = new CPUExecContext(this, /* thread_num */ 0, p->process,
                               /* asid */ 0);
#endif // !FULL_SYSTEM
    cpuXC->setStatus(ExecContext::Suspended);
    xcProxy = cpuXC->getProxy();

    icacheInterface = p->icache_interface;
    dcacheInterface = p->dcache_interface;

    memReq = new MemReq();
    memReq->xc = xcProxy;
    memReq->asid = 0;
    memReq->data = new uint8_t[64];

    numInst = 0;
    startNumInst = 0;
    numLoad = 0;
    startNumLoad = 0;
    lastIcacheStall = 0;
    lastDcacheStall = 0;

    execContexts.push_back(xcProxy);
}

SimpleCPU::~SimpleCPU()
{
}

void
SimpleCPU::switchOut(Sampler *s)
{
    sampler = s;
    if (status() == DcacheMissStall) {
        DPRINTF(Sampler, "Outstanding dcache access, waiting for completion\n");
        _status = DcacheMissSwitch;
    } else {
        _status = SwitchedOut;

        if (tickEvent.scheduled())
            tickEvent.squash();

        sampler->signalSwitched();
    }
}


void
SimpleCPU::takeOverFrom(BaseCPU *oldCPU)
{
    BaseCPU::takeOverFrom(oldCPU);

    assert(!tickEvent.scheduled());

    // if any of this CPU's ExecContexts are active, mark the CPU as
    // running and schedule its tick event.
    for (int i = 0; i < execContexts.size(); ++i) {
        ExecContext *xc = execContexts[i];
        if (xc->status() == ExecContext::Active && _status != Running) {
            _status = Running;
            tickEvent.schedule(curTick);
        }
    }
}


void
SimpleCPU::activateContext(int thread_num, int delay)
{
    assert(thread_num == 0);
    assert(cpuXC);

    assert(_status == Idle || _status == SwitchedOut);
    notIdleFraction++;
    scheduleTickEvent(delay);
    _status = Running;
}


void
SimpleCPU::suspendContext(int thread_num)
{
    assert(thread_num == 0);
    assert(cpuXC);

    assert(_status == Running || _status == SwitchedOut);
    notIdleFraction--;
    unscheduleTickEvent();
    _status = Idle;
}


void
SimpleCPU::deallocateContext(int thread_num)
{
    // for now, these are equivalent
    suspendContext(thread_num);
}


void
SimpleCPU::haltContext(int thread_num)
{
    // for now, these are equivalent
    suspendContext(thread_num);
}


void
SimpleCPU::regStats()
{
    using namespace Stats;

    BaseCPU::regStats();

    numInsts
        .name(name() + ".num_insts")
        .desc("Number of instructions executed")
        ;

    numMemRefs
        .name(name() + ".num_refs")
        .desc("Number of memory references")
        ;

    notIdleFraction
        .name(name() + ".not_idle_fraction")
        .desc("Percentage of non-idle cycles")
        ;

    idleFraction
        .name(name() + ".idle_fraction")
        .desc("Percentage of idle cycles")
        ;

    icacheStallCycles
        .name(name() + ".icache_stall_cycles")
        .desc("ICache total stall cycles")
        .prereq(icacheStallCycles)
        ;

    dcacheStallCycles
        .name(name() + ".dcache_stall_cycles")
        .desc("DCache total stall cycles")
        .prereq(dcacheStallCycles)
        ;

    idleFraction = constant(1.0) - notIdleFraction;
}

void
SimpleCPU::resetStats()
{
    startNumInst = numInst;
    notIdleFraction = (_status != Idle);
}

void
SimpleCPU::serialize(ostream &os)
{
    BaseCPU::serialize(os);
    SERIALIZE_ENUM(_status);
    SERIALIZE_SCALAR(inst);
    nameOut(os, csprintf("%s.xc.0", name()));
    cpuXC->serialize(os);
    nameOut(os, csprintf("%s.tickEvent", name()));
    tickEvent.serialize(os);
    nameOut(os, csprintf("%s.cacheCompletionEvent", name()));
    cacheCompletionEvent.serialize(os);
}

void
SimpleCPU::unserialize(Checkpoint *cp, const string &section)
{
    BaseCPU::unserialize(cp, section);
    UNSERIALIZE_ENUM(_status);
    UNSERIALIZE_SCALAR(inst);
    cpuXC->unserialize(cp, csprintf("%s.xc.0", section));
    tickEvent.unserialize(cp, csprintf("%s.tickEvent", section));
    cacheCompletionEvent
        .unserialize(cp, csprintf("%s.cacheCompletionEvent", section));
}

void
change_thread_state(int thread_number, int activate, int priority)
{
}

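// Block copies are done in two steps: copySrcTranslate() translates the
// source address and records its physical address in the exec context,
// then copy() translates the destination, performs the copy through
// functional memory, and (if a d-cache is present) hands the cache a
// Copy request describing the source and destination blocks.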
Fault
SimpleCPU::copySrcTranslate(Addr src)
{
    static bool no_warn = true;
    int blk_size = (dcacheInterface) ? dcacheInterface->getBlockSize() : 64;
    // Only support block sizes of 64 atm.
    assert(blk_size == 64);
    int offset = src & (blk_size - 1);

    // Make sure block doesn't span page
    if (no_warn &&
        (src & PageMask) != ((src + blk_size) & PageMask) &&
        (src >> 40) != 0xfffffc) {
        warn("Copied block source spans pages %x.", src);
        no_warn = false;
    }

    memReq->reset(src & ~(blk_size - 1), blk_size);

    // translate to physical address
    Fault fault = cpuXC->translateDataReadReq(memReq);

    if (fault == NoFault) {
        cpuXC->copySrcAddr = src;
        cpuXC->copySrcPhysAddr = memReq->paddr + offset;
    } else {
        assert(!fault->isAlignmentFault());

        cpuXC->copySrcAddr = 0;
        cpuXC->copySrcPhysAddr = 0;
    }
    return fault;
}

Fault
SimpleCPU::copy(Addr dest)
{
    static bool no_warn = true;
    int blk_size = (dcacheInterface) ? dcacheInterface->getBlockSize() : 64;
    // Only support block sizes of 64 atm.
    assert(blk_size == 64);
    uint8_t data[blk_size];
    //assert(cpuXC->copySrcAddr);
    int offset = dest & (blk_size - 1);

    // Make sure block doesn't span page
    if (no_warn &&
        (dest & PageMask) != ((dest + blk_size) & PageMask) &&
        (dest >> 40) != 0xfffffc) {
        no_warn = false;
        warn("Copied block destination spans pages %x.", dest);
    }

    memReq->reset(dest & ~(blk_size - 1), blk_size);
    // translate to physical address
    Fault fault = cpuXC->translateDataWriteReq(memReq);

    if (fault == NoFault) {
        Addr dest_addr = memReq->paddr + offset;
        // Need to read straight from memory since we have more than 8 bytes.
        memReq->paddr = cpuXC->copySrcPhysAddr;
        cpuXC->mem->read(memReq, data);
        memReq->paddr = dest_addr;
        cpuXC->mem->write(memReq, data);
        if (dcacheInterface) {
            memReq->cmd = Copy;
            memReq->completionEvent = NULL;
            memReq->paddr = cpuXC->copySrcPhysAddr;
            memReq->dest = dest_addr;
            memReq->size = 64;
            memReq->time = curTick;
            memReq->flags &= ~INST_READ;
            dcacheInterface->access(memReq);
        }
    } else {
        assert(!fault->isAlignmentFault());
    }

    return fault;
}

// precise architected memory state accessor macros
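// The templated read() below is the data-memory accessor called by
// executing instructions: it translates the virtual address, performs
// the functional read through the exec context, and, when a d-cache
// model is attached, also presents the request to the cache.  On a
// simulated miss the tick event is descheduled and the CPU waits in
// DcacheMissStall until the cache's completion event fires.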
template <class T>
Fault
SimpleCPU::read(Addr addr, T &data, unsigned flags)
{
    if (status() == DcacheMissStall || status() == DcacheMissSwitch) {
        Fault fault = cpuXC->read(memReq, data);

        if (traceData) {
            traceData->setAddr(memReq->vaddr);
        }
        return fault;
    }

    memReq->reset(addr, sizeof(T), flags);

    // translate to physical address
    Fault fault = cpuXC->translateDataReadReq(memReq);

    // if we have a cache, do cache access too
    if (fault == NoFault && dcacheInterface) {
        memReq->cmd = Read;
        memReq->completionEvent = NULL;
        memReq->time = curTick;
        memReq->flags &= ~INST_READ;
        MemAccessResult result = dcacheInterface->access(memReq);

        // Ugly hack to get an event scheduled *only* if the access is
        // a miss. We really should add first-class support for this
        // at some point.
        if (result != MA_HIT && dcacheInterface->doEvents()) {
            memReq->completionEvent = &cacheCompletionEvent;
            lastDcacheStall = curTick;
            unscheduleTickEvent();
            _status = DcacheMissStall;
        } else {
            // do functional access
            fault = cpuXC->read(memReq, data);
        }
    } else if (fault == NoFault) {
        // do functional access
        fault = cpuXC->read(memReq, data);
    }

    if (!dcacheInterface && (memReq->flags & UNCACHEABLE))
        recordEvent("Uncached Read");

    return fault;
}

#ifndef DOXYGEN_SHOULD_SKIP_THIS

template
Fault
SimpleCPU::read(Addr addr, uint64_t &data, unsigned flags);

template
Fault
SimpleCPU::read(Addr addr, uint32_t &data, unsigned flags);

template
Fault
SimpleCPU::read(Addr addr, uint16_t &data, unsigned flags);

template
Fault
SimpleCPU::read(Addr addr, uint8_t &data, unsigned flags);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
SimpleCPU::read(Addr addr, double &data, unsigned flags)
{
    return read(addr, *(uint64_t*)&data, flags);
}

template<>
Fault
SimpleCPU::read(Addr addr, float &data, unsigned flags)
{
    return read(addr, *(uint32_t*)&data, flags);
}


template<>
Fault
SimpleCPU::read(Addr addr, int32_t &data, unsigned flags)
{
    return read(addr, (uint32_t&)data, flags);
}

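// The store path mirrors read(): the functional write happens first, then
// the request is (optionally) presented to the d-cache.  The optional
// 'res' out-parameter returns memReq->result, which is how conditional
// stores (e.g., Alpha's store-conditional) report success or failure.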
template <class T>
Fault
SimpleCPU::write(T data, Addr addr, unsigned flags, uint64_t *res)
{
    memReq->reset(addr, sizeof(T), flags);

    // translate to physical address
    Fault fault = cpuXC->translateDataWriteReq(memReq);

    // do functional access
    if (fault == NoFault)
        fault = cpuXC->write(memReq, data);

    if (fault == NoFault && dcacheInterface) {
        memReq->cmd = Write;
        memcpy(memReq->data, (uint8_t *)&data, memReq->size);
        memReq->completionEvent = NULL;
        memReq->time = curTick;
        memReq->flags &= ~INST_READ;
        MemAccessResult result = dcacheInterface->access(memReq);

        // Ugly hack to get an event scheduled *only* if the access is
        // a miss. We really should add first-class support for this
        // at some point.
        if (result != MA_HIT && dcacheInterface->doEvents()) {
            memReq->completionEvent = &cacheCompletionEvent;
            lastDcacheStall = curTick;
            unscheduleTickEvent();
            _status = DcacheMissStall;
        }
    }

    if (res && (fault == NoFault))
        *res = memReq->result;

    if (!dcacheInterface && (memReq->flags & UNCACHEABLE))
        recordEvent("Uncached Write");

    return fault;
}


#ifndef DOXYGEN_SHOULD_SKIP_THIS
template
Fault
SimpleCPU::write(uint64_t data, Addr addr, unsigned flags, uint64_t *res);

template
Fault
SimpleCPU::write(uint32_t data, Addr addr, unsigned flags, uint64_t *res);

template
Fault
SimpleCPU::write(uint16_t data, Addr addr, unsigned flags, uint64_t *res);

template
Fault
SimpleCPU::write(uint8_t data, Addr addr, unsigned flags, uint64_t *res);

#endif //DOXYGEN_SHOULD_SKIP_THIS

template<>
Fault
SimpleCPU::write(double data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint64_t*)&data, addr, flags, res);
}

template<>
Fault
SimpleCPU::write(float data, Addr addr, unsigned flags, uint64_t *res)
{
    return write(*(uint32_t*)&data, addr, flags, res);
}


template<>
Fault
SimpleCPU::write(int32_t data, Addr addr, unsigned flags, uint64_t *res)
{
    return write((uint32_t)data, addr, flags, res);
}


#if FULL_SYSTEM
Addr
SimpleCPU::dbg_vtophys(Addr addr)
{
    return vtophys(xcProxy, addr);
}
#endif // FULL_SYSTEM

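// Completion callback for the cacheCompletionEvent scheduled by read(),
// write(), and the fetch path in tick().  It accounts the stall cycles,
// re-runs a stalled load's execute() so the returned data is consumed,
// and reschedules the tick event unless the CPU has been switched out.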
void
SimpleCPU::processCacheCompletion()
{
    switch (status()) {
      case IcacheMissStall:
        icacheStallCycles += curTick - lastIcacheStall;
        _status = IcacheMissComplete;
        scheduleTickEvent(1);
        break;
      case DcacheMissStall:
        if (memReq->cmd.isRead()) {
            curStaticInst->execute(this, traceData);
            if (traceData)
                traceData->finalize();
        }
        dcacheStallCycles += curTick - lastDcacheStall;
        _status = Running;
        scheduleTickEvent(1);
        break;
      case DcacheMissSwitch:
        if (memReq->cmd.isRead()) {
            curStaticInst->execute(this, traceData);
            if (traceData)
                traceData->finalize();
        }
        _status = SwitchedOut;
        sampler->signalSwitched();
        // no break: fall through to the SwitchedOut case and return
      case SwitchedOut:
        // If this CPU has been switched out due to sampling/warm-up,
        // ignore any further status changes (e.g., due to cache
        // misses outstanding at the time of the switch).
        return;
      default:
        panic("SimpleCPU::processCacheCompletion: bad state");
        break;
    }
}

#if FULL_SYSTEM
void
SimpleCPU::post_interrupt(int int_num, int index)
{
    BaseCPU::post_interrupt(int_num, index);

    if (cpuXC->status() == ExecContext::Suspended) {
        DPRINTF(IPI, "Suspended Processor awoke\n");
        cpuXC->activate();
    }
}
#endif // FULL_SYSTEM

/* start simulation, program loaded, processor precise state initialized */
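// tick() models one CPU cycle: check for (full-system) interrupts, keep
// the zero registers at zero, fetch and decode one instruction (stalling
// on an i-cache miss), execute it, update statistics and the PC, and
// reschedule the tick event if the CPU is still Running.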
void
SimpleCPU::tick()
{
    numCycles++;

    traceData = NULL;

    Fault fault = NoFault;

#if FULL_SYSTEM
    if (checkInterrupts && check_interrupts() && !cpuXC->inPalMode() &&
        status() != IcacheMissComplete) {
        int ipl = 0;
        int summary = 0;
        checkInterrupts = false;

        if (cpuXC->readMiscReg(IPR_SIRR)) {
            for (int i = INTLEVEL_SOFTWARE_MIN;
                 i < INTLEVEL_SOFTWARE_MAX; i++) {
                if (cpuXC->readMiscReg(IPR_SIRR) & (ULL(1) << i)) {
                    // See table 4-19 of 21164 hardware reference
                    ipl = (i - INTLEVEL_SOFTWARE_MIN) + 1;
                    summary |= (ULL(1) << i);
                }
            }
        }

        uint64_t interrupts = cpuXC->cpu->intr_status();
        for (int i = INTLEVEL_EXTERNAL_MIN;
             i < INTLEVEL_EXTERNAL_MAX; i++) {
            if (interrupts & (ULL(1) << i)) {
                // See table 4-19 of 21164 hardware reference
                ipl = i;
                summary |= (ULL(1) << i);
            }
        }

        if (cpuXC->readMiscReg(IPR_ASTRR))
            panic("asynchronous traps not implemented\n");

        if (ipl && ipl > cpuXC->readMiscReg(IPR_IPLR)) {
            cpuXC->setMiscReg(IPR_ISR, summary);
            cpuXC->setMiscReg(IPR_INTID, ipl);

            Fault(new InterruptFault)->invoke(xcProxy);

            DPRINTF(Flow, "Interrupt! IPLR=%d ipl=%d summary=%x\n",
                    cpuXC->readMiscReg(IPR_IPLR), ipl, summary);
        }
    }
#endif

    // maintain $r0 semantics
    cpuXC->setIntReg(ZeroReg, 0);
#ifdef TARGET_ALPHA
    cpuXC->setFloatRegDouble(ZeroReg, 0.0);
#endif // TARGET_ALPHA

    if (status() == IcacheMissComplete) {
        // We've already fetched an instruction and were stalled on an
        // I-cache miss.  No need to fetch it again.

        // Set status to running; tick event will get rescheduled if
        // necessary at end of tick() function.
        _status = Running;
    } else {
        // Try to fetch an instruction

        // set up memory request for instruction fetch; in full-system
        // mode a PC with its low bit set (PAL mode) is fetched with
        // physical addressing
#if FULL_SYSTEM
#define IFETCH_FLAGS(pc) ((pc) & 1) ? PHYSICAL : 0
#else
#define IFETCH_FLAGS(pc) 0
#endif

        memReq->cmd = Read;
        memReq->reset(cpuXC->readPC() & ~3, sizeof(uint32_t),
                      IFETCH_FLAGS(cpuXC->readPC()));

        fault = cpuXC->translateInstReq(memReq);

        if (fault == NoFault)
            fault = cpuXC->mem->read(memReq, inst);

        if (icacheInterface && fault == NoFault) {
            memReq->completionEvent = NULL;

            memReq->time = curTick;
            memReq->flags |= INST_READ;
            MemAccessResult result = icacheInterface->access(memReq);

            // Ugly hack to get an event scheduled *only* if the access is
            // a miss. We really should add first-class support for this
            // at some point.
            if (result != MA_HIT && icacheInterface->doEvents()) {
                memReq->completionEvent = &cacheCompletionEvent;
                lastIcacheStall = curTick;
                unscheduleTickEvent();
                _status = IcacheMissStall;
                return;
            }
        }
    }

    // If we've got a valid instruction (i.e., no fault on instruction
    // fetch), then execute it.
    if (fault == NoFault) {

        // keep an instruction count
        numInst++;
        numInsts++;

        // check for instruction-count-based events
        comInstEventQueue[0]->serviceEvents(numInst);

        // decode the instruction
        inst = gtoh(inst);
        curStaticInst = StaticInst::decode(makeExtMI(inst, cpuXC->readPC()));

        traceData = Trace::getInstRecord(curTick, xcProxy, this, curStaticInst,
                                         cpuXC->readPC());

#if FULL_SYSTEM
        cpuXC->setInst(inst);
#endif // FULL_SYSTEM

        cpuXC->func_exe_inst++;

        fault = curStaticInst->execute(this, traceData);

#if FULL_SYSTEM
        if (system->kernelBinning->fnbin) {
            assert(cpuXC->getKernelStats());
            system->kernelBinning->execute(xcProxy, inst);
        }

        if (cpuXC->profile) {
            bool usermode =
                (cpuXC->readMiscReg(AlphaISA::IPR_DTB_CM) & 0x18) != 0;
            cpuXC->profilePC = usermode ? 1 : cpuXC->readPC();
            ProfileNode *node = cpuXC->profile->consume(xcProxy, inst);
            if (node)
                cpuXC->profileNode = node;
        }
#endif

        if (curStaticInst->isMemRef()) {
            numMemRefs++;
        }

        if (curStaticInst->isLoad()) {
            ++numLoad;
            comLoadEventQueue[0]->serviceEvents(numLoad);
        }

        // If we have a dcache miss, then we can't finalize the instruction
        // trace yet because we want to populate it with the data later
        if (traceData &&
            !(status() == DcacheMissStall && memReq->cmd.isRead())) {
            traceData->finalize();
        }

        traceFunctions(cpuXC->readPC());

    } // if (fault == NoFault)

    if (fault != NoFault) {
#if FULL_SYSTEM
        fault->invoke(xcProxy);
#else // !FULL_SYSTEM
        fatal("fault (%d) detected @ PC 0x%08p", fault, cpuXC->readPC());
#endif // FULL_SYSTEM
    } else {
#if THE_ISA != MIPS_ISA
        // go to the next instruction
        cpuXC->setPC(cpuXC->readNextPC());
        cpuXC->setNextPC(cpuXC->readNextPC() + sizeof(MachInst));
#else
        // go to the next instruction; MIPS keeps PC, NPC, and NNPC to
        // model the branch delay slot
        cpuXC->setPC(cpuXC->readNextPC());
        cpuXC->setNextPC(cpuXC->readNextNPC());
        cpuXC->setNextNPC(cpuXC->readNextNPC() + sizeof(MachInst));
#endif
    }

#if FULL_SYSTEM
    Addr oldpc;
    do {
        oldpc = cpuXC->readPC();
        system->pcEventQueue.service(xcProxy);
    } while (oldpc != cpuXC->readPC());
#endif

    assert(status() == Running ||
           status() == Idle ||
           status() == DcacheMissStall);

    if (status() == Running && !tickEvent.scheduled())
        tickEvent.schedule(curTick + cycles(1));
}

////////////////////////////////////////////////////////////////////////
//
// SimpleCPU Simulation Object
//
BEGIN_DECLARE_SIM_OBJECT_PARAMS(SimpleCPU)

    Param<Counter> max_insts_any_thread;
    Param<Counter> max_insts_all_threads;
    Param<Counter> max_loads_any_thread;
    Param<Counter> max_loads_all_threads;
    Param<Tick> progress_interval;

#if FULL_SYSTEM
    SimObjectParam<AlphaITB *> itb;
    SimObjectParam<AlphaDTB *> dtb;
    SimObjectParam<FunctionalMemory *> mem;
    SimObjectParam<System *> system;
    Param<int> cpu_id;
    Param<Tick> profile;
#else
    SimObjectParam<Process *> workload;
#endif // FULL_SYSTEM

    Param<int> clock;
    SimObjectParam<BaseMem *> icache;
    SimObjectParam<BaseMem *> dcache;

    Param<bool> defer_registration;
    Param<int> width;
    Param<bool> function_trace;
    Param<Tick> function_trace_start;

END_DECLARE_SIM_OBJECT_PARAMS(SimpleCPU)

BEGIN_INIT_SIM_OBJECT_PARAMS(SimpleCPU)

    INIT_PARAM(max_insts_any_thread,
               "terminate when any thread reaches this inst count"),
    INIT_PARAM(max_insts_all_threads,
               "terminate when all threads have reached this inst count"),
    INIT_PARAM(max_loads_any_thread,
               "terminate when any thread reaches this load count"),
    INIT_PARAM(max_loads_all_threads,
               "terminate when all threads have reached this load count"),
    INIT_PARAM_DFLT(progress_interval, "CPU Progress interval", 0),

#if FULL_SYSTEM
    INIT_PARAM(itb, "Instruction TLB"),
    INIT_PARAM(dtb, "Data TLB"),
    INIT_PARAM(mem, "memory"),
    INIT_PARAM(system, "system object"),
    INIT_PARAM(cpu_id, "processor ID"),
    INIT_PARAM(profile, ""),
#else
    INIT_PARAM(workload, "processes to run"),
#endif // FULL_SYSTEM

    INIT_PARAM(clock, "clock speed"),
    INIT_PARAM(icache, "L1 instruction cache object"),
    INIT_PARAM(dcache, "L1 data cache object"),
    INIT_PARAM(defer_registration, "defer system registration (for sampling)"),
    INIT_PARAM(width, "cpu width"),
    INIT_PARAM(function_trace, "Enable function trace"),
    INIT_PARAM(function_trace_start, "Cycle to start function trace")

END_INIT_SIM_OBJECT_PARAMS(SimpleCPU)


CREATE_SIM_OBJECT(SimpleCPU)
{
    SimpleCPU::Params *params = new SimpleCPU::Params();
    params->name = getInstanceName();
    params->numberOfThreads = 1;
    params->max_insts_any_thread = max_insts_any_thread;
    params->max_insts_all_threads = max_insts_all_threads;
    params->max_loads_any_thread = max_loads_any_thread;
    params->max_loads_all_threads = max_loads_all_threads;
    params->deferRegistration = defer_registration;
    params->clock = clock;
    params->functionTrace = function_trace;
    params->functionTraceStart = function_trace_start;
    params->icache_interface = (icache) ? icache->getInterface() : NULL;
    params->dcache_interface = (dcache) ? dcache->getInterface() : NULL;
    params->width = width;

    params->progress_interval = progress_interval;
#if FULL_SYSTEM
    params->itb = itb;
    params->dtb = dtb;
    params->mem = mem;
    params->system = system;
    params->cpu_id = cpu_id;
    params->profile = profile;
#else
    params->process = workload;
#endif

    SimpleCPU *cpu = new SimpleCPU(params);
    return cpu;
}

REGISTER_SIM_OBJECT("SimpleCPU", SimpleCPU)
958