sim-se: don't wake up SE futex syscalls on ARM events
[gem5.git] / src / cpu / base.cc
/*
 * Copyright (c) 2011-2012,2016-2017, 2019 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2011 Regents of the University of California
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * Copyright (c) 2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "cpu/base.hh"

#include <iostream>
#include <sstream>
#include <string>

#include "arch/generic/tlb.hh"
#include "base/cprintf.hh"
#include "base/loader/symtab.hh"
#include "base/logging.hh"
#include "base/output.hh"
#include "base/trace.hh"
#include "cpu/checker/cpu.hh"
#include "cpu/thread_context.hh"
#include "debug/Mwait.hh"
#include "debug/SyscallVerbose.hh"
#include "debug/Thread.hh"
#include "mem/page_table.hh"
#include "params/BaseCPU.hh"
#include "sim/clocked_object.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"
#include "sim/sim_events.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

// Hack
#include "sim/stat_control.hh"

using namespace std;

vector<BaseCPU *> BaseCPU::cpuList;

// This variable reflects the max number of threads in any CPU. Be
// careful to only use it once all the CPUs that you care about have
// been initialized
int maxThreadsPerCPU = 1;

CPUProgressEvent::CPUProgressEvent(BaseCPU *_cpu, Tick ival)
    : Event(Event::Progress_Event_Pri), _interval(ival), lastNumInst(0),
      cpu(_cpu), _repeatEvent(true)
{
    if (_interval)
        cpu->schedule(this, curTick() + _interval);
}

void
CPUProgressEvent::process()
{
    Counter temp = cpu->totalOps();

    if (_repeatEvent)
        cpu->schedule(this, curTick() + _interval);

    if (cpu->switchedOut()) {
        return;
    }

#ifndef NDEBUG
    double ipc = double(temp - lastNumInst) / (_interval / cpu->clockPeriod());

    DPRINTFN("%s progress event, total committed:%i, progress insts committed: "
             "%lli, IPC: %0.8d\n", cpu->name(), temp, temp - lastNumInst,
             ipc);
    ipc = 0.0;
#else
    cprintf("%lli: %s progress event, total committed:%i, progress insts "
            "committed: %lli\n", curTick(), cpu->name(), temp,
            temp - lastNumInst);
#endif
    lastNumInst = temp;
}

const char *
CPUProgressEvent::description() const
{
    return "CPU Progress";
}

BaseCPU::BaseCPU(Params *p, bool is_checker)
    : ClockedObject(p), instCnt(0), _cpuId(p->cpu_id), _socketId(p->socket_id),
      _instMasterId(p->system->getMasterId(this, "inst")),
      _dataMasterId(p->system->getMasterId(this, "data")),
      _taskId(ContextSwitchTaskId::Unknown), _pid(invldPid),
      _switchedOut(p->switched_out), _cacheLineSize(p->system->cacheLineSize()),
      interrupts(p->interrupts), numThreads(p->numThreads), system(p->system),
      previousCycle(0), previousState(CPU_STATE_SLEEP),
      functionTraceStream(nullptr), currentFunctionStart(0),
      currentFunctionEnd(0), functionEntryTick(0),
      addressMonitor(p->numThreads),
      syscallRetryLatency(p->syscallRetryLatency),
      pwrGatingLatency(p->pwr_gating_latency),
      powerGatingOnIdle(p->power_gating_on_idle),
      enterPwrGatingEvent([this]{ enterPwrGating(); }, name())
{
    // if Python did not provide a valid ID, do it here
    if (_cpuId == -1) {
        _cpuId = cpuList.size();
    }

    // add self to global list of CPUs
    cpuList.push_back(this);

    DPRINTF(SyscallVerbose, "Constructing CPU with id %d, socket id %d\n",
            _cpuId, _socketId);

    if (numThreads > maxThreadsPerCPU)
        maxThreadsPerCPU = numThreads;

    functionTracingEnabled = false;
    if (p->function_trace) {
        const string fname = csprintf("ftrace.%s", name());
        functionTraceStream = simout.findOrCreate(fname)->stream();

        currentFunctionStart = currentFunctionEnd = 0;
        functionEntryTick = p->function_trace_start;

        if (p->function_trace_start == 0) {
            functionTracingEnabled = true;
        } else {
            Event *event = new EventFunctionWrapper(
                [this]{ enableFunctionTrace(); }, name(), true);
            schedule(event, p->function_trace_start);
        }
    }

    tracer = params()->tracer;

    if (params()->isa.size() != numThreads) {
        fatal("Number of ISAs (%i) assigned to the CPU does not equal number "
              "of threads (%i).\n", params()->isa.size(), numThreads);
    }
}

void
BaseCPU::enableFunctionTrace()
{
    functionTracingEnabled = true;
}

BaseCPU::~BaseCPU()
{
}

void
BaseCPU::postInterrupt(ThreadID tid, int int_num, int index)
{
    interrupts[tid]->post(int_num, index);
    // Only wake up syscall emulation if it is not waiting on a futex.
    // This is to model the fact that instructions such as ARM SEV
    // should wake up a WFE sleep, but not a futex syscall WAIT.
    if (FullSystem || !system->futexMap.is_waiting(threadContexts[tid]))
        wakeup(tid);
}
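
// A minimal standalone sketch (not gem5 code) of the wakeup gating above:
// in SE mode a hardware event such as ARM SEV must not wake a thread that
// is blocked in a futex(FUTEX_WAIT) syscall, while FS mode always wakes.
// The names below are hypothetical stand-ins for gem5's FutexMap and
// ThreadContext.
#if 0
void
postInterruptSketch(bool full_system, bool waiting_on_futex)
{
    // Mirrors the guard above: FutexMap::is_waiting(tc) reports whether
    // the thread context is blocked inside futex(FUTEX_WAIT).
    if (full_system || !waiting_on_futex) {
        // wakeup(tid) is only called here; a futex waiter stays asleep
        // until a FUTEX_WAKE syscall releases it.
    }
}
#endif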

void
BaseCPU::armMonitor(ThreadID tid, Addr address)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    monitor.armed = true;
    monitor.vAddr = address;
    monitor.pAddr = 0x0;
    DPRINTF(Mwait, "[tid:%d] Armed monitor (vAddr=0x%lx)\n", tid, address);
}

bool
BaseCPU::mwait(ThreadID tid, PacketPtr pkt)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    if (!monitor.gotWakeup) {
        int block_size = cacheLineSize();
        uint64_t mask = ~((uint64_t)(block_size - 1));

        assert(pkt->req->hasPaddr());
        monitor.pAddr = pkt->getAddr() & mask;
        monitor.waiting = true;

        DPRINTF(Mwait, "[tid:%d] mwait called (vAddr=0x%lx, "
                "line's paddr=0x%lx)\n", tid, monitor.vAddr, monitor.pAddr);
        return true;
    } else {
        monitor.gotWakeup = false;
        return false;
    }
}
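
// Hedged usage sketch of the monitor/mwait handshake above, assuming a CPU
// model drives these hooks (cpu, tc, tid, addr and pkt are hypothetical):
// armMonitor() records the watched virtual address, and mwait() latches the
// cache-line physical address and reports whether the thread should sleep.
#if 0
cpu->armMonitor(tid, addr);       // MONITOR: remember the watched address
if (cpu->mwait(tid, pkt)) {       // MWAIT: no wakeup pending yet
    // Suspend until AddressMonitor::doMonitor() sees a write to the line.
    tc->suspend();
}
// If mwait() returned false, a wakeup already arrived (gotWakeup was set),
// so execution simply continues.
#endif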

void
BaseCPU::mwaitAtomic(ThreadID tid, ThreadContext *tc, BaseTLB *dtb)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    RequestPtr req = std::make_shared<Request>();

    Addr addr = monitor.vAddr;
    int block_size = cacheLineSize();
    uint64_t mask = ~((uint64_t)(block_size - 1));
    int size = block_size;

    // The address of the next line if it crosses a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, block_size);

    if (secondAddr > addr)
        size = secondAddr - addr;

    req->setVirt(addr, size, 0x0, dataMasterId(), tc->instAddr());

    // translate to physical address
    Fault fault = dtb->translateAtomic(req, tc, BaseTLB::Read);
    assert(fault == NoFault);

    monitor.pAddr = req->getPaddr() & mask;
    monitor.waiting = true;

    DPRINTF(Mwait, "[tid:%d] mwait called (vAddr=0x%lx, line's paddr=0x%lx)\n",
            tid, monitor.vAddr, monitor.pAddr);
}
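
// Worked example of the line-masking arithmetic used above, assuming a
// 64-byte cache line: block_size - 1 == 0x3f, so the mask clears the low
// six bits and pAddr becomes the base address of the monitored line.
#if 0
const int block_size = 64;                           // assumed line size
const uint64_t mask = ~((uint64_t)(block_size - 1)); // 0xffff...ffc0
static_assert((0x12345 & 0xffffffffffffffc0ull) == 0x12340,
              "0x12345 falls in the line starting at 0x12340");
#endif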

void
BaseCPU::init()
{
    // Set up instruction-count-based termination events, if any. This needs
    // to happen after threadContexts has been constructed.
    if (params()->max_insts_any_thread != 0) {
        const char *cause = "a thread reached the max instruction count";
        for (ThreadID tid = 0; tid < numThreads; ++tid)
            scheduleInstStop(tid, params()->max_insts_any_thread, cause);
    }

    // Set up instruction-count-based termination events for SimPoints.
    // Typically, there is more than one action point.
    // Simulation.py is responsible for taking the necessary actions upon
    // exiting the simulation loop.
    if (!params()->simpoint_start_insts.empty()) {
        const char *cause = "simpoint starting point found";
        for (size_t i = 0; i < params()->simpoint_start_insts.size(); ++i)
            scheduleInstStop(0, params()->simpoint_start_insts[i], cause);
    }

    if (params()->max_insts_all_threads != 0) {
        const char *cause = "all threads reached the max instruction count";

        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            threadContexts[tid]->scheduleInstCountEvent(
                    event, params()->max_insts_all_threads);
        }
    }

    if (!params()->switched_out) {
        registerThreadContexts();

        verifyMemoryMode();
    }
}
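
// A minimal sketch (paraphrasing CountedExitEvent, not quoting it) of the
// shared-downcounter pattern used for max_insts_all_threads above: every
// per-thread event decrements one shared counter, and only the thread that
// drives it to zero ends the simulation.
#if 0
struct CountedExitSketch
{
    const char *cause;
    int &downCounter; // shared by all per-thread events

    void process()
    {
        if (--downCounter == 0) {
            // exitSimLoop(cause) would be called here.
        }
    }
};
#endif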

void
BaseCPU::startup()
{
    if (params()->progress_interval) {
        new CPUProgressEvent(this, params()->progress_interval);
    }

    if (_switchedOut)
        powerState->set(Enums::PwrState::OFF);

    // Assumption: CPUs start to operate instantaneously without any latency
    if (powerState->get() == Enums::PwrState::UNDEFINED)
        powerState->set(Enums::PwrState::ON);
}

ProbePoints::PMUUPtr
BaseCPU::pmuProbePoint(const char *name)
{
    ProbePoints::PMUUPtr ptr;
    ptr.reset(new ProbePoints::PMU(getProbeManager(), name));

    return ptr;
}

void
BaseCPU::regProbePoints()
{
    ppAllCycles = pmuProbePoint("Cycles");
    ppActiveCycles = pmuProbePoint("ActiveCycles");

    ppRetiredInsts = pmuProbePoint("RetiredInsts");
    ppRetiredInstsPC = pmuProbePoint("RetiredInstsPC");
    ppRetiredLoads = pmuProbePoint("RetiredLoads");
    ppRetiredStores = pmuProbePoint("RetiredStores");
    ppRetiredBranches = pmuProbePoint("RetiredBranches");

    ppSleeping = new ProbePointArg<bool>(this->getProbeManager(),
                                         "Sleeping");
}
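
// Hedged sketch: a PMU probe point is a typed notification channel that
// fires once per notify(); subscribers attach through the probe manager.
// The subscribe() call below is a hypothetical paraphrase, not the exact
// gem5 listener API.
#if 0
uint64_t retired = 0;
// e.g. count retired instructions by subscribing to "RetiredInsts":
subscribe(probeManager, "RetiredInsts", [&](uint64_t n) { retired += n; });
#endif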

void
BaseCPU::probeInstCommit(const StaticInstPtr &inst, Addr pc)
{
    if (!inst->isMicroop() || inst->isLastMicroop()) {
        ppRetiredInsts->notify(1);
        ppRetiredInstsPC->notify(pc);
    }

    if (inst->isLoad())
        ppRetiredLoads->notify(1);

    if (inst->isStore() || inst->isAtomic())
        ppRetiredStores->notify(1);

    if (inst->isControl())
        ppRetiredBranches->notify(1);
}
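
// Sketch of the retirement rule above: a macro-op expanded into N micro-ops
// notifies RetiredInsts exactly once, on its last micro-op, so the count
// matches the architectural instruction stream.
#if 0
bool
countsAsRetiredInst(bool is_microop, bool is_last_microop)
{
    return !is_microop || is_last_microop;
}
#endif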

void
BaseCPU::regStats()
{
    ClockedObject::regStats();

    using namespace Stats;

    numCycles
        .name(name() + ".numCycles")
        .desc("number of cpu cycles simulated")
        ;

    numWorkItemsStarted
        .name(name() + ".numWorkItemsStarted")
        .desc("number of work items this cpu started")
        ;

    numWorkItemsCompleted
        .name(name() + ".numWorkItemsCompleted")
        .desc("number of work items this cpu completed")
        ;

    int size = threadContexts.size();
    if (size > 1) {
        for (int i = 0; i < size; ++i) {
            stringstream namestr;
            ccprintf(namestr, "%s.ctx%d", name(), i);
            threadContexts[i]->regStats(namestr.str());
        }
    } else if (size == 1)
        threadContexts[0]->regStats(name());
}

Port &
BaseCPU::getPort(const string &if_name, PortID idx)
{
    // Get the right port based on name. This applies to all the
    // subclasses of the base CPU and relies on their implementation
    // of getDataPort and getInstPort.
    if (if_name == "dcache_port")
        return getDataPort();
    else if (if_name == "icache_port")
        return getInstPort();
    else
        return ClockedObject::getPort(if_name, idx);
}

void
BaseCPU::registerThreadContexts()
{
    assert(system->multiThread || numThreads == 1);

    fatal_if(interrupts.size() != numThreads,
             "CPU %s has %i interrupt controllers, but is expecting one "
             "per thread (%i)\n",
             name(), interrupts.size(), numThreads);

    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        ThreadContext *tc = threadContexts[tid];

        if (system->multiThread) {
            tc->setContextId(system->registerThreadContext(tc));
        } else {
            tc->setContextId(system->registerThreadContext(tc, _cpuId));
        }

        if (!FullSystem)
            tc->getProcessPtr()->assignThreadContext(tc->contextId());

        interrupts[tid]->setThreadContext(tc);
        tc->getIsaPtr()->setThreadContext(tc);
    }
}

void
BaseCPU::deschedulePowerGatingEvent()
{
    if (enterPwrGatingEvent.scheduled()) {
        deschedule(enterPwrGatingEvent);
    }
}

void
BaseCPU::schedulePowerGatingEvent()
{
    for (auto tc : threadContexts) {
        if (tc->status() == ThreadContext::Active)
            return;
    }

    if (powerState->get() == Enums::PwrState::CLK_GATED &&
        powerGatingOnIdle) {
        assert(!enterPwrGatingEvent.scheduled());
        // Schedule a power gating event when clock gated for the specified
        // amount of time
        schedule(enterPwrGatingEvent, clockEdge(pwrGatingLatency));
    }
}

int
BaseCPU::findContext(ThreadContext *tc)
{
    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        if (tc == threadContexts[tid])
            return tid;
    }
    return 0;
}

void
BaseCPU::activateContext(ThreadID thread_num)
{
    DPRINTF(Thread, "activate contextId %d\n",
            threadContexts[thread_num]->contextId());
    // Squash the enter-power-gating event while the cpu gets activated
    if (enterPwrGatingEvent.scheduled())
        deschedule(enterPwrGatingEvent);
    // For any active thread running, update CPU power state to active (ON)
    powerState->set(Enums::PwrState::ON);

    updateCycleCounters(CPU_STATE_WAKEUP);
}

void
BaseCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(Thread, "suspend contextId %d\n",
            threadContexts[thread_num]->contextId());
    // Check if all threads are suspended
    for (auto t : threadContexts) {
        if (t->status() != ThreadContext::Suspended) {
            return;
        }
    }

    // All CPU threads are suspended, update the cycle count
    updateCycleCounters(CPU_STATE_SLEEP);

    // All CPU threads suspended, enter a lower power state for the CPU
    powerState->set(Enums::PwrState::CLK_GATED);

    // Full power gating is only armed when power_gating_on_idle is enabled
    if (powerGatingOnIdle) {
        // Schedule the power gating event after being clock gated for
        // pwrGatingLatency cycles
        schedule(enterPwrGatingEvent, clockEdge(pwrGatingLatency));
    }
}
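
// Hedged sketch of the idle power-gating sequence above (hypothetical
// helper names; real gem5 scheduling goes through the event queue):
#if 0
void
onLastThreadSuspended(bool power_gating_on_idle)
{
    setPwrState(CLK_GATED);                 // gate the clock immediately
    if (power_gating_on_idle) {
        // Defer the transition to OFF; activateContext() can still cancel
        // this event if a thread wakes up in the meantime.
        scheduleAfter(pwrGatingLatency, [] { setPwrState(OFF); });
    }
}
#endif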

void
BaseCPU::haltContext(ThreadID thread_num)
{
    updateCycleCounters(BaseCPU::CPU_STATE_SLEEP);
}

void
BaseCPU::enterPwrGating(void)
{
    powerState->set(Enums::PwrState::OFF);
}

void
BaseCPU::switchOut()
{
    assert(!_switchedOut);
    _switchedOut = true;

    // Flush all TLBs in the CPU to avoid having stale translations if
    // it gets switched in later.
    flushTLBs();

    // Go to the power gating state
    powerState->set(Enums::PwrState::OFF);
}

void
BaseCPU::takeOverFrom(BaseCPU *oldCPU)
{
    assert(threadContexts.size() == oldCPU->threadContexts.size());
    assert(_cpuId == oldCPU->cpuId());
    assert(_switchedOut);
    assert(oldCPU != this);
    _pid = oldCPU->getPid();
    _taskId = oldCPU->taskId();
    // Take over the power state of the switched-out CPU
    powerState->set(oldCPU->powerState->get());

    previousState = oldCPU->previousState;
    previousCycle = oldCPU->previousCycle;

    _switchedOut = false;

    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *newTC = threadContexts[i];
        ThreadContext *oldTC = oldCPU->threadContexts[i];

        newTC->getIsaPtr()->setThreadContext(newTC);

        newTC->takeOverFrom(oldTC);

        assert(newTC->contextId() == oldTC->contextId());
        assert(newTC->threadId() == oldTC->threadId());
        system->replaceThreadContext(newTC, newTC->contextId());

        /* This code no longer works since the zero register (e.g.,
         * r31 on Alpha) doesn't necessarily contain zero at this
         * point.
        if (DTRACE(Context))
            ThreadContext::compare(oldTC, newTC);
        */

        Port *old_itb_port = oldTC->getITBPtr()->getTableWalkerPort();
        Port *old_dtb_port = oldTC->getDTBPtr()->getTableWalkerPort();
        Port *new_itb_port = newTC->getITBPtr()->getTableWalkerPort();
        Port *new_dtb_port = newTC->getDTBPtr()->getTableWalkerPort();

        // Move over any table walker ports if they exist
        if (new_itb_port)
            new_itb_port->takeOverFrom(old_itb_port);
        if (new_dtb_port)
            new_dtb_port->takeOverFrom(old_dtb_port);
        newTC->getITBPtr()->takeOverFrom(oldTC->getITBPtr());
        newTC->getDTBPtr()->takeOverFrom(oldTC->getDTBPtr());

        // Check whether or not we have to transfer CheckerCPU
        // objects over in the switch
        CheckerCPU *oldChecker = oldTC->getCheckerCpuPtr();
        CheckerCPU *newChecker = newTC->getCheckerCpuPtr();
        if (oldChecker && newChecker) {
            Port *old_checker_itb_port =
                oldChecker->getITBPtr()->getTableWalkerPort();
            Port *old_checker_dtb_port =
                oldChecker->getDTBPtr()->getTableWalkerPort();
            Port *new_checker_itb_port =
                newChecker->getITBPtr()->getTableWalkerPort();
            Port *new_checker_dtb_port =
                newChecker->getDTBPtr()->getTableWalkerPort();

            newChecker->getITBPtr()->takeOverFrom(oldChecker->getITBPtr());
            newChecker->getDTBPtr()->takeOverFrom(oldChecker->getDTBPtr());

            // Move over any table walker ports if they exist for checker
            if (new_checker_itb_port)
                new_checker_itb_port->takeOverFrom(old_checker_itb_port);
            if (new_checker_dtb_port)
                new_checker_dtb_port->takeOverFrom(old_checker_dtb_port);
        }
    }

    interrupts = oldCPU->interrupts;
    for (ThreadID tid = 0; tid < numThreads; tid++) {
        interrupts[tid]->setThreadContext(threadContexts[tid]);
    }
    oldCPU->interrupts.clear();

    // All CPUs have an instruction and a data port, and the new CPU's
    // ports are dangling while the old CPU has its ports connected
    // already. Unbind the old CPU and then bind the ports of the one
    // we are switching to.
    getInstPort().takeOverFrom(&oldCPU->getInstPort());
    getDataPort().takeOverFrom(&oldCPU->getDataPort());
}

void
BaseCPU::flushTLBs()
{
    for (ThreadID i = 0; i < threadContexts.size(); ++i) {
        ThreadContext &tc(*threadContexts[i]);
        CheckerCPU *checker(tc.getCheckerCpuPtr());

        tc.getITBPtr()->flushAll();
        tc.getDTBPtr()->flushAll();
        if (checker) {
            checker->getITBPtr()->flushAll();
            checker->getDTBPtr()->flushAll();
        }
    }
}

void
BaseCPU::serialize(CheckpointOut &cp) const
{
    SERIALIZE_SCALAR(instCnt);

    if (!_switchedOut) {
        /* Unlike _pid, _taskId is not serialized, as it is a dynamically
         * assigned unique id that is only meaningful for the duration of
         * a specific run. We will need to serialize the entire taskMap in
         * system. */
        SERIALIZE_SCALAR(_pid);

        // Serialize the threads; this is done by the CPU implementation.
        for (ThreadID i = 0; i < numThreads; ++i) {
            ScopedCheckpointSection sec(cp, csprintf("xc.%i", i));
            interrupts[i]->serialize(cp);
            serializeThread(cp, i);
        }
    }
}

void
BaseCPU::unserialize(CheckpointIn &cp)
{
    UNSERIALIZE_SCALAR(instCnt);

    if (!_switchedOut) {
        UNSERIALIZE_SCALAR(_pid);

        // Unserialize the threads; this is done by the CPU implementation.
        for (ThreadID i = 0; i < numThreads; ++i) {
            ScopedCheckpointSection sec(cp, csprintf("xc.%i", i));
            interrupts[i]->unserialize(cp);
            unserializeThread(cp, i);
        }
    }
}

void
BaseCPU::scheduleInstStop(ThreadID tid, Counter insts, const char *cause)
{
    const Tick now(getCurrentInstCount(tid));
    Event *event(new LocalSimLoopExitEvent(cause, 0));

    threadContexts[tid]->scheduleInstCountEvent(event, now + insts);
}
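
// Note that "insts" above is relative: the exit event fires once the thread
// commits that many additional instructions. A minimal sketch of the
// arithmetic with hypothetical values:
#if 0
const Tick now = 1000;            // instructions already committed
const Counter insts = 500;        // additional instructions requested
const Tick stop_at = now + insts; // event fires at the 1500th commit
#endif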

Tick
BaseCPU::getCurrentInstCount(ThreadID tid)
{
    return threadContexts[tid]->getCurrentInstCount();
}

AddressMonitor::AddressMonitor()
{
    armed = false;
    waiting = false;
    gotWakeup = false;
}

bool
AddressMonitor::doMonitor(PacketPtr pkt)
{
    assert(pkt->req->hasPaddr());
    if (armed && waiting) {
        if (pAddr == pkt->getAddr()) {
            DPRINTF(Mwait, "pAddr=0x%lx invalidated: waking up core\n",
                    pkt->getAddr());
            waiting = false;
            return true;
        }
    }
    return false;
}
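
// Hedged usage sketch for doMonitor(): a CPU model hands each invalidating
// snoop to the armed monitor, and a true return wakes the core. snoop_pkt
// and tid are hypothetical; the exact call site varies per CPU model.
#if 0
if (monitor.doMonitor(snoop_pkt)) {
    // The monitored line was written; flag the wakeup so the next mwait()
    // returns false and the thread does not suspend again.
    monitor.gotWakeup = true;
    wakeup(tid);
}
#endif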

void
BaseCPU::traceFunctionsInternal(Addr pc)
{
    if (Loader::debugSymbolTable.empty())
        return;

    // if pc enters a different function, print the new function symbol
    // and update the saved range. Otherwise do nothing.
    if (pc < currentFunctionStart || pc >= currentFunctionEnd) {
        auto it = Loader::debugSymbolTable.findNearest(
                pc, currentFunctionEnd);

        string sym_str;
        if (it == Loader::debugSymbolTable.end()) {
            // no symbol found: use addr as label
            sym_str = csprintf("%#x", pc);
            currentFunctionStart = pc;
            currentFunctionEnd = pc + 1;
        } else {
            sym_str = it->name;
            currentFunctionStart = it->address;
        }

        ccprintf(*functionTraceStream, " (%d)\n%d: %s",
                 curTick() - functionEntryTick, curTick(), sym_str);
        functionEntryTick = curTick();
    }
}

bool
BaseCPU::waitForRemoteGDB() const
{
    return params()->wait_for_remote_gdb;
}