/*
 * Copyright (c) 2011-2012,2016-2017 ARM Limited
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2011 Regents of the University of California
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * Copyright (c) 2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */
#include "cpu/base.hh"

#include <sstream>
#include <string>

#include "arch/generic/tlb.hh"
#include "base/cprintf.hh"
#include "base/loader/symtab.hh"
#include "base/logging.hh"
#include "base/output.hh"
#include "base/trace.hh"
#include "cpu/checker/cpu.hh"
#include "cpu/cpuevent.hh"
#include "cpu/profile.hh"
#include "cpu/thread_context.hh"
#include "debug/Mwait.hh"
#include "debug/SyscallVerbose.hh"
#include "mem/page_table.hh"
#include "params/BaseCPU.hh"
#include "sim/clocked_object.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"
#include "sim/sim_events.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

#include "sim/stat_control.hh"

using namespace std;
vector<BaseCPU *> BaseCPU::cpuList;
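// Note: cpuList is the global registry of every BaseCPU constructed so far.
// The constructor below appends each new CPU to it and uses the current list
// size to hand out a default cpu_id when Python does not supply one.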
// This variable reflects the max number of threads in any CPU. Be
// careful to only use it once all the CPUs that you care about have
// been initialized.
int maxThreadsPerCPU = 1;
CPUProgressEvent::CPUProgressEvent(BaseCPU *_cpu, Tick ival)
    : Event(Event::Progress_Event_Pri), _interval(ival), lastNumInst(0),
      cpu(_cpu), _repeatEvent(true)
{
    if (_interval)
        cpu->schedule(this, curTick() + _interval);
}
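// process() below re-arms the event every _interval ticks (while
// _repeatEvent is set) and reports how many instructions the CPU has
// committed since the previous report. The IPC it prints is simply
//   (totalOps() - lastNumInst) / (_interval / clockPeriod())
// i.e. instructions committed divided by the cycles in the reporting window.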
void
CPUProgressEvent::process()
{
    Counter temp = cpu->totalOps();

    if (_repeatEvent)
        cpu->schedule(this, curTick() + _interval);

    if (cpu->switchedOut()) {
        return;
    }

#ifndef NDEBUG
    double ipc = double(temp - lastNumInst) / (_interval / cpu->clockPeriod());

    DPRINTFN("%s progress event, total committed:%i, progress insts committed: "
             "%lli, IPC: %0.8d\n", cpu->name(), temp, temp - lastNumInst,
             ipc);
    ipc = 0.0;
#else
    cprintf("%lli: %s progress event, total committed:%i, progress insts "
            "committed: %lli\n", curTick(), cpu->name(), temp,
            temp - lastNumInst);
#endif

    lastNumInst = temp;
}
const char *
CPUProgressEvent::description() const
{
    return "CPU Progress";
}
BaseCPU::BaseCPU(Params *p, bool is_checker)
    : ClockedObject(p), instCnt(0), _cpuId(p->cpu_id), _socketId(p->socket_id),
      _instMasterId(p->system->getMasterId(this, "inst")),
      _dataMasterId(p->system->getMasterId(this, "data")),
      _taskId(ContextSwitchTaskId::Unknown), _pid(invldPid),
      _switchedOut(p->switched_out), _cacheLineSize(p->system->cacheLineSize()),
      interrupts(p->interrupts), profileEvent(NULL),
      numThreads(p->numThreads), system(p->system),
      previousCycle(0), previousState(CPU_STATE_SLEEP),
      functionTraceStream(nullptr), currentFunctionStart(0),
      currentFunctionEnd(0), functionEntryTick(0),
      addressMonitor(p->numThreads),
      syscallRetryLatency(p->syscallRetryLatency),
      pwrGatingLatency(p->pwr_gating_latency),
      powerGatingOnIdle(p->power_gating_on_idle),
      enterPwrGatingEvent([this]{ enterPwrGating(); }, name())
{
    // if Python did not provide a valid ID, do it here
    if (_cpuId == -1) {
        _cpuId = cpuList.size();
    }

    // add self to global list of CPUs
    cpuList.push_back(this);

    DPRINTF(SyscallVerbose, "Constructing CPU with id %d, socket id %d\n",
            _cpuId, _socketId);

    if (numThreads > maxThreadsPerCPU)
        maxThreadsPerCPU = numThreads;

    // allocate per-thread instruction-based event queues
    comInstEventQueue = new EventQueue *[numThreads];
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        comInstEventQueue[tid] =
            new EventQueue("instruction-based event queue");

    // set up instruction-count-based termination events, if any
    if (p->max_insts_any_thread != 0) {
        const char *cause = "a thread reached the max instruction count";
        for (ThreadID tid = 0; tid < numThreads; ++tid)
            scheduleInstStop(tid, p->max_insts_any_thread, cause);
    }

    // Set up instruction-count-based termination events for SimPoints.
    // Typically there is more than one action point; Simulation.py is
    // responsible for taking the necessary actions upon exiting the
    // simulation loop.
    if (!p->simpoint_start_insts.empty()) {
        const char *cause = "simpoint starting point found";
        for (size_t i = 0; i < p->simpoint_start_insts.size(); ++i)
            scheduleInstStop(0, p->simpoint_start_insts[i], cause);
    }

    if (p->max_insts_all_threads != 0) {
        const char *cause = "all threads reached the max instruction count";

        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            scheduleInstCountEvent(tid, event, p->max_insts_all_threads);
        }
    }

    // set up function tracing, if requested
    functionTracingEnabled = false;
    if (p->function_trace) {
        const string fname = csprintf("ftrace.%s", name());
        functionTraceStream = simout.findOrCreate(fname)->stream();

        currentFunctionStart = currentFunctionEnd = 0;
        functionEntryTick = p->function_trace_start;

        if (p->function_trace_start == 0) {
            functionTracingEnabled = true;
        } else {
            Event *event = new EventFunctionWrapper(
                [this]{ enableFunctionTrace(); }, name(), true);
            schedule(event, p->function_trace_start);
        }
    }

    // The interrupts should always be present unless this CPU is
    // switched in later or in case it is a checker CPU
    if (!params()->switched_out && !is_checker) {
        fatal_if(interrupts.size() != numThreads,
                 "CPU %s has %i interrupt controllers, but is expecting one "
                 "per thread (%i)\n",
                 name(), interrupts.size(), numThreads);
        for (ThreadID tid = 0; tid < numThreads; tid++)
            interrupts[tid]->setCPU(this);
    }

    if (params()->profile)
        profileEvent = new EventFunctionWrapper(
            [this]{ processProfileEvent(); },
            name());

    tracer = params()->tracer;

    if (params()->isa.size() != numThreads) {
        fatal("Number of ISAs (%i) assigned to the CPU does not equal number "
              "of threads (%i).\n", params()->isa.size(), numThreads);
    }
}
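// Function tracing note: when function_trace is enabled in the constructor
// above, every traced PC is passed to traceFunctionsInternal() (near the end
// of this file), which looks the address up in the debug symbol table and
// appends a line to the per-CPU "ftrace.<name>" file whenever control enters
// a different function. enableFunctionTrace() merely flips the flag on,
// either immediately or at the function_trace_start tick.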
void
BaseCPU::enableFunctionTrace()
{
    functionTracingEnabled = true;
}
BaseCPU::~BaseCPU()
{
    delete[] comInstEventQueue;
}
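// The per-thread AddressMonitor (armMonitor/mwait/mwaitAtomic below, plus
// AddressMonitor::doMonitor near the end of this file) implements
// monitor/mwait-style waiting: armMonitor() records the virtual address a
// thread wants to watch, mwait()/mwaitAtomic() resolve it to a
// cache-line-aligned physical address and mark the thread as waiting, and
// doMonitor() wakes the core when a write to that line is observed.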
void
BaseCPU::armMonitor(ThreadID tid, Addr address)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    monitor.armed = true;
    monitor.vAddr = address;

    DPRINTF(Mwait, "[tid:%d] Armed monitor (vAddr=0x%lx)\n", tid, address);
}
bool
BaseCPU::mwait(ThreadID tid, PacketPtr pkt)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    if (!monitor.gotWakeup) {
        int block_size = cacheLineSize();
        uint64_t mask = ~((uint64_t)(block_size - 1));

        assert(pkt->req->hasPaddr());
        monitor.pAddr = pkt->getAddr() & mask;
        monitor.waiting = true;

        DPRINTF(Mwait, "[tid:%d] mwait called (vAddr=0x%lx, "
                "line's paddr=0x%lx)\n", tid, monitor.vAddr, monitor.pAddr);
        return true;
    } else {
        monitor.gotWakeup = false;
        return false;
    }
}
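// mwaitAtomic below does the same arming for the atomic/functional path:
// since there is no memory packet carrying a physical address here, it
// builds a read request for the monitored vAddr, translates it through the
// data TLB, and then masks the result down to its cache line. With the
// usual 64-byte line, for example, the mask is ~0x3f, so any address within
// the line maps to the same monitor.pAddr.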
void
BaseCPU::mwaitAtomic(ThreadID tid, ThreadContext *tc, BaseTLB *dtb)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    RequestPtr req = std::make_shared<Request>();

    Addr addr = monitor.vAddr;
    int block_size = cacheLineSize();
    uint64_t mask = ~((uint64_t)(block_size - 1));
    int size = block_size;

    // The address of the next line if it crosses a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, block_size);

    if (secondAddr > addr)
        size = secondAddr - addr;

    req->setVirt(0, addr, size, 0x0, dataMasterId(), tc->instAddr());

    // translate to physical address
    Fault fault = dtb->translateAtomic(req, tc, BaseTLB::Read);
    assert(fault == NoFault);

    monitor.pAddr = req->getPaddr() & mask;
    monitor.waiting = true;

    DPRINTF(Mwait, "[tid:%d] mwait called (vAddr=0x%lx, line's paddr=0x%lx)\n",
            tid, monitor.vAddr, monitor.pAddr);
}
void
BaseCPU::init()
{
    if (!params()->switched_out) {
        registerThreadContexts();
    }
}

void
BaseCPU::startup()
{
    if (!params()->switched_out && profileEvent)
        schedule(profileEvent, curTick());

    if (params()->progress_interval) {
        new CPUProgressEvent(this, params()->progress_interval);
    }

    if (_switchedOut)
        ClockedObject::pwrState(Enums::PwrState::OFF);

    // Assumption: the CPU starts operating instantaneously, without any
    // startup latency.
    if (ClockedObject::pwrState() == Enums::PwrState::UNDEFINED)
        ClockedObject::pwrState(Enums::PwrState::ON);
}
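// The pmuProbePoint() helper below allocates a named PMU probe point under
// this CPU's probe manager; regProbePoints() uses it to expose the standard
// set of events (cycles, retired instructions, loads, stores, branches)
// that PMU-style listeners can subscribe to, and probeInstCommit() is what
// CPU models call on commit to fire those probes.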
ProbePoints::PMUUPtr
BaseCPU::pmuProbePoint(const char *name)
{
    ProbePoints::PMUUPtr ptr;
    ptr.reset(new ProbePoints::PMU(getProbeManager(), name));

    return ptr;
}
void
BaseCPU::regProbePoints()
{
    ppAllCycles = pmuProbePoint("Cycles");
    ppActiveCycles = pmuProbePoint("ActiveCycles");

    ppRetiredInsts = pmuProbePoint("RetiredInsts");
    ppRetiredInstsPC = pmuProbePoint("RetiredInstsPC");
    ppRetiredLoads = pmuProbePoint("RetiredLoads");
    ppRetiredStores = pmuProbePoint("RetiredStores");
    ppRetiredBranches = pmuProbePoint("RetiredBranches");

    ppSleeping = new ProbePointArg<bool>(this->getProbeManager(),
                                         "Sleeping");
}
void
BaseCPU::probeInstCommit(const StaticInstPtr &inst, Addr pc)
{
    if (!inst->isMicroop() || inst->isLastMicroop()) {
        ppRetiredInsts->notify(1);
        ppRetiredInstsPC->notify(pc);
    }

    if (inst->isLoad())
        ppRetiredLoads->notify(1);

    if (inst->isStore() || inst->isAtomic())
        ppRetiredStores->notify(1);

    if (inst->isControl())
        ppRetiredBranches->notify(1);
}
void
BaseCPU::regStats()
{
    ClockedObject::regStats();

    using namespace Stats;

    numCycles
        .name(name() + ".numCycles")
        .desc("number of cpu cycles simulated")
        ;

    numWorkItemsStarted
        .name(name() + ".numWorkItemsStarted")
        .desc("number of work items this cpu started")
        ;

    numWorkItemsCompleted
        .name(name() + ".numWorkItemsCompleted")
        .desc("number of work items this cpu completed")
        ;

    int size = threadContexts.size();
    if (size > 1) {
        for (int i = 0; i < size; ++i) {
            stringstream namestr;
            ccprintf(namestr, "%s.ctx%d", name(), i);
            threadContexts[i]->regStats(namestr.str());
        }
    } else if (size == 1)
        threadContexts[0]->regStats(name());
}
Port &
BaseCPU::getPort(const string &if_name, PortID idx)
{
    // Get the right port based on name. This applies to all the
    // subclasses of the base CPU and relies on their implementation
    // of getDataPort and getInstPort.
    if (if_name == "dcache_port")
        return getDataPort();
    else if (if_name == "icache_port")
        return getInstPort();
    else
        return ClockedObject::getPort(if_name, idx);
}
void
BaseCPU::registerThreadContexts()
{
    assert(system->multiThread || numThreads == 1);

    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        ThreadContext *tc = threadContexts[tid];

        if (system->multiThread) {
            tc->setContextId(system->registerThreadContext(tc));
        } else {
            tc->setContextId(system->registerThreadContext(tc, _cpuId));
        }

        if (!FullSystem)
            tc->getProcessPtr()->assignThreadContext(tc->contextId());
    }
}
void
BaseCPU::deschedulePowerGatingEvent()
{
    if (enterPwrGatingEvent.scheduled()) {
        deschedule(enterPwrGatingEvent);
    }
}
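// Power-gating flow: suspendContext() clock-gates the CPU (CLK_GATED) once
// every thread is suspended and, if power_gating_on_idle is set, schedules
// enterPwrGatingEvent pwr_gating_latency cycles later to drop the CPU to
// OFF. schedulePowerGatingEvent() below re-arms that event when the CPU is
// already clock gated and idle, while activateContext() and
// deschedulePowerGatingEvent() cancel it as soon as a thread becomes active.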
void
BaseCPU::schedulePowerGatingEvent()
{
    for (auto tc : threadContexts) {
        if (tc->status() == ThreadContext::Active)
            return;
    }

    if (ClockedObject::pwrState() == Enums::PwrState::CLK_GATED &&
        powerGatingOnIdle) {
        assert(!enterPwrGatingEvent.scheduled());
        // Schedule a power gating event when clock gated for the specified
        // amount of time
        schedule(enterPwrGatingEvent, clockEdge(pwrGatingLatency));
    }
}
ThreadID
BaseCPU::findContext(ThreadContext *tc)
{
    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        if (tc == threadContexts[tid])
            return tid;
    }
    return 0;
}
void
BaseCPU::activateContext(ThreadID thread_num)
{
    // Squash enter power gating event while cpu gets activated
    if (enterPwrGatingEvent.scheduled())
        deschedule(enterPwrGatingEvent);
    // For any active thread running, update CPU power state to active (ON)
    ClockedObject::pwrState(Enums::PwrState::ON);

    updateCycleCounters(CPU_STATE_WAKEUP);
}
void
BaseCPU::suspendContext(ThreadID thread_num)
{
    // Check if all threads are suspended
    for (auto t : threadContexts) {
        if (t->status() != ThreadContext::Suspended) {
            return;
        }
    }

    // All CPU threads are suspended, update the cycle count
    updateCycleCounters(CPU_STATE_SLEEP);

    // All CPU threads suspended, enter a lower power state for the CPU
    ClockedObject::pwrState(Enums::PwrState::CLK_GATED);

    // Schedule the power gating event only if power gating on idle is
    // enabled; the CPU is turned off pwrGatingLatency cycles after being
    // clock gated.
    if (powerGatingOnIdle) {
        schedule(enterPwrGatingEvent, clockEdge(pwrGatingLatency));
    }
}
void
BaseCPU::haltContext(ThreadID thread_num)
{
    updateCycleCounters(BaseCPU::CPU_STATE_SLEEP);
}
void
BaseCPU::enterPwrGating(void)
{
    ClockedObject::pwrState(Enums::PwrState::OFF);
}
void
BaseCPU::switchOut()
{
    assert(!_switchedOut);
    _switchedOut = true;
    if (profileEvent && profileEvent->scheduled())
        deschedule(profileEvent);

    // Flush all TLBs in the CPU to avoid having stale translations if
    // it gets switched in later.
    flushTLBs();

    // Go to the power gating state
    ClockedObject::pwrState(Enums::PwrState::OFF);
}
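// CPU switching: takeOverFrom() below is the counterpart of switchOut().
// It moves every thread context (and, when present, the table-walker ports,
// TLB state and checker CPUs) from the old CPU to this one, re-homes the
// interrupt controllers, and finally rebinds the instruction and data ports
// so the new CPU takes the old CPU's place in the memory system.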
void
BaseCPU::takeOverFrom(BaseCPU *oldCPU)
{
    assert(threadContexts.size() == oldCPU->threadContexts.size());
    assert(_cpuId == oldCPU->cpuId());
    assert(_switchedOut);
    assert(oldCPU != this);
    _pid = oldCPU->getPid();
    _taskId = oldCPU->taskId();
    // Take over the power state of the switchedOut CPU
    ClockedObject::pwrState(oldCPU->pwrState());

    previousState = oldCPU->previousState;
    previousCycle = oldCPU->previousCycle;

    _switchedOut = false;

    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *newTC = threadContexts[i];
        ThreadContext *oldTC = oldCPU->threadContexts[i];

        newTC->takeOverFrom(oldTC);

        CpuEvent::replaceThreadContext(oldTC, newTC);

        assert(newTC->contextId() == oldTC->contextId());
        assert(newTC->threadId() == oldTC->threadId());
        system->replaceThreadContext(newTC, newTC->contextId());

        /* This code no longer works since the zero register (e.g.,
         * r31 on Alpha) doesn't necessarily contain zero at this
         * point.
            ThreadContext::compare(oldTC, newTC);
        */

        Port *old_itb_port = oldTC->getITBPtr()->getTableWalkerPort();
        Port *old_dtb_port = oldTC->getDTBPtr()->getTableWalkerPort();
        Port *new_itb_port = newTC->getITBPtr()->getTableWalkerPort();
        Port *new_dtb_port = newTC->getDTBPtr()->getTableWalkerPort();

        // Move over any table walker ports if they exist
        if (new_itb_port)
            new_itb_port->takeOverFrom(old_itb_port);
        if (new_dtb_port)
            new_dtb_port->takeOverFrom(old_dtb_port);
        newTC->getITBPtr()->takeOverFrom(oldTC->getITBPtr());
        newTC->getDTBPtr()->takeOverFrom(oldTC->getDTBPtr());

        // Check whether or not we have to transfer CheckerCPU
        // objects over in the switch
        CheckerCPU *oldChecker = oldTC->getCheckerCpuPtr();
        CheckerCPU *newChecker = newTC->getCheckerCpuPtr();
        if (oldChecker && newChecker) {
            Port *old_checker_itb_port =
                oldChecker->getITBPtr()->getTableWalkerPort();
            Port *old_checker_dtb_port =
                oldChecker->getDTBPtr()->getTableWalkerPort();
            Port *new_checker_itb_port =
                newChecker->getITBPtr()->getTableWalkerPort();
            Port *new_checker_dtb_port =
                newChecker->getDTBPtr()->getTableWalkerPort();

            newChecker->getITBPtr()->takeOverFrom(oldChecker->getITBPtr());
            newChecker->getDTBPtr()->takeOverFrom(oldChecker->getDTBPtr());

            // Move over any table walker ports if they exist for checker
            if (new_checker_itb_port)
                new_checker_itb_port->takeOverFrom(old_checker_itb_port);
            if (new_checker_dtb_port)
                new_checker_dtb_port->takeOverFrom(old_checker_dtb_port);
        }
    }

    interrupts = oldCPU->interrupts;
    for (ThreadID tid = 0; tid < numThreads; tid++) {
        interrupts[tid]->setCPU(this);
    }
    oldCPU->interrupts.clear();

    if (FullSystem) {
        for (ThreadID i = 0; i < size; ++i)
            threadContexts[i]->profileClear();

        if (profileEvent)
            schedule(profileEvent, curTick());
    }

    // All CPUs have an instruction and a data port, and the new CPU's
    // ports are dangling while the old CPU has its ports connected
    // already. Unbind the old CPU and then bind the ports of the one
    // we are switching to.
    getInstPort().takeOverFrom(&oldCPU->getInstPort());
    getDataPort().takeOverFrom(&oldCPU->getDataPort());
}
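// flushTLBs() below walks every thread context (and its checker CPU, if one
// is attached) and flushes the instruction and data TLBs, so that a
// switched-out CPU never holds stale translations when it is switched back
// in.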
void
BaseCPU::flushTLBs()
{
    for (ThreadID i = 0; i < threadContexts.size(); ++i) {
        ThreadContext &tc(*threadContexts[i]);
        CheckerCPU *checker(tc.getCheckerCpuPtr());

        tc.getITBPtr()->flushAll();
        tc.getDTBPtr()->flushAll();
        if (checker) {
            checker->getITBPtr()->flushAll();
            checker->getDTBPtr()->flushAll();
        }
    }
}
void
BaseCPU::processProfileEvent()
{
    ThreadID size = threadContexts.size();

    for (ThreadID i = 0; i < size; ++i)
        threadContexts[i]->profileSample();

    schedule(profileEvent, curTick() + params()->profile);
}
void
BaseCPU::serialize(CheckpointOut &cp) const
{
    SERIALIZE_SCALAR(instCnt);

    if (!_switchedOut) {
        /* Unlike _pid, _taskId is not serialized, as task ids are
         * dynamically assigned unique ids that are only meaningful for
         * the duration of a specific run. We will need to serialize the
         * entire taskMap in system. */
        SERIALIZE_SCALAR(_pid);

        // Serialize the threads, this is done by the CPU implementation.
        for (ThreadID i = 0; i < numThreads; ++i) {
            ScopedCheckpointSection sec(cp, csprintf("xc.%i", i));
            interrupts[i]->serialize(cp);
            serializeThread(cp, i);
        }
    }
}
void
BaseCPU::unserialize(CheckpointIn &cp)
{
    UNSERIALIZE_SCALAR(instCnt);

    if (!_switchedOut) {
        UNSERIALIZE_SCALAR(_pid);

        // Unserialize the threads, this is done by the CPU implementation.
        for (ThreadID i = 0; i < numThreads; ++i) {
            ScopedCheckpointSection sec(cp, csprintf("xc.%i", i));
            interrupts[i]->unserialize(cp);
            unserializeThread(cp, i);
        }
    }
}
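// scheduleInstStop() below arms one of the per-thread instruction-based
// event queues allocated in the constructor. Those queues advance in
// committed instructions rather than in ticks, so "now + insts" means
// "insts more instructions from the thread's current committed count",
// at which point the LocalSimLoopExitEvent fires with the given cause.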
void
BaseCPU::scheduleInstStop(ThreadID tid, Counter insts, const char *cause)
{
    const Tick now(getCurrentInstCount(tid));
    Event *event(new LocalSimLoopExitEvent(cause, 0));

    scheduleInstCountEvent(tid, event, now + insts);
}
uint64_t
BaseCPU::getCurrentInstCount(ThreadID tid)
{
    return comInstEventQueue[tid]->getCurTick();
}
AddressMonitor::AddressMonitor() {
    armed = false;
    waiting = false;
    gotWakeup = false;
}

bool AddressMonitor::doMonitor(PacketPtr pkt) {
    assert(pkt->req->hasPaddr());
    if (armed && waiting) {
        if (pAddr == pkt->getAddr()) {
            DPRINTF(Mwait, "pAddr=0x%lx invalidated: waking up core\n",
                    pAddr);
            waiting = false;
            return true;
        }
    }
    return false;
}
void
BaseCPU::traceFunctionsInternal(Addr pc)
{
    if (!debugSymbolTable)
        return;

    // if pc enters a different function, print the new function symbol and
    // update the saved range; otherwise do nothing.
    if (pc < currentFunctionStart || pc >= currentFunctionEnd) {
        string sym_str;
        bool found = debugSymbolTable->findNearestSymbol(pc, sym_str,
                                                         currentFunctionStart,
                                                         currentFunctionEnd);

        if (!found) {
            // no symbol found: use addr as label
            sym_str = csprintf("0x%x", pc);
            currentFunctionStart = pc;
            currentFunctionEnd = pc + 1;
        }

        ccprintf(*functionTraceStream, " (%d)\n%d: %s",
                 curTick() - functionEntryTick, curTick(), sym_str);
        functionEntryTick = curTick();
    }
}
bool
BaseCPU::waitForRemoteGDB() const
{
    return params()->wait_for_remote_gdb;
}