/*
 * Copyright (c) 2011-2012, 2016-2017, 2019 ARM Limited
 * All rights reserved.
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2011 Regents of the University of California
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * Copyright (c) 2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

44 #include "cpu/base.hh"
50 #include "arch/generic/tlb.hh"
51 #include "base/cprintf.hh"
52 #include "base/loader/symtab.hh"
53 #include "base/logging.hh"
54 #include "base/output.hh"
55 #include "base/trace.hh"
56 #include "cpu/checker/cpu.hh"
57 #include "cpu/profile.hh"
58 #include "cpu/thread_context.hh"
59 #include "debug/Mwait.hh"
60 #include "debug/SyscallVerbose.hh"
61 #include "debug/Thread.hh"
62 #include "mem/page_table.hh"
63 #include "params/BaseCPU.hh"
64 #include "sim/clocked_object.hh"
65 #include "sim/full_system.hh"
66 #include "sim/process.hh"
67 #include "sim/sim_events.hh"
68 #include "sim/sim_exit.hh"
69 #include "sim/system.hh"
72 #include "sim/stat_control.hh"
using namespace std;

vector<BaseCPU *> BaseCPU::cpuList;

// This variable reflects the max number of threads in any CPU.  Be
// careful to only use it once all the CPUs that you care about have
// been created.
int maxThreadsPerCPU = 1;

CPUProgressEvent::CPUProgressEvent(BaseCPU *_cpu, Tick ival)
    : Event(Event::Progress_Event_Pri), _interval(ival), lastNumInst(0),
      cpu(_cpu), _repeatEvent(true)
{
    if (_interval)
        cpu->schedule(this, curTick() + _interval);
}

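// process() below computes IPC as the ops committed since the last event
// divided by the cycles in the sampling window (_interval / clockPeriod()).
// For example (hypothetical numbers), a 1 ms interval on a 1 GHz CPU spans
// 1e6 cycles; if 2e6 ops were committed in that window, the reported IPC
// is 2.0.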
void
CPUProgressEvent::process()
{
    Counter temp = cpu->totalOps();

    if (_repeatEvent)
        cpu->schedule(this, curTick() + _interval);

    if (cpu->switchedOut()) {
        return;
    }

#ifndef NDEBUG
    double ipc = double(temp - lastNumInst) / (_interval / cpu->clockPeriod());

    DPRINTFN("%s progress event, total committed:%i, progress insts committed: "
             "%lli, IPC: %0.8d\n", cpu->name(), temp, temp - lastNumInst,
             ipc);
    ipc = 0.0;
#else
    cprintf("%lli: %s progress event, total committed:%i, progress insts "
            "committed: %lli\n", curTick(), cpu->name(), temp,
            temp - lastNumInst);
#endif
    lastNumInst = temp;
}

const char *
CPUProgressEvent::description() const
{
    return "CPU Progress";
}

BaseCPU::BaseCPU(Params *p, bool is_checker)
    : ClockedObject(p), instCnt(0), _cpuId(p->cpu_id), _socketId(p->socket_id),
      _instMasterId(p->system->getMasterId(this, "inst")),
      _dataMasterId(p->system->getMasterId(this, "data")),
      _taskId(ContextSwitchTaskId::Unknown), _pid(invldPid),
      _switchedOut(p->switched_out), _cacheLineSize(p->system->cacheLineSize()),
      interrupts(p->interrupts), profileEvent(NULL),
      numThreads(p->numThreads), system(p->system),
      previousCycle(0), previousState(CPU_STATE_SLEEP),
      functionTraceStream(nullptr), currentFunctionStart(0),
      currentFunctionEnd(0), functionEntryTick(0),
      addressMonitor(p->numThreads),
      syscallRetryLatency(p->syscallRetryLatency),
      pwrGatingLatency(p->pwr_gating_latency),
      powerGatingOnIdle(p->power_gating_on_idle),
      enterPwrGatingEvent([this]{ enterPwrGating(); }, name())
{
    // if Python did not provide a valid ID, do it here
    if (_cpuId == -1)
        _cpuId = cpuList.size();

    // add self to global list of CPUs
    cpuList.push_back(this);

    DPRINTF(SyscallVerbose, "Constructing CPU with id %d, socket id %d\n",
            _cpuId, _socketId);

    if (numThreads > maxThreadsPerCPU)
        maxThreadsPerCPU = numThreads;

    functionTracingEnabled = false;
    if (p->function_trace) {
        const string fname = csprintf("ftrace.%s", name());
        functionTraceStream = simout.findOrCreate(fname)->stream();

        currentFunctionStart = currentFunctionEnd = 0;
        functionEntryTick = p->function_trace_start;

        if (p->function_trace_start == 0) {
            functionTracingEnabled = true;
        } else {
            Event *event = new EventFunctionWrapper(
                [this]{ enableFunctionTrace(); }, name(), true);
            schedule(event, p->function_trace_start);
        }
    }

    if (FullSystem) {
        if (params()->profile)
            profileEvent = new EventFunctionWrapper(
                [this]{ processProfileEvent(); },
                name());
    }
    tracer = params()->tracer;

    if (params()->isa.size() != numThreads) {
        fatal("Number of ISAs (%i) assigned to the CPU does not equal number "
              "of threads (%i).\n", params()->isa.size(), numThreads);
    }
}

void
BaseCPU::enableFunctionTrace()
{
    functionTracingEnabled = true;
}

void
BaseCPU::armMonitor(ThreadID tid, Addr address)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    monitor.armed = true;
    monitor.vAddr = address;

    DPRINTF(Mwait,"[tid:%d] Armed monitor (vAddr=0x%lx)\n", tid, address);
}

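// mwait() below records the physical address of the cache line touched by
// the packet and puts the monitor into the waiting state.  The line address
// is formed by masking off the block offset: with a (hypothetical) 64-byte
// cache line, mask = ~uint64_t(63) = 0xffffffffffffffc0.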
bool
BaseCPU::mwait(ThreadID tid, PacketPtr pkt)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    if (!monitor.gotWakeup) {
        int block_size = cacheLineSize();
        uint64_t mask = ~((uint64_t)(block_size - 1));

        assert(pkt->req->hasPaddr());
        monitor.pAddr = pkt->getAddr() & mask;
        monitor.waiting = true;

        DPRINTF(Mwait,"[tid:%d] mwait called (vAddr=0x%lx, "
                "line's paddr=0x%lx)\n", tid, monitor.vAddr, monitor.pAddr);
        return true;
    } else {
        monitor.gotWakeup = false;
        return false;
    }
}

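// mwaitAtomic() below performs the same arming step for atomic-mode CPUs,
// but first translates the monitored virtual address itself.  If a
// block-sized access starting at that address would cross a cache line
// boundary, the request is clamped at the boundary; e.g. (hypothetically),
// vAddr 0x10038 with a 64-byte line becomes an 8-byte request ending at
// 0x10040.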
void
BaseCPU::mwaitAtomic(ThreadID tid, ThreadContext *tc, BaseTLB *dtb)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    RequestPtr req = std::make_shared<Request>();

    Addr addr = monitor.vAddr;
    int block_size = cacheLineSize();
    uint64_t mask = ~((uint64_t)(block_size - 1));
    int size = block_size;

    // The address of the next line if it crosses a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, block_size);

    if (secondAddr > addr)
        size = secondAddr - addr;

    req->setVirt(addr, size, 0x0, dataMasterId(), tc->instAddr());

    // translate to physical address
    Fault fault = dtb->translateAtomic(req, tc, BaseTLB::Read);
    assert(fault == NoFault);

    monitor.pAddr = req->getPaddr() & mask;
    monitor.waiting = true;

    DPRINTF(Mwait,"[tid:%d] mwait called (vAddr=0x%lx, line's paddr=0x%lx)\n",
            tid, monitor.vAddr, monitor.pAddr);
}

void
BaseCPU::init()
{
    // Set up instruction-count-based termination events, if any. This needs
    // to happen after threadContexts has been constructed.
    if (params()->max_insts_any_thread != 0) {
        const char *cause = "a thread reached the max instruction count";
        for (ThreadID tid = 0; tid < numThreads; ++tid)
            scheduleInstStop(tid, params()->max_insts_any_thread, cause);
    }

    // Set up instruction-count-based termination events for SimPoints.
    // Typically there is more than one action point; Simulation.py is
    // responsible for taking the necessary actions upon exiting the
    // simulation loop.
    if (!params()->simpoint_start_insts.empty()) {
        const char *cause = "simpoint starting point found";
        for (size_t i = 0; i < params()->simpoint_start_insts.size(); ++i)
            scheduleInstStop(0, params()->simpoint_start_insts[i], cause);
    }

    if (params()->max_insts_all_threads != 0) {
        const char *cause = "all threads reached the max instruction count";

        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            threadContexts[tid]->scheduleInstCountEvent(
                    event, params()->max_insts_all_threads);
        }
    }

    if (!params()->switched_out) {
        registerThreadContexts();
    }
}

void
BaseCPU::startup()
{
    if (FullSystem) {
        if (!params()->switched_out && profileEvent)
            schedule(profileEvent, curTick());
    }

    if (params()->progress_interval) {
        new CPUProgressEvent(this, params()->progress_interval);
    }

    if (_switchedOut)
        powerState->set(Enums::PwrState::OFF);

    // Assumption: the CPU starts to operate instantaneously, without any
    // start-up latency.
    if (powerState->get() == Enums::PwrState::UNDEFINED)
        powerState->set(Enums::PwrState::ON);
}

ProbePoints::PMUUPtr
BaseCPU::pmuProbePoint(const char *name)
{
    ProbePoints::PMUUPtr ptr;
    ptr.reset(new ProbePoints::PMU(getProbeManager(), name));

    return ptr;
}

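// regProbePoints() below exposes the PMU-style probe points (cycles,
// retired instructions and their PCs, loads, stores, branches, plus a
// boolean sleeping-state probe) so that instrumentation attached to this
// CPU's probe manager can subscribe to them.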
void
BaseCPU::regProbePoints()
{
    ppAllCycles = pmuProbePoint("Cycles");
    ppActiveCycles = pmuProbePoint("ActiveCycles");

    ppRetiredInsts = pmuProbePoint("RetiredInsts");
    ppRetiredInstsPC = pmuProbePoint("RetiredInstsPC");
    ppRetiredLoads = pmuProbePoint("RetiredLoads");
    ppRetiredStores = pmuProbePoint("RetiredStores");
    ppRetiredBranches = pmuProbePoint("RetiredBranches");

    ppSleeping = new ProbePointArg<bool>(this->getProbeManager(),
                                         "Sleeping");
}

void
BaseCPU::probeInstCommit(const StaticInstPtr &inst, Addr pc)
{
    if (!inst->isMicroop() || inst->isLastMicroop()) {
        ppRetiredInsts->notify(1);
        ppRetiredInstsPC->notify(pc);
    }

    if (inst->isLoad())
        ppRetiredLoads->notify(1);

    if (inst->isStore() || inst->isAtomic())
        ppRetiredStores->notify(1);

    if (inst->isControl())
        ppRetiredBranches->notify(1);
}

void
BaseCPU::regStats()
{
    ClockedObject::regStats();

    using namespace Stats;

    numCycles
        .name(name() + ".numCycles")
        .desc("number of cpu cycles simulated")
        ;

    numWorkItemsStarted
        .name(name() + ".numWorkItemsStarted")
        .desc("number of work items this cpu started")
        ;

    numWorkItemsCompleted
        .name(name() + ".numWorkItemsCompleted")
        .desc("number of work items this cpu completed")
        ;

    int size = threadContexts.size();
    if (size > 1) {
        for (int i = 0; i < size; ++i) {
            stringstream namestr;
            ccprintf(namestr, "%s.ctx%d", name(), i);
            threadContexts[i]->regStats(namestr.str());
        }
    } else if (size == 1)
        threadContexts[0]->regStats(name());
}

Port &
BaseCPU::getPort(const string &if_name, PortID idx)
{
    // Get the right port based on name. This applies to all the
    // subclasses of the base CPU and relies on their implementation
    // of getDataPort and getInstPort.
    if (if_name == "dcache_port")
        return getDataPort();
    else if (if_name == "icache_port")
        return getInstPort();
    else
        return ClockedObject::getPort(if_name, idx);
}

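// registerThreadContexts() below hands each thread context to the System:
// with multiThread enabled the System assigns a globally unique context id,
// otherwise the context is registered under this CPU's id.  It also points
// the per-thread interrupt controller and ISA object at their context.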
void
BaseCPU::registerThreadContexts()
{
    assert(system->multiThread || numThreads == 1);

    fatal_if(interrupts.size() != numThreads,
             "CPU %s has %i interrupt controllers, but is expecting one "
             "per thread (%i)\n",
             name(), interrupts.size(), numThreads);

    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        ThreadContext *tc = threadContexts[tid];

        if (system->multiThread) {
            tc->setContextId(system->registerThreadContext(tc));
        } else {
            tc->setContextId(system->registerThreadContext(tc, _cpuId));
        }

        if (!FullSystem)
            tc->getProcessPtr()->assignThreadContext(tc->contextId());

        interrupts[tid]->setThreadContext(tc);
        tc->getIsaPtr()->setThreadContext(tc);
    }
}

void
BaseCPU::deschedulePowerGatingEvent()
{
    if (enterPwrGatingEvent.scheduled()) {
        deschedule(enterPwrGatingEvent);
    }
}

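// schedulePowerGatingEvent() below only power-gates a CPU that is already
// clock gated and has no active threads; the transition to OFF is scheduled
// pwrGatingLatency cycles in the future via clockEdge(pwrGatingLatency).
// As a hypothetical example, a latency of 300 cycles with a 500 ps clock
// period schedules the event 150 ns after the current clock edge.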
void
BaseCPU::schedulePowerGatingEvent()
{
    for (auto tc : threadContexts) {
        if (tc->status() == ThreadContext::Active)
            return;
    }

    if (powerState->get() == Enums::PwrState::CLK_GATED &&
        powerGatingOnIdle) {
        assert(!enterPwrGatingEvent.scheduled());
        // Schedule a power gating event when clock gated for the specified
        // amount of time
        schedule(enterPwrGatingEvent, clockEdge(pwrGatingLatency));
    }
}

ThreadID
BaseCPU::findContext(ThreadContext *tc)
{
    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        if (tc == threadContexts[tid])
            return tid;
    }
    return 0;
}

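// The activate/suspend/halt hooks below drive the CPU's power state:
// activating any thread moves the CPU to ON and cancels a pending
// power-gating event, while suspending the last active thread moves it to
// CLK_GATED and, when enabled, schedules full power gating.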
void
BaseCPU::activateContext(ThreadID thread_num)
{
    DPRINTF(Thread, "activate contextId %d\n",
            threadContexts[thread_num]->contextId());
    // Squash enter power gating event while cpu gets activated
    if (enterPwrGatingEvent.scheduled())
        deschedule(enterPwrGatingEvent);
    // For any active thread running, update CPU power state to active (ON)
    powerState->set(Enums::PwrState::ON);

    updateCycleCounters(CPU_STATE_WAKEUP);
}

void
BaseCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(Thread, "suspend contextId %d\n",
            threadContexts[thread_num]->contextId());
    // Check if all threads are suspended
    for (auto t : threadContexts) {
        if (t->status() != ThreadContext::Suspended) {
            return;
        }
    }

    // All CPU threads are suspended, update the cycle count
    updateCycleCounters(CPU_STATE_SLEEP);

    // All CPU threads suspended, enter lower power state for the CPU
    powerState->set(Enums::PwrState::CLK_GATED);

    // Schedule the power gating event only if gating on idle is enabled
    if (powerGatingOnIdle) {
        // Schedule power gating event when clock gated for pwrGatingLatency
        // cycles
        schedule(enterPwrGatingEvent, clockEdge(pwrGatingLatency));
    }
}

void
BaseCPU::haltContext(ThreadID thread_num)
{
    updateCycleCounters(BaseCPU::CPU_STATE_SLEEP);
}

void
BaseCPU::enterPwrGating(void)
{
    powerState->set(Enums::PwrState::OFF);
}

void
BaseCPU::switchOut()
{
    assert(!_switchedOut);
    _switchedOut = true;
    if (profileEvent && profileEvent->scheduled())
        deschedule(profileEvent);

    // Flush all TLBs in the CPU to avoid having stale translations if
    // it gets switched in later.
    flushTLBs();

    // Go to the power gating state
    powerState->set(Enums::PwrState::OFF);
}

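// takeOverFrom() below implements the CPU-switching handshake: the new
// (previously switched-out) CPU copies the old CPU's pid, task id, power
// state and cycle-accounting state, re-homes every thread context,
// interrupt controller, TLB and table-walker port, and finally rebinds the
// instruction and data ports that the old CPU owned.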
void
BaseCPU::takeOverFrom(BaseCPU *oldCPU)
{
    assert(threadContexts.size() == oldCPU->threadContexts.size());
    assert(_cpuId == oldCPU->cpuId());
    assert(_switchedOut);
    assert(oldCPU != this);
    _pid = oldCPU->getPid();
    _taskId = oldCPU->taskId();
    // Take over the power state of the switched-out CPU
    powerState->set(oldCPU->powerState->get());

    previousState = oldCPU->previousState;
    previousCycle = oldCPU->previousCycle;

    _switchedOut = false;

    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *newTC = threadContexts[i];
        ThreadContext *oldTC = oldCPU->threadContexts[i];

        newTC->getIsaPtr()->setThreadContext(newTC);

        newTC->takeOverFrom(oldTC);

        assert(newTC->contextId() == oldTC->contextId());
        assert(newTC->threadId() == oldTC->threadId());
        system->replaceThreadContext(newTC, newTC->contextId());

        /* This code no longer works since the zero register (e.g.,
         * r31 on Alpha) doesn't necessarily contain zero at this
         * point:
         *   ThreadContext::compare(oldTC, newTC);
         */

        Port *old_itb_port = oldTC->getITBPtr()->getTableWalkerPort();
        Port *old_dtb_port = oldTC->getDTBPtr()->getTableWalkerPort();
        Port *new_itb_port = newTC->getITBPtr()->getTableWalkerPort();
        Port *new_dtb_port = newTC->getDTBPtr()->getTableWalkerPort();

        // Move over any table walker ports if they exist
        if (new_itb_port)
            new_itb_port->takeOverFrom(old_itb_port);
        if (new_dtb_port)
            new_dtb_port->takeOverFrom(old_dtb_port);
        newTC->getITBPtr()->takeOverFrom(oldTC->getITBPtr());
        newTC->getDTBPtr()->takeOverFrom(oldTC->getDTBPtr());

        // Check whether we have to transfer CheckerCPU
        // objects over in the switch
        CheckerCPU *oldChecker = oldTC->getCheckerCpuPtr();
        CheckerCPU *newChecker = newTC->getCheckerCpuPtr();
        if (oldChecker && newChecker) {
            Port *old_checker_itb_port =
                oldChecker->getITBPtr()->getTableWalkerPort();
            Port *old_checker_dtb_port =
                oldChecker->getDTBPtr()->getTableWalkerPort();
            Port *new_checker_itb_port =
                newChecker->getITBPtr()->getTableWalkerPort();
            Port *new_checker_dtb_port =
                newChecker->getDTBPtr()->getTableWalkerPort();

            newChecker->getITBPtr()->takeOverFrom(oldChecker->getITBPtr());
            newChecker->getDTBPtr()->takeOverFrom(oldChecker->getDTBPtr());

            // Move over any table walker ports if they exist for checker
            if (new_checker_itb_port)
                new_checker_itb_port->takeOverFrom(old_checker_itb_port);
            if (new_checker_dtb_port)
                new_checker_dtb_port->takeOverFrom(old_checker_dtb_port);
        }
    }

    interrupts = oldCPU->interrupts;
    for (ThreadID tid = 0; tid < numThreads; tid++) {
        interrupts[tid]->setThreadContext(threadContexts[tid]);
    }
    oldCPU->interrupts.clear();

    if (FullSystem) {
        for (ThreadID i = 0; i < size; ++i)
            threadContexts[i]->profileClear();

        if (profileEvent)
            schedule(profileEvent, curTick());
    }

    // All CPUs have an instruction and a data port, and the new CPU's
    // ports are dangling while the old CPU has its ports connected
    // already. Unbind the old CPU and then bind the ports of the one
    // we are switching to.
    getInstPort().takeOverFrom(&oldCPU->getInstPort());
    getDataPort().takeOverFrom(&oldCPU->getDataPort());
}

void
BaseCPU::flushTLBs()
{
    for (ThreadID i = 0; i < threadContexts.size(); ++i) {
        ThreadContext &tc(*threadContexts[i]);
        CheckerCPU *checker(tc.getCheckerCpuPtr());

        tc.getITBPtr()->flushAll();
        tc.getDTBPtr()->flushAll();

        if (checker) {
            checker->getITBPtr()->flushAll();
            checker->getDTBPtr()->flushAll();
        }
    }
}

void
BaseCPU::processProfileEvent()
{
    ThreadID size = threadContexts.size();

    for (ThreadID i = 0; i < size; ++i)
        threadContexts[i]->profileSample();

    schedule(profileEvent, curTick() + params()->profile);
}

void
BaseCPU::serialize(CheckpointOut &cp) const
{
    SERIALIZE_SCALAR(instCnt);

    if (!_switchedOut) {
        /* Unlike _pid, _taskId is not serialized, as they are dynamically
         * assigned unique ids that are only meaningful for the duration of
         * a specific run. We will need to serialize the entire taskMap in
         * the system. */
        SERIALIZE_SCALAR(_pid);

        // Serialize the threads; this is done by the CPU implementation.
        for (ThreadID i = 0; i < numThreads; ++i) {
            ScopedCheckpointSection sec(cp, csprintf("xc.%i", i));
            interrupts[i]->serialize(cp);
            serializeThread(cp, i);
        }
    }
}

void
BaseCPU::unserialize(CheckpointIn &cp)
{
    UNSERIALIZE_SCALAR(instCnt);

    if (!_switchedOut) {
        UNSERIALIZE_SCALAR(_pid);

        // Unserialize the threads; this is done by the CPU implementation.
        for (ThreadID i = 0; i < numThreads; ++i) {
            ScopedCheckpointSection sec(cp, csprintf("xc.%i", i));
            interrupts[i]->unserialize(cp);
            unserializeThread(cp, i);
        }
    }
}

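// scheduleInstStop() below arms an exit event relative to the thread's
// current committed-instruction count: the event fires once the thread has
// committed `insts` more instructions.  For example (hypothetical numbers),
// if a thread has already committed 1000 instructions and insts is 500,
// the simulation loop exits when its count reaches 1500.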
void
BaseCPU::scheduleInstStop(ThreadID tid, Counter insts, const char *cause)
{
    const Tick now(getCurrentInstCount(tid));
    Event *event(new LocalSimLoopExitEvent(cause, 0));

    threadContexts[tid]->scheduleInstCountEvent(event, now + insts);
}

uint64_t
BaseCPU::getCurrentInstCount(ThreadID tid)
{
    return threadContexts[tid]->getCurrentInstCount();
}

AddressMonitor::AddressMonitor() {
    armed = false;
    waiting = false;
    gotWakeup = false;
}

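// doMonitor() below is the wake-up check: when the monitor is armed and a
// thread is waiting, an observed access whose (line-aligned) physical
// address matches the monitored line wakes the core, as reflected in the
// "invalidated: waking up core" trace message.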
bool AddressMonitor::doMonitor(PacketPtr pkt) {
    assert(pkt->req->hasPaddr());
    if (armed && waiting) {
        if (pAddr == pkt->getAddr()) {
            DPRINTF(Mwait,"pAddr=0x%lx invalidated: waking up core\n",
                    pkt->getAddr());
            waiting = false;
            return true;
        }
    }
    return false;
}

void
BaseCPU::traceFunctionsInternal(Addr pc)
{
    if (Loader::debugSymbolTable.empty())
        return;

    // If the pc has entered a different function, print the new function
    // symbol and update the saved range.  Otherwise do nothing.
    if (pc < currentFunctionStart || pc >= currentFunctionEnd) {
        auto it = Loader::debugSymbolTable.findNearest(
                pc, currentFunctionEnd);

        string sym_str;
        if (it == Loader::debugSymbolTable.end()) {
            // no symbol found: use addr as label
            sym_str = csprintf("%#x", pc);
            currentFunctionStart = pc;
            currentFunctionEnd = pc + 1;
        } else {
            sym_str = it->name;
            currentFunctionStart = it->address;
        }

        ccprintf(*functionTraceStream, " (%d)\n%d: %s",
                 curTick() - functionEntryTick, curTick(), sym_str);
        functionEntryTick = curTick();
    }
}

bool
BaseCPU::waitForRemoteGDB() const
{
    return params()->wait_for_remote_gdb;
}