/*
 * Copyright (c) 2011-2012,2016-2017, 2019-2020 ARM Limited
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2011 Regents of the University of California
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * Copyright (c) 2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "cpu/base.hh"

#include <sstream>
#include <string>

#include "arch/generic/tlb.hh"
#include "base/cprintf.hh"
#include "base/loader/symtab.hh"
#include "base/logging.hh"
#include "base/output.hh"
#include "base/trace.hh"
#include "cpu/checker/cpu.hh"
#include "cpu/thread_context.hh"
#include "debug/Mwait.hh"
#include "debug/SyscallVerbose.hh"
#include "debug/Thread.hh"
#include "mem/page_table.hh"
#include "params/BaseCPU.hh"
#include "sim/clocked_object.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"
#include "sim/root.hh"
#include "sim/sim_events.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"
#include "sim/stat_control.hh"

using namespace std;

std::unique_ptr<BaseCPU::GlobalStats> BaseCPU::globalStats;

vector<BaseCPU *> BaseCPU::cpuList;

// This variable reflects the max number of threads in any CPU. Be
// careful to only use it once all the CPUs that you care about have
// been created.
int maxThreadsPerCPU = 1;

CPUProgressEvent::CPUProgressEvent(BaseCPU *_cpu, Tick ival)
    : Event(Event::Progress_Event_Pri), _interval(ival), lastNumInst(0),
      cpu(_cpu), _repeatEvent(true)
{
    if (_interval)
        cpu->schedule(this, curTick() + _interval);
}

void
CPUProgressEvent::process()
{
    Counter temp = cpu->totalOps();

    if (_repeatEvent)
        cpu->schedule(this, curTick() + _interval);

    if (cpu->switchedOut()) {
        return;
    }

#ifndef NDEBUG
    double ipc = double(temp - lastNumInst) / (_interval / cpu->clockPeriod());

    DPRINTFN("%s progress event, total committed:%i, progress insts committed: "
             "%lli, IPC: %0.8d\n", cpu->name(), temp, temp - lastNumInst,
             ipc);
    ipc = 0.0;
#else
    cprintf("%lli: %s progress event, total committed:%i, progress insts "
            "committed: %lli\n", curTick(), cpu->name(), temp,
            temp - lastNumInst);
#endif
    lastNumInst = temp;
}

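// Worked example of the IPC computation above (illustrative numbers, not
// taken from the source): with the usual 1 ps gem5 tick, a 2 GHz CPU has
// clockPeriod() == 500 ticks. An _interval of 50,000,000 ticks therefore
// spans 100,000 cycles, and committing 80,000 instructions in that window
// yields ipc == 80000 / 100000 == 0.8.
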
const char *
CPUProgressEvent::description() const
{
    return "CPU Progress";
}

BaseCPU::BaseCPU(Params *p, bool is_checker)
    : ClockedObject(p), instCnt(0), _cpuId(p->cpu_id), _socketId(p->socket_id),
      _instRequestorId(p->system->getRequestorId(this, "inst")),
      _dataRequestorId(p->system->getRequestorId(this, "data")),
      _taskId(ContextSwitchTaskId::Unknown), _pid(invldPid),
      _switchedOut(p->switched_out), _cacheLineSize(p->system->cacheLineSize()),
      interrupts(p->interrupts), numThreads(p->numThreads), system(p->system),
      previousCycle(0), previousState(CPU_STATE_SLEEP),
      functionTraceStream(nullptr), currentFunctionStart(0),
      currentFunctionEnd(0), functionEntryTick(0),
      addressMonitor(p->numThreads),
      syscallRetryLatency(p->syscallRetryLatency),
      pwrGatingLatency(p->pwr_gating_latency),
      powerGatingOnIdle(p->power_gating_on_idle),
      enterPwrGatingEvent([this]{ enterPwrGating(); }, name())
{
    // if Python did not provide a valid ID, do it here
    if (_cpuId == -1)
        _cpuId = cpuList.size();

    // add self to global list of CPUs
    cpuList.push_back(this);

    DPRINTF(SyscallVerbose, "Constructing CPU with id %d, socket id %d\n",
            _cpuId, _socketId);

    if (numThreads > maxThreadsPerCPU)
        maxThreadsPerCPU = numThreads;

    functionTracingEnabled = false;
    if (p->function_trace) {
        const string fname = csprintf("ftrace.%s", name());
        functionTraceStream = simout.findOrCreate(fname)->stream();

        currentFunctionStart = currentFunctionEnd = 0;
        functionEntryTick = p->function_trace_start;

        if (p->function_trace_start == 0) {
            functionTracingEnabled = true;
        } else {
            Event *event = new EventFunctionWrapper(
                [this]{ enableFunctionTrace(); }, name(), true);
            schedule(event, p->function_trace_start);
        }
    }

    tracer = params()->tracer;

    if (params()->isa.size() != numThreads) {
        fatal("Number of ISAs (%i) assigned to the CPU does not equal number "
              "of threads (%i).\n", params()->isa.size(), numThreads);
    }
}

void
BaseCPU::enableFunctionTrace()
{
    functionTracingEnabled = true;
}

void
BaseCPU::postInterrupt(ThreadID tid, int int_num, int index)
{
    interrupts[tid]->post(int_num, index);

    // Only wake up syscall emulation if it is not waiting on a futex.
    // This is to model the fact that instructions such as ARM SEV
    // should wake up a WFE sleep, but not a futex syscall WAIT.
    if (FullSystem || !system->futexMap.is_waiting(threadContexts[tid]))
        wakeup(tid);
}

void
BaseCPU::armMonitor(ThreadID tid, Addr address)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    monitor.armed = true;
    monitor.vAddr = address;
    monitor.pAddr = 0x0;
    DPRINTF(Mwait, "[tid:%d] Armed monitor (vAddr=0x%lx)\n", tid, address);
}

bool
BaseCPU::mwait(ThreadID tid, PacketPtr pkt)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    if (!monitor.gotWakeup) {
        int block_size = cacheLineSize();
        uint64_t mask = ~((uint64_t)(block_size - 1));

        assert(pkt->req->hasPaddr());
        monitor.pAddr = pkt->getAddr() & mask;
        monitor.waiting = true;

        DPRINTF(Mwait, "[tid:%d] mwait called (vAddr=0x%lx, "
                "line's paddr=0x%lx)\n", tid, monitor.vAddr, monitor.pAddr);
        return true;
    } else {
        monitor.gotWakeup = false;
        return false;
    }
}

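// A rough sketch of how the monitor pieces cooperate (illustrative flow with
// hypothetical surrounding code; the member functions are the real ones
// defined in this file):
//
//     cpu->armMonitor(tid, vaddr);   // 1. remember the watched address
//     if (cpu->mwait(tid, pkt))      // 2. no wakeup pending: start waiting
//         /* suspend the thread */;
//     // 3. later, a coherence snoop that hits the armed cache line reaches
//     //    AddressMonitor::doMonitor(pkt) (defined further down), which
//     //    returns true so the core can wake the thread.
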
void
BaseCPU::mwaitAtomic(ThreadID tid, ThreadContext *tc, BaseTLB *dtb)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    RequestPtr req = std::make_shared<Request>();

    Addr addr = monitor.vAddr;
    int block_size = cacheLineSize();
    uint64_t mask = ~((uint64_t)(block_size - 1));
    int size = block_size;

    // The address of the next line if it crosses a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, block_size);

    if (secondAddr > addr)
        size = secondAddr - addr;

    req->setVirt(addr, size, 0x0, dataRequestorId(), tc->instAddr());

    // translate to physical address
    Fault fault = dtb->translateAtomic(req, tc, BaseTLB::Read);
    assert(fault == NoFault);

    monitor.pAddr = req->getPaddr() & mask;
    monitor.waiting = true;

    DPRINTF(Mwait, "[tid:%d] mwait called (vAddr=0x%lx, line's paddr=0x%lx)\n",
            tid, monitor.vAddr, monitor.pAddr);
}

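// Worked example of the line-mask arithmetic used above (illustrative
// numbers): with block_size == 64, mask == ~uint64_t(63) ==
// 0xffffffffffffffc0, so a physical address of 0x12345 is clamped to its
// line base, 0x12345 & mask == 0x12340. mwait() applies the same mask, so
// doMonitor() always compares line-aligned addresses.
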
void
BaseCPU::init()
{
    // Set up instruction-count-based termination events, if any. This needs
    // to happen after threadContexts has been constructed.
    if (params()->max_insts_any_thread != 0) {
        const char *cause = "a thread reached the max instruction count";
        for (ThreadID tid = 0; tid < numThreads; ++tid)
            scheduleInstStop(tid, params()->max_insts_any_thread, cause);
    }

    // Set up instruction-count-based termination events for SimPoints.
    // Typically, there is more than one action point.
    // Simulation.py is responsible for taking the necessary actions upon
    // exiting the simulation loop.
    if (!params()->simpoint_start_insts.empty()) {
        const char *cause = "simpoint starting point found";
        for (size_t i = 0; i < params()->simpoint_start_insts.size(); ++i)
            scheduleInstStop(0, params()->simpoint_start_insts[i], cause);
    }

    if (params()->max_insts_all_threads != 0) {
        const char *cause = "all threads reached the max instruction count";

        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            threadContexts[tid]->scheduleInstCountEvent(
                    event, params()->max_insts_all_threads);
        }
    }

    if (!params()->switched_out) {
        registerThreadContexts();

        verifyMemoryMode();
    }
}

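// Concrete reading of the shared downcounter above (illustrative numbers):
// with numThreads == 2 and max_insts_all_threads == 1M, each thread gets its
// own CountedExitEvent pointing at the same counter. The first thread to
// retire 1M instructions decrements it 2 -> 1 and simulation continues; only
// when the second thread also reaches 1M does the count hit 0 and the exit
// event actually end the simulation loop.
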
void
BaseCPU::startup()
{
    if (params()->progress_interval) {
        new CPUProgressEvent(this, params()->progress_interval);
    }

    if (_switchedOut)
        powerState->set(Enums::PwrState::OFF);

    // Assumption: the CPU starts operating instantaneously, without any latency
    if (powerState->get() == Enums::PwrState::UNDEFINED)
        powerState->set(Enums::PwrState::ON);
}

ProbePoints::PMUUPtr
BaseCPU::pmuProbePoint(const char *name)
{
    ProbePoints::PMUUPtr ptr;
    ptr.reset(new ProbePoints::PMU(getProbeManager(), name));

    return ptr;
}

void
BaseCPU::regProbePoints()
{
    ppAllCycles = pmuProbePoint("Cycles");
    ppActiveCycles = pmuProbePoint("ActiveCycles");

    ppRetiredInsts = pmuProbePoint("RetiredInsts");
    ppRetiredInstsPC = pmuProbePoint("RetiredInstsPC");
    ppRetiredLoads = pmuProbePoint("RetiredLoads");
    ppRetiredStores = pmuProbePoint("RetiredStores");
    ppRetiredBranches = pmuProbePoint("RetiredBranches");

    ppSleeping = new ProbePointArg<bool>(this->getProbeManager(),
                                         "Sleeping");
}

void
BaseCPU::probeInstCommit(const StaticInstPtr &inst, Addr pc)
{
    if (!inst->isMicroop() || inst->isLastMicroop()) {
        ppRetiredInsts->notify(1);
        ppRetiredInstsPC->notify(pc);
    }

    if (inst->isLoad())
        ppRetiredLoads->notify(1);

    if (inst->isStore() || inst->isAtomic())
        ppRetiredStores->notify(1);

    if (inst->isControl())
        ppRetiredBranches->notify(1);
}

void
BaseCPU::regStats()
{
    ClockedObject::regStats();

    if (!globalStats) {
        /* We need to construct the global CPU stat structure here
         * since it needs a pointer to the Root object. */
        globalStats.reset(new GlobalStats(Root::root()));
    }

    using namespace Stats;

    numCycles
        .name(name() + ".numCycles")
        .desc("number of cpu cycles simulated")
        ;

    numWorkItemsStarted
        .name(name() + ".numWorkItemsStarted")
        .desc("number of work items this cpu started")
        ;

    numWorkItemsCompleted
        .name(name() + ".numWorkItemsCompleted")
        .desc("number of work items this cpu completed")
        ;

    int size = threadContexts.size();
    if (size > 1) {
        for (int i = 0; i < size; ++i) {
            stringstream namestr;
            ccprintf(namestr, "%s.ctx%d", name(), i);
            threadContexts[i]->regStats(namestr.str());
        }
    } else if (size == 1)
        threadContexts[0]->regStats(name());
}

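// Example of the resulting stat hierarchy (hypothetical object path): a CPU
// named "system.cpu" with two hardware threads registers per-context stats
// under "system.cpu.ctx0" and "system.cpu.ctx1", while a single-threaded CPU
// registers them directly under "system.cpu".
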
Port &
BaseCPU::getPort(const string &if_name, PortID idx)
{
    // Get the right port based on name. This applies to all the
    // subclasses of the base CPU and relies on their implementation
    // of getDataPort and getInstPort.
    if (if_name == "dcache_port")
        return getDataPort();
    else if (if_name == "icache_port")
        return getInstPort();
    else
        return ClockedObject::getPort(if_name, idx);
}

void
BaseCPU::registerThreadContexts()
{
    assert(system->multiThread || numThreads == 1);

    fatal_if(interrupts.size() != numThreads,
             "CPU %s has %i interrupt controllers, but is expecting one "
             "per thread (%i)\n",
             name(), interrupts.size(), numThreads);

    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        ThreadContext *tc = threadContexts[tid];

        if (system->multiThread) {
            tc->setContextId(system->registerThreadContext(tc));
        } else {
            tc->setContextId(system->registerThreadContext(tc, _cpuId));
        }

        if (!FullSystem)
            tc->getProcessPtr()->assignThreadContext(tc->contextId());

        interrupts[tid]->setThreadContext(tc);
        tc->getIsaPtr()->setThreadContext(tc);
    }
}

void
BaseCPU::deschedulePowerGatingEvent()
{
    if (enterPwrGatingEvent.scheduled()) {
        deschedule(enterPwrGatingEvent);
    }
}

void
BaseCPU::schedulePowerGatingEvent()
{
    for (auto tc : threadContexts) {
        if (tc->status() == ThreadContext::Active)
            return;
    }

    if (powerState->get() == Enums::PwrState::CLK_GATED &&
        powerGatingOnIdle) {
        assert(!enterPwrGatingEvent.scheduled());
        // Schedule a power gating event when clock gated for the specified
        // amount of time
        schedule(enterPwrGatingEvent, clockEdge(pwrGatingLatency));
    }
}

ThreadID
BaseCPU::findContext(ThreadContext *tc)
{
    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        if (tc == threadContexts[tid])
            return tid;
    }
    return 0;
}

void
BaseCPU::activateContext(ThreadID thread_num)
{
    DPRINTF(Thread, "activate contextId %d\n",
            threadContexts[thread_num]->contextId());
    // Squash enter power gating event while cpu gets activated
    if (enterPwrGatingEvent.scheduled())
        deschedule(enterPwrGatingEvent);
    // For any active thread running, update CPU power state to active (ON)
    powerState->set(Enums::PwrState::ON);

    updateCycleCounters(CPU_STATE_WAKEUP);
}

void
BaseCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(Thread, "suspend contextId %d\n",
            threadContexts[thread_num]->contextId());
    // Check if all threads are suspended
    for (auto t : threadContexts) {
        if (t->status() != ThreadContext::Suspended) {
            return;
        }
    }

    // All CPU threads are suspended, update the cycle count
    updateCycleCounters(CPU_STATE_SLEEP);

    // All CPU threads suspended, enter a lower power state for the CPU
    powerState->set(Enums::PwrState::CLK_GATED);

    // If pwrGatingLatency is set to 0 then this mechanism is disabled
    if (powerGatingOnIdle) {
        // Schedule a power gating event when clock gated for pwrGatingLatency
        // ticks
        schedule(enterPwrGatingEvent, clockEdge(pwrGatingLatency));
    }
}

void
BaseCPU::haltContext(ThreadID thread_num)
{
    updateCycleCounters(BaseCPU::CPU_STATE_SLEEP);
}

void
BaseCPU::enterPwrGating(void)
{
    powerState->set(Enums::PwrState::OFF);
}

void
BaseCPU::switchOut()
{
    assert(!_switchedOut);
    _switchedOut = true;

    // Flush all TLBs in the CPU to avoid having stale translations if
    // it gets switched in later.
    flushTLBs();

    // Go to the power gating state
    powerState->set(Enums::PwrState::OFF);
}

void
BaseCPU::takeOverFrom(BaseCPU *oldCPU)
{
    assert(threadContexts.size() == oldCPU->threadContexts.size());
    assert(_cpuId == oldCPU->cpuId());
    assert(_switchedOut);
    assert(oldCPU != this);

    _pid = oldCPU->getPid();
    _taskId = oldCPU->taskId();
    // Take over the power state of the switchedOut CPU
    powerState->set(oldCPU->powerState->get());

    previousState = oldCPU->previousState;
    previousCycle = oldCPU->previousCycle;

    _switchedOut = false;

    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *newTC = threadContexts[i];
        ThreadContext *oldTC = oldCPU->threadContexts[i];

        newTC->getIsaPtr()->setThreadContext(newTC);

        newTC->takeOverFrom(oldTC);

        assert(newTC->contextId() == oldTC->contextId());
        assert(newTC->threadId() == oldTC->threadId());
        system->replaceThreadContext(newTC, newTC->contextId());

        /* This code no longer works since the zero register (e.g.,
         * r31 on Alpha) doesn't necessarily contain zero at this
         * point:
         *   ThreadContext::compare(oldTC, newTC);
         */

        Port *old_itb_port = oldTC->getITBPtr()->getTableWalkerPort();
        Port *old_dtb_port = oldTC->getDTBPtr()->getTableWalkerPort();
        Port *new_itb_port = newTC->getITBPtr()->getTableWalkerPort();
        Port *new_dtb_port = newTC->getDTBPtr()->getTableWalkerPort();

        // Move over any table walker ports if they exist
        if (new_itb_port)
            new_itb_port->takeOverFrom(old_itb_port);
        if (new_dtb_port)
            new_dtb_port->takeOverFrom(old_dtb_port);
        newTC->getITBPtr()->takeOverFrom(oldTC->getITBPtr());
        newTC->getDTBPtr()->takeOverFrom(oldTC->getDTBPtr());

        // Check whether or not we have to transfer CheckerCPU
        // objects over in the switch
        CheckerCPU *oldChecker = oldTC->getCheckerCpuPtr();
        CheckerCPU *newChecker = newTC->getCheckerCpuPtr();
        if (oldChecker && newChecker) {
            Port *old_checker_itb_port =
                oldChecker->getITBPtr()->getTableWalkerPort();
            Port *old_checker_dtb_port =
                oldChecker->getDTBPtr()->getTableWalkerPort();
            Port *new_checker_itb_port =
                newChecker->getITBPtr()->getTableWalkerPort();
            Port *new_checker_dtb_port =
                newChecker->getDTBPtr()->getTableWalkerPort();

            newChecker->getITBPtr()->takeOverFrom(oldChecker->getITBPtr());
            newChecker->getDTBPtr()->takeOverFrom(oldChecker->getDTBPtr());

            // Move over any table walker ports if they exist for checker
            if (new_checker_itb_port)
                new_checker_itb_port->takeOverFrom(old_checker_itb_port);
            if (new_checker_dtb_port)
                new_checker_dtb_port->takeOverFrom(old_checker_dtb_port);
        }
    }

    interrupts = oldCPU->interrupts;
    for (ThreadID tid = 0; tid < numThreads; tid++) {
        interrupts[tid]->setThreadContext(threadContexts[tid]);
    }
    oldCPU->interrupts.clear();

    // All CPUs have an instruction and a data port, and the new CPU's
    // ports are dangling while the old CPU has its ports connected
    // already. Unbind the old CPU and then bind the ports of the one
    // we are switching to.
    getInstPort().takeOverFrom(&oldCPU->getInstPort());
    getDataPort().takeOverFrom(&oldCPU->getDataPort());
}

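// Sketch of the switching sequence this function participates in (the driver
// code lives in the Python configuration layer, so this is a schematic, not
// the literal call site):
//
//     // with simulation drained:
//     oldCPU->switchOut();          // flush TLBs, power-gate the old core
//     newCPU->takeOverFrom(oldCPU); // move contexts, ports, interrupts
//     // then resume simulation on newCPU
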
void
BaseCPU::flushTLBs()
{
    for (ThreadID i = 0; i < threadContexts.size(); ++i) {
        ThreadContext &tc(*threadContexts[i]);
        CheckerCPU *checker(tc.getCheckerCpuPtr());

        tc.getITBPtr()->flushAll();
        tc.getDTBPtr()->flushAll();
        if (checker) {
            checker->getITBPtr()->flushAll();
            checker->getDTBPtr()->flushAll();
        }
    }
}

void
BaseCPU::serialize(CheckpointOut &cp) const
{
    SERIALIZE_SCALAR(instCnt);

    if (!_switchedOut) {
        /* Unlike _pid, _taskId is not serialized, as they are dynamically
         * assigned unique ids that are only meaningful for the duration of
         * a specific run. We will need to serialize the entire taskMap in
         * system. */
        SERIALIZE_SCALAR(_pid);

        // Serialize the threads, this is done by the CPU implementation.
        for (ThreadID i = 0; i < numThreads; ++i) {
            ScopedCheckpointSection sec(cp, csprintf("xc.%i", i));
            interrupts[i]->serialize(cp);
            serializeThread(cp, i);
        }
    }
}

void
BaseCPU::unserialize(CheckpointIn &cp)
{
    UNSERIALIZE_SCALAR(instCnt);

    if (!_switchedOut) {
        UNSERIALIZE_SCALAR(_pid);

        // Unserialize the threads, this is done by the CPU implementation.
        for (ThreadID i = 0; i < numThreads; ++i) {
            ScopedCheckpointSection sec(cp, csprintf("xc.%i", i));
            interrupts[i]->unserialize(cp);
            unserializeThread(cp, i);
        }
    }
}

void
BaseCPU::scheduleInstStop(ThreadID tid, Counter insts, const char *cause)
{
    const Tick now(getCurrentInstCount(tid));
    Event *event(new LocalSimLoopExitEvent(cause, 0));

    threadContexts[tid]->scheduleInstCountEvent(event, now + insts);
}

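// Worked example (illustrative numbers): if thread 0 has already committed
// 1000 instructions and scheduleInstStop(0, 500, "...") is called, the exit
// event is scheduled at an instruction count of 1500 -- the "insts" argument
// is relative to the thread's current count, not an absolute target.
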
Tick
BaseCPU::getCurrentInstCount(ThreadID tid)
{
    return threadContexts[tid]->getCurrentInstCount();
}

AddressMonitor::AddressMonitor()
{
    armed = false;
    waiting = false;
    gotWakeup = false;
}

bool
AddressMonitor::doMonitor(PacketPtr pkt)
{
    assert(pkt->req->hasPaddr());
    if (armed && waiting) {
        if (pAddr == pkt->getAddr()) {
            DPRINTF(Mwait, "pAddr=0x%lx invalidated: waking up core\n",
                    pkt->getAddr());
            waiting = false;
            return true;
        }
    }
    return false;
}

void
BaseCPU::traceFunctionsInternal(Addr pc)
{
    if (Loader::debugSymbolTable.empty())
        return;

    // if pc enters different function, print new function symbol and
    // update saved range. Otherwise do nothing.
    if (pc < currentFunctionStart || pc >= currentFunctionEnd) {
        auto it = Loader::debugSymbolTable.findNearest(
                pc, currentFunctionEnd);

        string sym_str;
        if (it == Loader::debugSymbolTable.end()) {
            // no symbol found: use addr as label
            sym_str = csprintf("%#x", pc);
            currentFunctionStart = pc;
            currentFunctionEnd = pc + 1;
        } else {
            sym_str = it->name;
            currentFunctionStart = it->address;
        }

        ccprintf(*functionTraceStream, " (%d)\n%d: %s",
                 curTick() - functionEntryTick, curTick(), sym_str);
        functionEntryTick = curTick();
    }
}

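// Example of the resulting ftrace stream (illustrative ticks): each call
// first closes the previous record with the time spent in that function,
// then starts a line for the new one, so the file reads like
//
//     500300: memcpy (800)
//     501100: 0x4007f0 (1200)
//
// where a bare address is the fallback label used when no symbol covers pc.
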
bool
BaseCPU::waitForRemoteGDB() const
{
    return params()->wait_for_remote_gdb;
}

BaseCPU::GlobalStats::GlobalStats(::Stats::Group *parent)
    : ::Stats::Group(parent),
      simInsts(this, "sim_insts", "Number of instructions simulated"),
      simOps(this, "sim_ops", "Number of ops (including micro ops) simulated"),
      hostInstRate(this, "host_inst_rate",
                   "Simulator instruction rate (inst/s)"),
      hostOpRate(this, "host_op_rate",
                 "Simulator op (including micro ops) rate (op/s)")
{
    simInsts
        .functor(BaseCPU::numSimulatedInsts)
        ;

    simOps
        .functor(BaseCPU::numSimulatedOps)
        ;
}

void
BaseCPU::GlobalStats::preDumpStats()
{
    ::Stats::Group::preDumpStats();

    hostInstRate = simInsts / hostSeconds;
    hostOpRate = simOps / hostSeconds;
}
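
// Worked example of the host-rate stats (illustrative numbers): if a run
// simulates 10,000,000 instructions in 50 s of host wall-clock time,
// host_inst_rate is reported as 10e6 / 50 = 200,000 inst/s; hostSeconds is
// the stats framework's counter of real time elapsed on the host.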