/*
 * Copyright (c) 2011-2012,2016-2017, 2019 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2011 Regents of the University of California
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * Copyright (c) 2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "cpu/base.hh"

#include <iostream>
#include <sstream>
#include <string>

#include "arch/generic/tlb.hh"
#include "base/cprintf.hh"
#include "base/loader/symtab.hh"
#include "base/logging.hh"
#include "base/output.hh"
#include "base/trace.hh"
#include "cpu/checker/cpu.hh"
#include "cpu/cpuevent.hh"
#include "cpu/profile.hh"
#include "cpu/thread_context.hh"
#include "debug/Mwait.hh"
#include "debug/SyscallVerbose.hh"
#include "debug/Thread.hh"
#include "mem/page_table.hh"
#include "params/BaseCPU.hh"
#include "sim/clocked_object.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"
#include "sim/sim_events.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

#include "sim/stat_control.hh"

using namespace std;

vector<BaseCPU *> BaseCPU::cpuList;

// This variable reflects the max number of threads in any CPU. Be
// careful to only use it once all the CPUs that you care about have
// been initialized.
int maxThreadsPerCPU = 1;

CPUProgressEvent::CPUProgressEvent(BaseCPU *_cpu, Tick ival)
    : Event(Event::Progress_Event_Pri), _interval(ival), lastNumInst(0),
      cpu(_cpu), _repeatEvent(true)
{
    if (_interval)
        cpu->schedule(this, curTick() + _interval);
}

void
CPUProgressEvent::process()
{
    Counter temp = cpu->totalOps();

    if (_repeatEvent)
        cpu->schedule(this, curTick() + _interval);

    if (cpu->switchedOut()) {
        return;
    }

#ifndef NDEBUG
    double ipc = double(temp - lastNumInst) / (_interval / cpu->clockPeriod());
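    // Sanity note on the arithmetic above: _interval is in ticks and
    // cpu->clockPeriod() is ticks per CPU cycle, so the divisor is the number
    // of cycles in the progress interval. E.g., with a 1 THz tick rate, a
    // 2 GHz clock (clockPeriod() == 500 ticks) and _interval == 1,000,000
    // ticks, 1,500 newly committed ops give IPC = 1500 / 2000 = 0.75.
    // (Illustrative numbers only.)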

    DPRINTFN("%s progress event, total committed:%i, progress insts committed: "
             "%lli, IPC: %0.8d\n", cpu->name(), temp, temp - lastNumInst,
             ipc);
    ipc = 0.0;
#else
    cprintf("%lli: %s progress event, total committed:%i, progress insts "
            "committed: %lli\n", curTick(), cpu->name(), temp,
            temp - lastNumInst);
#endif
    lastNumInst = temp;
}

const char *
CPUProgressEvent::description() const
{
    return "CPU Progress";
}

BaseCPU::BaseCPU(Params *p, bool is_checker)
    : ClockedObject(p), instCnt(0), _cpuId(p->cpu_id), _socketId(p->socket_id),
      _instMasterId(p->system->getMasterId(this, "inst")),
      _dataMasterId(p->system->getMasterId(this, "data")),
      _taskId(ContextSwitchTaskId::Unknown), _pid(invldPid),
      _switchedOut(p->switched_out), _cacheLineSize(p->system->cacheLineSize()),
      interrupts(p->interrupts), profileEvent(NULL),
      numThreads(p->numThreads), system(p->system),
      previousCycle(0), previousState(CPU_STATE_SLEEP),
      functionTraceStream(nullptr), currentFunctionStart(0),
      currentFunctionEnd(0), functionEntryTick(0),
      addressMonitor(p->numThreads),
      syscallRetryLatency(p->syscallRetryLatency),
      pwrGatingLatency(p->pwr_gating_latency),
      powerGatingOnIdle(p->power_gating_on_idle),
      enterPwrGatingEvent([this]{ enterPwrGating(); }, name())
{
    // if Python did not provide a valid ID, do it here
    if (_cpuId == -1) {
        _cpuId = cpuList.size();
    }

    // add self to global list of CPUs
    cpuList.push_back(this);

    DPRINTF(SyscallVerbose, "Constructing CPU with id %d, socket id %d\n",
            _cpuId, _socketId);

    if (numThreads > maxThreadsPerCPU)
        maxThreadsPerCPU = numThreads;

    functionTracingEnabled = false;
    if (p->function_trace) {
        const string fname = csprintf("ftrace.%s", name());
        functionTraceStream = simout.findOrCreate(fname)->stream();

        currentFunctionStart = currentFunctionEnd = 0;
        functionEntryTick = p->function_trace_start;

        if (p->function_trace_start == 0) {
            functionTracingEnabled = true;
        } else {
            Event *event = new EventFunctionWrapper(
                [this]{ enableFunctionTrace(); }, name(), true);
            schedule(event, p->function_trace_start);
        }
    }

    // The interrupts should always be present unless this CPU is
    // switched in later or in case it is a checker CPU
    if (!params()->switched_out && !is_checker) {
        fatal_if(interrupts.size() != numThreads,
                 "CPU %s has %i interrupt controllers, but is expecting one "
                 "per thread (%i)\n",
                 name(), interrupts.size(), numThreads);
        for (ThreadID tid = 0; tid < numThreads; tid++)
            interrupts[tid]->setCPU(this);
    }

    if (FullSystem) {
        if (params()->profile)
            profileEvent = new EventFunctionWrapper(
                [this]{ processProfileEvent(); },
                name());
    }

    tracer = params()->tracer;

    if (params()->isa.size() != numThreads) {
        fatal("Number of ISAs (%i) assigned to the CPU does not equal number "
              "of threads (%i).\n", params()->isa.size(), numThreads);
    }
}

void
BaseCPU::enableFunctionTrace()
{
    functionTracingEnabled = true;
}

void
BaseCPU::armMonitor(ThreadID tid, Addr address)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    monitor.armed = true;
    monitor.vAddr = address;

    DPRINTF(Mwait, "[tid:%d] Armed monitor (vAddr=0x%lx)\n", tid, address);
}

bool
BaseCPU::mwait(ThreadID tid, PacketPtr pkt)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    if (!monitor.gotWakeup) {
        int block_size = cacheLineSize();
        uint64_t mask = ~((uint64_t)(block_size - 1));
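        // The mask clears the block-offset bits, so the address recorded
        // below is the start of the monitored cache line. E.g., with a
        // 64-byte line (mask == ~0x3f), 0x12f4 is recorded as 0x12c0.
        // (Illustrative values.)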

        assert(pkt->req->hasPaddr());
        monitor.pAddr = pkt->getAddr() & mask;
        monitor.waiting = true;

        DPRINTF(Mwait, "[tid:%d] mwait called (vAddr=0x%lx, "
                "line's paddr=0x%lx)\n", tid, monitor.vAddr, monitor.pAddr);
        return true;
    } else {
        monitor.gotWakeup = false;
        return false;
    }
}

void
BaseCPU::mwaitAtomic(ThreadID tid, ThreadContext *tc, BaseTLB *dtb)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    RequestPtr req = std::make_shared<Request>();

    Addr addr = monitor.vAddr;
    int block_size = cacheLineSize();
    uint64_t mask = ~((uint64_t)(block_size - 1));
    int size = block_size;

    // The address of the next line if it crosses a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, block_size);

    if (secondAddr > addr)
        size = secondAddr - addr;
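    // If the monitored vAddr is not line-aligned, the access is trimmed so it
    // ends at the next line boundary instead of spilling into a second cache
    // line. E.g., with 64-byte lines and addr == 0x1010, secondAddr == 0x1040
    // and size becomes 0x30. (Illustrative values.)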

    req->setVirt(0, addr, size, 0x0, dataMasterId(), tc->instAddr());

    // translate to physical address
    Fault fault = dtb->translateAtomic(req, tc, BaseTLB::Read);
    assert(fault == NoFault);

    monitor.pAddr = req->getPaddr() & mask;
    monitor.waiting = true;

    DPRINTF(Mwait, "[tid:%d] mwait called (vAddr=0x%lx, line's paddr=0x%lx)\n",
            tid, monitor.vAddr, monitor.pAddr);
}

void
BaseCPU::init()
{
    // Set up instruction-count-based termination events, if any. This needs
    // to happen after threadContexts has been constructed.
    if (params()->max_insts_any_thread != 0) {
        const char *cause = "a thread reached the max instruction count";
        for (ThreadID tid = 0; tid < numThreads; ++tid)
            scheduleInstStop(tid, params()->max_insts_any_thread, cause);
    }

    // Set up instruction-count-based termination events for SimPoints.
    // Typically, there is more than one action point.
    // Simulation.py is responsible for taking the necessary actions upon
    // exiting the simulation loop.
    if (!params()->simpoint_start_insts.empty()) {
        const char *cause = "simpoint starting point found";
        for (size_t i = 0; i < params()->simpoint_start_insts.size(); ++i)
            scheduleInstStop(0, params()->simpoint_start_insts[i], cause);
    }

    if (params()->max_insts_all_threads != 0) {
        const char *cause = "all threads reached the max instruction count";

        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            threadContexts[tid]->scheduleInstCountEvent(
                    event, params()->max_insts_all_threads);
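            // Every thread arms an event at the same per-thread instruction
            // count; each CountedExitEvent that fires decrements the shared
            // counter allocated above, and the simulation loop is only exited
            // once the last one brings it to zero.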
        }
    }

    if (!params()->switched_out) {
        registerThreadContexts();

        verifyMemoryMode();
    }

    //These calls eventually need to be moved to initState
    if (FullSystem && !params()->switched_out) {
        for (auto *tc : threadContexts)
            TheISA::initCPU(tc, tc->contextId());
    }
}

void
BaseCPU::startup()
{
    if (FullSystem) {
        if (!params()->switched_out && profileEvent)
            schedule(profileEvent, curTick());
    }

    if (params()->progress_interval) {
        new CPUProgressEvent(this, params()->progress_interval);
    }

    if (_switchedOut)
        ClockedObject::pwrState(Enums::PwrState::OFF);

    // Assumption: the CPU starts operating instantaneously, without any latency
    if (ClockedObject::pwrState() == Enums::PwrState::UNDEFINED)
        ClockedObject::pwrState(Enums::PwrState::ON);
}

ProbePoints::PMUUPtr
BaseCPU::pmuProbePoint(const char *name)
{
    ProbePoints::PMUUPtr ptr;
    ptr.reset(new ProbePoints::PMU(getProbeManager(), name));

    return ptr;
}

void
BaseCPU::regProbePoints()
{
    ppAllCycles = pmuProbePoint("Cycles");
    ppActiveCycles = pmuProbePoint("ActiveCycles");

    ppRetiredInsts = pmuProbePoint("RetiredInsts");
    ppRetiredInstsPC = pmuProbePoint("RetiredInstsPC");
    ppRetiredLoads = pmuProbePoint("RetiredLoads");
    ppRetiredStores = pmuProbePoint("RetiredStores");
    ppRetiredBranches = pmuProbePoint("RetiredBranches");

    ppSleeping = new ProbePointArg<bool>(this->getProbeManager(),
                                         "Sleeping");
}
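
// The PMU probe points registered above are generic instrumentation hooks:
// anything with access to this object's ProbeManager (for example an
// architectural PMU model or a custom ProbeListener) can subscribe to them by
// name and count the notify() calls issued from probeInstCommit() below.
// (Descriptive note; the listener side is implemented elsewhere.)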

void
BaseCPU::probeInstCommit(const StaticInstPtr &inst, Addr pc)
{
    if (!inst->isMicroop() || inst->isLastMicroop()) {
        ppRetiredInsts->notify(1);
        ppRetiredInstsPC->notify(pc);
    }

    if (inst->isLoad())
        ppRetiredLoads->notify(1);

    if (inst->isStore() || inst->isAtomic())
        ppRetiredStores->notify(1);

    if (inst->isControl())
        ppRetiredBranches->notify(1);
}

void
BaseCPU::regStats()
{
    ClockedObject::regStats();

    using namespace Stats;

    numCycles
        .name(name() + ".numCycles")
        .desc("number of cpu cycles simulated")
        ;

    numWorkItemsStarted
        .name(name() + ".numWorkItemsStarted")
        .desc("number of work items this cpu started")
        ;

    numWorkItemsCompleted
        .name(name() + ".numWorkItemsCompleted")
        .desc("number of work items this cpu completed")
        ;

    int size = threadContexts.size();
    if (size > 1) {
        for (int i = 0; i < size; ++i) {
            stringstream namestr;
            ccprintf(namestr, "%s.ctx%d", name(), i);
            threadContexts[i]->regStats(namestr.str());
        }
    } else if (size == 1)
        threadContexts[0]->regStats(name());
}

Port &
BaseCPU::getPort(const string &if_name, PortID idx)
{
    // Get the right port based on name. This applies to all the
    // subclasses of the base CPU and relies on their implementation
    // of getDataPort and getInstPort.
    if (if_name == "dcache_port")
        return getDataPort();
    else if (if_name == "icache_port")
        return getInstPort();
    else
        return ClockedObject::getPort(if_name, idx);
}
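
// When a system is assembled, "icache_port" and "dcache_port" are the names a
// configuration script uses to attach this CPU to the memory system (usually
// the cpu_side ports of the L1 caches, or a bus/crossbar directly when
// running without caches).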

void
BaseCPU::registerThreadContexts()
{
    assert(system->multiThread || numThreads == 1);

    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        ThreadContext *tc = threadContexts[tid];

        if (system->multiThread) {
            tc->setContextId(system->registerThreadContext(tc));
        } else {
            tc->setContextId(system->registerThreadContext(tc, _cpuId));
        }

        if (!FullSystem)
            tc->getProcessPtr()->assignThreadContext(tc->contextId());
    }
}

void
BaseCPU::deschedulePowerGatingEvent()
{
    if (enterPwrGatingEvent.scheduled()) {
        deschedule(enterPwrGatingEvent);
    }
}

void
BaseCPU::schedulePowerGatingEvent()
{
    for (auto tc : threadContexts) {
        if (tc->status() == ThreadContext::Active)
            return;
    }

    if (ClockedObject::pwrState() == Enums::PwrState::CLK_GATED &&
        powerGatingOnIdle) {
        assert(!enterPwrGatingEvent.scheduled());
        // Schedule a power gating event when clock gated for the specified
        // amount of time
        schedule(enterPwrGatingEvent, clockEdge(pwrGatingLatency));
    }
}

int
BaseCPU::findContext(ThreadContext *tc)
{
    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        if (tc == threadContexts[tid])
            return tid;
    }
    return 0;
}

void
BaseCPU::activateContext(ThreadID thread_num)
{
    DPRINTF(Thread, "activate contextId %d\n",
            threadContexts[thread_num]->contextId());
    // Squash enter power gating event while cpu gets activated
    if (enterPwrGatingEvent.scheduled())
        deschedule(enterPwrGatingEvent);
    // For any active thread running, update CPU power state to active (ON)
    ClockedObject::pwrState(Enums::PwrState::ON);

    updateCycleCounters(CPU_STATE_WAKEUP);
}

void
BaseCPU::suspendContext(ThreadID thread_num)
{
    DPRINTF(Thread, "suspend contextId %d\n",
            threadContexts[thread_num]->contextId());
    // Check if all threads are suspended
    for (auto t : threadContexts) {
        if (t->status() != ThreadContext::Suspended) {
            return;
        }
    }

    // All CPU threads are suspended, update the cycle count
    updateCycleCounters(CPU_STATE_SLEEP);

    // All CPU threads suspended, enter lower power state for the CPU
    ClockedObject::pwrState(Enums::PwrState::CLK_GATED);

    // If pwrGatingLatency is set to 0 then this mechanism is disabled
    if (powerGatingOnIdle) {
        // Schedule power gating event when clock gated for pwrGatingLatency
        // cycles
        schedule(enterPwrGatingEvent, clockEdge(pwrGatingLatency));
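        // From here the CPU sits in CLK_GATED; unless a thread is activated
        // first (activateContext() squashes this event), enterPwrGating()
        // drops the power state to OFF after pwrGatingLatency cycles.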
    }
}

void
BaseCPU::haltContext(ThreadID thread_num)
{
    updateCycleCounters(BaseCPU::CPU_STATE_SLEEP);
}

void
BaseCPU::enterPwrGating(void)
{
    ClockedObject::pwrState(Enums::PwrState::OFF);
}

void
BaseCPU::switchOut()
{
    assert(!_switchedOut);
    _switchedOut = true;
    if (profileEvent && profileEvent->scheduled())
        deschedule(profileEvent);

    // Flush all TLBs in the CPU to avoid having stale translations if
    // it gets switched in later.
    flushTLBs();

    // Go to the power gating state
    ClockedObject::pwrState(Enums::PwrState::OFF);
}

void
BaseCPU::takeOverFrom(BaseCPU *oldCPU)
{
    assert(threadContexts.size() == oldCPU->threadContexts.size());
    assert(_cpuId == oldCPU->cpuId());
    assert(_switchedOut);
    assert(oldCPU != this);
    _pid = oldCPU->getPid();
    _taskId = oldCPU->taskId();
    // Take over the power state of the switchedOut CPU
    ClockedObject::pwrState(oldCPU->pwrState());

    previousState = oldCPU->previousState;
    previousCycle = oldCPU->previousCycle;

    _switchedOut = false;

    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *newTC = threadContexts[i];
        ThreadContext *oldTC = oldCPU->threadContexts[i];

        newTC->takeOverFrom(oldTC);

        CpuEvent::replaceThreadContext(oldTC, newTC);

        assert(newTC->contextId() == oldTC->contextId());
        assert(newTC->threadId() == oldTC->threadId());
        system->replaceThreadContext(newTC, newTC->contextId());

        /* This code no longer works since the zero register (e.g.,
         * r31 on Alpha) doesn't necessarily contain zero at this
         * point.
           if (DTRACE(Context))
               ThreadContext::compare(oldTC, newTC);
        */

        Port *old_itb_port = oldTC->getITBPtr()->getTableWalkerPort();
        Port *old_dtb_port = oldTC->getDTBPtr()->getTableWalkerPort();
        Port *new_itb_port = newTC->getITBPtr()->getTableWalkerPort();
        Port *new_dtb_port = newTC->getDTBPtr()->getTableWalkerPort();

        // Move over any table walker ports if they exist
        if (new_itb_port)
            new_itb_port->takeOverFrom(old_itb_port);
        if (new_dtb_port)
            new_dtb_port->takeOverFrom(old_dtb_port);
        newTC->getITBPtr()->takeOverFrom(oldTC->getITBPtr());
        newTC->getDTBPtr()->takeOverFrom(oldTC->getDTBPtr());

        // Check whether or not we have to transfer CheckerCPU
        // objects over in the switch
        CheckerCPU *oldChecker = oldTC->getCheckerCpuPtr();
        CheckerCPU *newChecker = newTC->getCheckerCpuPtr();
        if (oldChecker && newChecker) {
            Port *old_checker_itb_port =
                oldChecker->getITBPtr()->getTableWalkerPort();
            Port *old_checker_dtb_port =
                oldChecker->getDTBPtr()->getTableWalkerPort();
            Port *new_checker_itb_port =
                newChecker->getITBPtr()->getTableWalkerPort();
            Port *new_checker_dtb_port =
                newChecker->getDTBPtr()->getTableWalkerPort();

            newChecker->getITBPtr()->takeOverFrom(oldChecker->getITBPtr());
            newChecker->getDTBPtr()->takeOverFrom(oldChecker->getDTBPtr());

            // Move over any table walker ports if they exist for checker
            if (new_checker_itb_port)
                new_checker_itb_port->takeOverFrom(old_checker_itb_port);
            if (new_checker_dtb_port)
                new_checker_dtb_port->takeOverFrom(old_checker_dtb_port);
        }
    }

    interrupts = oldCPU->interrupts;
    for (ThreadID tid = 0; tid < numThreads; tid++) {
        interrupts[tid]->setCPU(this);
    }
    oldCPU->interrupts.clear();

    if (FullSystem) {
        for (ThreadID i = 0; i < size; ++i)
            threadContexts[i]->profileClear();

        if (profileEvent)
            schedule(profileEvent, curTick());
    }

    // All CPUs have an instruction and a data port, and the new CPU's
    // ports are dangling while the old CPU has its ports connected
    // already. Unbind the old CPU and then bind the ports of the one
    // we are switching to.
    getInstPort().takeOverFrom(&oldCPU->getInstPort());
    getDataPort().takeOverFrom(&oldCPU->getDataPort());
}

void
BaseCPU::flushTLBs()
{
    for (ThreadID i = 0; i < threadContexts.size(); ++i) {
        ThreadContext &tc(*threadContexts[i]);
        CheckerCPU *checker(tc.getCheckerCpuPtr());

        tc.getITBPtr()->flushAll();
        tc.getDTBPtr()->flushAll();
        if (checker) {
            checker->getITBPtr()->flushAll();
            checker->getDTBPtr()->flushAll();
        }
    }
}

void
BaseCPU::processProfileEvent()
{
    ThreadID size = threadContexts.size();

    for (ThreadID i = 0; i < size; ++i)
        threadContexts[i]->profileSample();

    schedule(profileEvent, curTick() + params()->profile);
}

void
BaseCPU::serialize(CheckpointOut &cp) const
{
    SERIALIZE_SCALAR(instCnt);

    if (!_switchedOut) {
        /* Unlike _pid, _taskId is not serialized, as they are dynamically
         * assigned unique ids that are only meaningful for the duration of
         * a specific run. We will need to serialize the entire taskMap in
         * system. */
        SERIALIZE_SCALAR(_pid);

        // Serialize the threads, this is done by the CPU implementation.
        for (ThreadID i = 0; i < numThreads; ++i) {
            ScopedCheckpointSection sec(cp, csprintf("xc.%i", i));
            interrupts[i]->serialize(cp);
            serializeThread(cp, i);
        }
    }
}

void
BaseCPU::unserialize(CheckpointIn &cp)
{
    UNSERIALIZE_SCALAR(instCnt);

    if (!_switchedOut) {
        UNSERIALIZE_SCALAR(_pid);

        // Unserialize the threads, this is done by the CPU implementation.
        for (ThreadID i = 0; i < numThreads; ++i) {
            ScopedCheckpointSection sec(cp, csprintf("xc.%i", i));
            interrupts[i]->unserialize(cp);
            unserializeThread(cp, i);
        }
    }
}

void
BaseCPU::scheduleInstStop(ThreadID tid, Counter insts, const char *cause)
{
    const Tick now(getCurrentInstCount(tid));
    Event *event(new LocalSimLoopExitEvent(cause, 0));

    threadContexts[tid]->scheduleInstCountEvent(event, now + insts);
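    // Note that the threshold is relative: the exit event fires once this
    // thread has committed `insts` more instructions beyond its current
    // count, not when its lifetime total reaches `insts`.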
}

uint64_t
BaseCPU::getCurrentInstCount(ThreadID tid)
{
    return threadContexts[tid]->getCurrentInstCount();
}
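
// AddressMonitor backs the armMonitor()/mwait() machinery above. doMonitor()
// is the consumer side: CPU models are expected to call it when they observe
// a write or an invalidating snoop, and a true return value means the
// monitored line was touched and the waiting core should be woken.
// (Summary comment; the call sites live in the individual CPU models.)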

AddressMonitor::AddressMonitor() {
    armed = false;
    waiting = false;
    gotWakeup = false;
}

bool AddressMonitor::doMonitor(PacketPtr pkt) {
    assert(pkt->req->hasPaddr());
    if (armed && waiting) {
        if (pAddr == pkt->getAddr()) {
            DPRINTF(Mwait, "pAddr=0x%lx invalidated: waking up core\n",
                    pkt->getAddr());
            waiting = false;
            return true;
        }
    }
    return false;
}

void
BaseCPU::traceFunctionsInternal(Addr pc)
{
    if (!debugSymbolTable)
        return;

    // if pc enters different function, print new function symbol and
    // update saved range. Otherwise do nothing.
    if (pc < currentFunctionStart || pc >= currentFunctionEnd) {
        string sym_str;
        bool found = debugSymbolTable->findNearestSymbol(pc, sym_str,
                                                         currentFunctionStart,
                                                         currentFunctionEnd);
        if (!found) {
            // no symbol found: use addr as label
            sym_str = csprintf("0x%x", pc);
            currentFunctionStart = pc;
            currentFunctionEnd = pc + 1;
        }

        ccprintf(*functionTraceStream, " (%d)\n%d: %s",
                 curTick() - functionEntryTick, curTick(), sym_str);
        functionEntryTick = curTick();
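        // Each trace record therefore reads "<tick>: <symbol>", and the time
        // spent in that function is appended as " (<ticks>)" once the next
        // function is entered.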
    }
}

bool
BaseCPU::waitForRemoteGDB() const
{
    return params()->wait_for_remote_gdb;
}