/*
 * Copyright (c) 2011-2012,2016 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder. You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2011 Regents of the University of California
 * Copyright (c) 2013 Advanced Micro Devices, Inc.
 * Copyright (c) 2013 Mark D. Hill and David A. Wood
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include "cpu/base.hh"

#include <sstream>
#include <string>

#include "arch/tlb.hh"
#include "base/cprintf.hh"
#include "base/loader/symtab.hh"
#include "base/misc.hh"
#include "base/output.hh"
#include "base/trace.hh"
#include "cpu/checker/cpu.hh"
#include "cpu/cpuevent.hh"
#include "cpu/profile.hh"
#include "cpu/thread_context.hh"
#include "debug/Mwait.hh"
#include "debug/SyscallVerbose.hh"
#include "mem/page_table.hh"
#include "params/BaseCPU.hh"
#include "sim/clocked_object.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"
#include "sim/sim_events.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

#include "sim/stat_control.hh"

using namespace std;

vector<BaseCPU *> BaseCPU::cpuList;

// This variable reflects the max number of threads in any CPU. Be
// careful to only use it once all the CPUs that you care about have
// been initialized.
int maxThreadsPerCPU = 1;

CPUProgressEvent::CPUProgressEvent(BaseCPU *_cpu, Tick ival)
    : Event(Event::Progress_Event_Pri), _interval(ival), lastNumInst(0),
      cpu(_cpu), _repeatEvent(true)
{
    if (_interval)
        cpu->schedule(this, curTick() + _interval);
}

void
CPUProgressEvent::process()
{
    Counter temp = cpu->totalOps();

    if (_repeatEvent)
        cpu->schedule(this, curTick() + _interval);

    if (cpu->switchedOut()) {
        return;
    }

#ifndef NDEBUG
    double ipc = double(temp - lastNumInst) / (_interval / cpu->clockPeriod());

    DPRINTFN("%s progress event, total committed:%i, progress insts committed: "
             "%lli, IPC: %0.8d\n", cpu->name(), temp, temp - lastNumInst,
             ipc);
    ipc = 0.0;
#else
    cprintf("%lli: %s progress event, total committed:%i, progress insts "
            "committed: %lli\n", curTick(), cpu->name(), temp,
            temp - lastNumInst);
#endif
    lastNumInst = temp;
}
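
// A worked example of the IPC computation in process() above, with
// illustrative numbers: if _interval is 1000000 ticks and the CPU clock
// period is 500 ticks, the event window spans 2000 cycles; committing
// 1500 instructions in that window yields an IPC of 1500 / 2000 = 0.75.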

const char *
CPUProgressEvent::description() const
{
    return "CPU Progress";
}

BaseCPU::BaseCPU(Params *p, bool is_checker)
    : MemObject(p), instCnt(0), _cpuId(p->cpu_id), _socketId(p->socket_id),
      _instMasterId(p->system->getMasterId(name() + ".inst")),
      _dataMasterId(p->system->getMasterId(name() + ".data")),
      _taskId(ContextSwitchTaskId::Unknown), _pid(invldPid),
      _switchedOut(p->switched_out), _cacheLineSize(p->system->cacheLineSize()),
      interrupts(p->interrupts), profileEvent(NULL),
      numThreads(p->numThreads), system(p->system),
      functionTraceStream(nullptr), currentFunctionStart(0),
      currentFunctionEnd(0), functionEntryTick(0),
      addressMonitor(p->numThreads),
      syscallRetryLatency(p->syscallRetryLatency)
{
    // if Python did not provide a valid ID, do it here
    if (_cpuId == -1) {
        _cpuId = cpuList.size();
    }

    // add self to global list of CPUs
    cpuList.push_back(this);

    DPRINTF(SyscallVerbose, "Constructing CPU with id %d, socket id %d\n",
            _cpuId, _socketId);

    if (numThreads > maxThreadsPerCPU)
        maxThreadsPerCPU = numThreads;

    // allocate per-thread instruction-based event queues
    comInstEventQueue = new EventQueue *[numThreads];
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        comInstEventQueue[tid] =
            new EventQueue("instruction-based event queue");
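
    // Note: these per-thread queues advance in committed-instruction
    // counts rather than simulator ticks; scheduleInstStop() below
    // relies on that to place exit events at absolute instruction counts.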

    //
    // set up instruction-count-based termination events, if any
    //
    if (p->max_insts_any_thread != 0) {
        const char *cause = "a thread reached the max instruction count";
        for (ThreadID tid = 0; tid < numThreads; ++tid)
            scheduleInstStop(tid, p->max_insts_any_thread, cause);
    }

    // Set up instruction-count-based termination events for SimPoints.
    // Typically there is more than one action point.
    // Simulation.py is responsible for taking the necessary actions upon
    // exiting the simulation loop.
    if (!p->simpoint_start_insts.empty()) {
        const char *cause = "simpoint starting point found";
        for (size_t i = 0; i < p->simpoint_start_insts.size(); ++i)
            scheduleInstStop(0, p->simpoint_start_insts[i], cause);
    }

    if (p->max_insts_all_threads != 0) {
        const char *cause = "all threads reached the max instruction count";

        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            comInstEventQueue[tid]->schedule(event, p->max_insts_all_threads);
        }
    }
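
    // For example (illustrative numbers): with numThreads == 4, each
    // thread gets a CountedExitEvent at max_insts_all_threads; the shared
    // counter starts at 4 and each triggering event decrements it, so the
    // simulation exits only once the fourth thread reaches its count.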

    // allocate per-thread load-based event queues
    comLoadEventQueue = new EventQueue *[numThreads];
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        comLoadEventQueue[tid] = new EventQueue("load-based event queue");

    //
    // set up load-count-based termination events, if any
    //
    if (p->max_loads_any_thread != 0) {
        const char *cause = "a thread reached the max load count";
        for (ThreadID tid = 0; tid < numThreads; ++tid)
            scheduleLoadStop(tid, p->max_loads_any_thread, cause);
    }

    if (p->max_loads_all_threads != 0) {
        const char *cause = "all threads reached the max load count";
        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            comLoadEventQueue[tid]->schedule(event, p->max_loads_all_threads);
        }
    }

    functionTracingEnabled = false;
    if (p->function_trace) {
        const string fname = csprintf("ftrace.%s", name());
        functionTraceStream = simout.findOrCreate(fname)->stream();

        currentFunctionStart = currentFunctionEnd = 0;
        functionEntryTick = p->function_trace_start;

        if (p->function_trace_start == 0) {
            functionTracingEnabled = true;
        } else {
            Event *event = new EventFunctionWrapper(
                [this]{ enableFunctionTrace(); }, name(), true);
            schedule(event, p->function_trace_start);
        }
    }
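
    // For example, function_trace_start == 1000 defers tracing until tick
    // 1000 via the wrapper event above, while a value of 0 turns tracing
    // on from the very first instruction.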

    // The interrupts should always be present unless this CPU is
    // switched in later or in case it is a checker CPU
    if (!params()->switched_out && !is_checker) {
        fatal_if(interrupts.size() != numThreads,
                 "CPU %s has %i interrupt controllers, but is expecting one "
                 "per thread (%i)\n",
                 name(), interrupts.size(), numThreads);
        for (ThreadID tid = 0; tid < numThreads; tid++)
            interrupts[tid]->setCPU(this);
    }

    if (FullSystem) {
        if (params()->profile)
            profileEvent = new EventFunctionWrapper(
                [this]{ processProfileEvent(); },
                name());
    }
    tracer = params()->tracer;

    if (params()->isa.size() != numThreads) {
        fatal("Number of ISAs (%i) assigned to the CPU does not equal number "
              "of threads (%i).\n", params()->isa.size(), numThreads);
    }
}

void
BaseCPU::enableFunctionTrace()
{
    functionTracingEnabled = true;
}

BaseCPU::~BaseCPU()
{
    delete[] comLoadEventQueue;
    delete[] comInstEventQueue;
}

void
BaseCPU::armMonitor(ThreadID tid, Addr address)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    monitor.armed = true;
    monitor.vAddr = address;

    DPRINTF(Mwait, "[tid:%d] Armed monitor (vAddr=0x%lx)\n", tid, address);
}

bool
BaseCPU::mwait(ThreadID tid, PacketPtr pkt)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    if (!monitor.gotWakeup) {
        int block_size = cacheLineSize();
        uint64_t mask = ~((uint64_t)(block_size - 1));

        assert(pkt->req->hasPaddr());
        monitor.pAddr = pkt->getAddr() & mask;
        monitor.waiting = true;

        DPRINTF(Mwait, "[tid:%d] mwait called (vAddr=0x%lx, "
                "line's paddr=0x%lx)\n", tid, monitor.vAddr, monitor.pAddr);
        return true;
    } else {
        monitor.gotWakeup = false;
        return false;
    }
}
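
// A note on the cache-line mask used above: with a 64-byte line,
// block_size - 1 == 0x3f and mask == ~0x3f, so the monitored physical
// address is aligned down to its line base (e.g. 0x1234 -> 0x1200).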

void
BaseCPU::mwaitAtomic(ThreadID tid, ThreadContext *tc, TheISA::TLB *dtb)
{
    assert(tid < numThreads);
    AddressMonitor &monitor = addressMonitor[tid];

    Request req;
    Addr addr = monitor.vAddr;
    int block_size = cacheLineSize();
    uint64_t mask = ~((uint64_t)(block_size - 1));
    int size = block_size;

    // The address of the next line if it crosses a cache line boundary.
    Addr secondAddr = roundDown(addr + size - 1, block_size);

    if (secondAddr > addr)
        size = secondAddr - addr;

    req.setVirt(0, addr, size, 0x0, dataMasterId(), tc->instAddr());

    // translate to physical address
    Fault fault = dtb->translateAtomic(&req, tc, BaseTLB::Read);
    assert(fault == NoFault);

    monitor.pAddr = req.getPaddr() & mask;
    monitor.waiting = true;

    DPRINTF(Mwait, "[tid:%d] mwait called (vAddr=0x%lx, line's paddr=0x%lx)\n",
            tid, monitor.vAddr, monitor.pAddr);
}
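
// A worked example of the boundary clamp above (illustrative numbers):
// with block_size == 64 and addr == 0x1010, roundDown(0x1010 + 63, 64)
// is 0x1040 > addr, so size becomes 0x30 and the request stops at the
// cache-line boundary instead of spilling into the next line.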

void
BaseCPU::init()
{
    if (!params()->switched_out) {
        registerThreadContexts();
    }
}

void
BaseCPU::startup()
{
    if (!params()->switched_out && profileEvent)
        schedule(profileEvent, curTick());

    if (params()->progress_interval) {
        new CPUProgressEvent(this, params()->progress_interval);
    }

    // Assume the CPU starts operating instantaneously, without any latency
    if (ClockedObject::pwrState() == Enums::PwrState::UNDEFINED)
        ClockedObject::pwrState(Enums::PwrState::ON);
}

ProbePoints::PMUUPtr
BaseCPU::pmuProbePoint(const char *name)
{
    ProbePoints::PMUUPtr ptr;
    ptr.reset(new ProbePoints::PMU(getProbeManager(), name));

    return ptr;
}

void
BaseCPU::regProbePoints()
{
    ppCycles = pmuProbePoint("Cycles");

    ppRetiredInsts = pmuProbePoint("RetiredInsts");
    ppRetiredLoads = pmuProbePoint("RetiredLoads");
    ppRetiredStores = pmuProbePoint("RetiredStores");
    ppRetiredBranches = pmuProbePoint("RetiredBranches");
}

void
BaseCPU::probeInstCommit(const StaticInstPtr &inst)
{
    if (!inst->isMicroop() || inst->isLastMicroop())
        ppRetiredInsts->notify(1);

    if (inst->isLoad())
        ppRetiredLoads->notify(1);

    if (inst->isStore())
        ppRetiredStores->notify(1);

    if (inst->isControl())
        ppRetiredBranches->notify(1);
}
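
// Note that the isMicroop()/isLastMicroop() test above counts a
// microcoded macro-op exactly once, when its final microop retires, so
// RetiredInsts reflects architectural instructions rather than microops.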

void
BaseCPU::regStats()
{
    MemObject::regStats();

    using namespace Stats;

    numCycles
        .name(name() + ".numCycles")
        .desc("number of cpu cycles simulated")
        ;

    numWorkItemsStarted
        .name(name() + ".numWorkItemsStarted")
        .desc("number of work items this cpu started")
        ;

    numWorkItemsCompleted
        .name(name() + ".numWorkItemsCompleted")
        .desc("number of work items this cpu completed")
        ;

    int size = threadContexts.size();
    if (size > 1) {
        for (int i = 0; i < size; ++i) {
            stringstream namestr;
            ccprintf(namestr, "%s.ctx%d", name(), i);
            threadContexts[i]->regStats(namestr.str());
        }
    } else if (size == 1)
        threadContexts[0]->regStats(name());
}

BaseMasterPort &
BaseCPU::getMasterPort(const string &if_name, PortID idx)
{
    // Get the right port based on name. This applies to all the
    // subclasses of the base CPU and relies on their implementation
    // of getDataPort and getInstPort. In all cases these methods
    // return a MasterPort reference.
    if (if_name == "dcache_port")
        return getDataPort();
    else if (if_name == "icache_port")
        return getInstPort();
    else
        return MemObject::getMasterPort(if_name, idx);
}

void
BaseCPU::registerThreadContexts()
{
    assert(system->multiThread || numThreads == 1);

    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        ThreadContext *tc = threadContexts[tid];

        if (system->multiThread) {
            tc->setContextId(system->registerThreadContext(tc));
        } else {
            tc->setContextId(system->registerThreadContext(tc, _cpuId));
        }

        if (!FullSystem)
            tc->getProcessPtr()->assignThreadContext(tc->contextId());
    }
}

int
BaseCPU::findContext(ThreadContext *tc)
{
    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        if (tc == threadContexts[tid])
            return tid;
    }
    return 0;
}

void
BaseCPU::activateContext(ThreadID thread_num)
{
    // For any active thread running, update CPU power state to active (ON)
    ClockedObject::pwrState(Enums::PwrState::ON);
}

void
BaseCPU::suspendContext(ThreadID thread_num)
{
    // Check if all threads are suspended
    for (auto t : threadContexts) {
        if (t->status() != ThreadContext::Suspended) {
            return;
        }
    }

    // All CPU threads suspended, enter lower power state for the CPU
    ClockedObject::pwrState(Enums::PwrState::CLK_GATED);
}

void
BaseCPU::switchOut()
{
    assert(!_switchedOut);
    _switchedOut = true;
    if (profileEvent && profileEvent->scheduled())
        deschedule(profileEvent);

    // Flush all TLBs in the CPU to avoid having stale translations if
    // it gets switched in later.
    flushTLBs();
}

void
BaseCPU::takeOverFrom(BaseCPU *oldCPU)
{
    assert(threadContexts.size() == oldCPU->threadContexts.size());
    assert(_cpuId == oldCPU->cpuId());
    assert(_switchedOut);
    assert(oldCPU != this);

    _pid = oldCPU->getPid();
    _taskId = oldCPU->taskId();
    _switchedOut = false;

    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *newTC = threadContexts[i];
        ThreadContext *oldTC = oldCPU->threadContexts[i];

        newTC->takeOverFrom(oldTC);

        CpuEvent::replaceThreadContext(oldTC, newTC);

        assert(newTC->contextId() == oldTC->contextId());
        assert(newTC->threadId() == oldTC->threadId());
        system->replaceThreadContext(newTC, newTC->contextId());

        /* This code no longer works since the zero register (e.g.,
         * r31 on Alpha) doesn't necessarily contain zero at this
         * point.
           if (DTRACE(Context))
            ThreadContext::compare(oldTC, newTC);
        */

        BaseMasterPort *old_itb_port = oldTC->getITBPtr()->getMasterPort();
        BaseMasterPort *old_dtb_port = oldTC->getDTBPtr()->getMasterPort();
        BaseMasterPort *new_itb_port = newTC->getITBPtr()->getMasterPort();
        BaseMasterPort *new_dtb_port = newTC->getDTBPtr()->getMasterPort();
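
        // These master ports belong to hardware table walkers, which only
        // some ISAs provide; a TLB without a walker is expected to return
        // NULL from getMasterPort(), hence the guards below.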

        // Move over any table walker ports if they exist
        if (new_itb_port) {
            assert(!new_itb_port->isConnected());
            assert(old_itb_port);
            assert(old_itb_port->isConnected());
            BaseSlavePort &slavePort = old_itb_port->getSlavePort();
            old_itb_port->unbind();
            new_itb_port->bind(slavePort);
        }
        if (new_dtb_port) {
            assert(!new_dtb_port->isConnected());
            assert(old_dtb_port);
            assert(old_dtb_port->isConnected());
            BaseSlavePort &slavePort = old_dtb_port->getSlavePort();
            old_dtb_port->unbind();
            new_dtb_port->bind(slavePort);
        }
        newTC->getITBPtr()->takeOverFrom(oldTC->getITBPtr());
        newTC->getDTBPtr()->takeOverFrom(oldTC->getDTBPtr());

        // Check whether or not we have to transfer CheckerCPU
        // objects over in the switch
        CheckerCPU *oldChecker = oldTC->getCheckerCpuPtr();
        CheckerCPU *newChecker = newTC->getCheckerCpuPtr();
        if (oldChecker && newChecker) {
            BaseMasterPort *old_checker_itb_port =
                oldChecker->getITBPtr()->getMasterPort();
            BaseMasterPort *old_checker_dtb_port =
                oldChecker->getDTBPtr()->getMasterPort();
            BaseMasterPort *new_checker_itb_port =
                newChecker->getITBPtr()->getMasterPort();
            BaseMasterPort *new_checker_dtb_port =
                newChecker->getDTBPtr()->getMasterPort();

            newChecker->getITBPtr()->takeOverFrom(oldChecker->getITBPtr());
            newChecker->getDTBPtr()->takeOverFrom(oldChecker->getDTBPtr());

            // Move over any table walker ports if they exist for checker
            if (new_checker_itb_port) {
                assert(!new_checker_itb_port->isConnected());
                assert(old_checker_itb_port);
                assert(old_checker_itb_port->isConnected());
                BaseSlavePort &slavePort =
                    old_checker_itb_port->getSlavePort();
                old_checker_itb_port->unbind();
                new_checker_itb_port->bind(slavePort);
            }
            if (new_checker_dtb_port) {
                assert(!new_checker_dtb_port->isConnected());
                assert(old_checker_dtb_port);
                assert(old_checker_dtb_port->isConnected());
                BaseSlavePort &slavePort =
                    old_checker_dtb_port->getSlavePort();
                old_checker_dtb_port->unbind();
                new_checker_dtb_port->bind(slavePort);
            }
        }
    }

    interrupts = oldCPU->interrupts;
    for (ThreadID tid = 0; tid < numThreads; tid++) {
        interrupts[tid]->setCPU(this);
    }
    oldCPU->interrupts.clear();

    if (FullSystem) {
        for (ThreadID i = 0; i < size; ++i)
            threadContexts[i]->profileClear();

        if (profileEvent)
            schedule(profileEvent, curTick());
    }

    // All CPUs have an instruction and a data port, and the new CPU's
    // ports are dangling while the old CPU has its ports connected
    // already. Unbind the old CPU and then bind the ports of the one
    // we are switching to.
    assert(!getInstPort().isConnected());
    assert(oldCPU->getInstPort().isConnected());
    BaseSlavePort &inst_peer_port = oldCPU->getInstPort().getSlavePort();
    oldCPU->getInstPort().unbind();
    getInstPort().bind(inst_peer_port);

    assert(!getDataPort().isConnected());
    assert(oldCPU->getDataPort().isConnected());
    BaseSlavePort &data_peer_port = oldCPU->getDataPort().getSlavePort();
    oldCPU->getDataPort().unbind();
    getDataPort().bind(data_peer_port);
}

void
BaseCPU::flushTLBs()
{
    for (ThreadID i = 0; i < threadContexts.size(); ++i) {
        ThreadContext &tc(*threadContexts[i]);
        CheckerCPU *checker(tc.getCheckerCpuPtr());

        tc.getITBPtr()->flushAll();
        tc.getDTBPtr()->flushAll();
        if (checker) {
            checker->getITBPtr()->flushAll();
            checker->getDTBPtr()->flushAll();
        }
    }
}

void
BaseCPU::processProfileEvent()
{
    ThreadID size = threadContexts.size();

    for (ThreadID i = 0; i < size; ++i)
        threadContexts[i]->profileSample();

    schedule(profileEvent, curTick() + params()->profile);
}

void
BaseCPU::serialize(CheckpointOut &cp) const
{
    SERIALIZE_SCALAR(instCnt);

    if (!_switchedOut) {
        /* Unlike _pid, _taskId is not serialized, as they are dynamically
         * assigned unique ids that are only meaningful for the duration of
         * a specific run. We will need to serialize the entire taskMap in
         * system. */
        SERIALIZE_SCALAR(_pid);

        // Serialize the threads, this is done by the CPU implementation.
        for (ThreadID i = 0; i < numThreads; ++i) {
            ScopedCheckpointSection sec(cp, csprintf("xc.%i", i));
            interrupts[i]->serialize(cp);
            serializeThread(cp, i);
        }
    }
}
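
// For reference, the loop above produces one checkpoint section per
// thread, named "xc.0", "xc.1", and so on, each holding that thread's
// interrupt controller state followed by the CPU-specific thread state.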

void
BaseCPU::unserialize(CheckpointIn &cp)
{
    UNSERIALIZE_SCALAR(instCnt);

    if (!_switchedOut) {
        UNSERIALIZE_SCALAR(_pid);

        // Unserialize the threads, this is done by the CPU implementation.
        for (ThreadID i = 0; i < numThreads; ++i) {
            ScopedCheckpointSection sec(cp, csprintf("xc.%i", i));
            interrupts[i]->unserialize(cp);
            unserializeThread(cp, i);
        }
    }
}

void
BaseCPU::scheduleInstStop(ThreadID tid, Counter insts, const char *cause)
{
    const Tick now(comInstEventQueue[tid]->getCurTick());
    Event *event(new LocalSimLoopExitEvent(cause, 0));

    comInstEventQueue[tid]->schedule(event, now + insts);
}
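
// Example (illustrative numbers): if a thread has already committed 1000
// instructions, scheduleInstStop(tid, 500, cause) queues the exit event
// at instruction count 1500 on that thread's instruction-based queue.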

uint64_t
BaseCPU::getCurrentInstCount(ThreadID tid)
{
    return Tick(comInstEventQueue[tid]->getCurTick());
}

AddressMonitor::AddressMonitor() {
    armed = false;
    waiting = false;
    gotWakeup = false;
}

bool AddressMonitor::doMonitor(PacketPtr pkt) {
    assert(pkt->req->hasPaddr());
    if (armed && waiting) {
        if (pAddr == pkt->getAddr()) {
            DPRINTF(Mwait, "pAddr=0x%lx invalidated: waking up core\n",
                    pkt->getAddr());
            waiting = false;
            return true;
        }
    }
    return false;
}

void
BaseCPU::scheduleLoadStop(ThreadID tid, Counter loads, const char *cause)
{
    const Tick now(comLoadEventQueue[tid]->getCurTick());
    Event *event(new LocalSimLoopExitEvent(cause, 0));

    comLoadEventQueue[tid]->schedule(event, now + loads);
}

void
BaseCPU::traceFunctionsInternal(Addr pc)
{
    if (!debugSymbolTable)
        return;

    // if pc enters different function, print new function symbol and
    // update saved range. Otherwise do nothing.
    if (pc < currentFunctionStart || pc >= currentFunctionEnd) {
        string sym_str;
        bool found = debugSymbolTable->findNearestSymbol(pc, sym_str,
                                                         currentFunctionStart,
                                                         currentFunctionEnd);

        if (!found) {
            // no symbol found: use addr as label
            sym_str = csprintf("0x%x", pc);
            currentFunctionStart = pc;
            currentFunctionEnd = pc + 1;
        }

        ccprintf(*functionTraceStream, " (%d)\n%d: %s",
                 curTick() - functionEntryTick, curTick(), sym_str);
        functionEntryTick = curTick();
    }
}
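
// Given the format string above, each completed line of the trace stream
// reads "<tick>: <symbol> (<ticks spent in the previous function>)",
// e.g. "12000: memcpy (340)" (illustrative values).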

bool
BaseCPU::waitForRemoteGDB() const
{
    return params()->wait_for_remote_gdb;
}