/*
 * Copyright (c) 2011 ARM Limited
 * All rights reserved
 *
 * The license below extends only to copyright in the software and shall
 * not be construed as granting a license to any other intellectual
 * property including but not limited to intellectual property relating
 * to a hardware implementation of the functionality of the software
 * licensed hereunder.  You may use the software subject to the license
 * terms below provided that you ensure that this notice is replicated
 * unmodified and in its entirety in all distributions of the software,
 * modified or unmodified, in source code or in binary form.
 *
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * Copyright (c) 2011 Regents of the University of California
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Steve Reinhardt
 */

#include <sstream>
#include <string>

#include "arch/tlb.hh"
#include "base/loader/symtab.hh"
#include "base/cprintf.hh"
#include "base/misc.hh"
#include "base/output.hh"
#include "base/trace.hh"
#include "config/use_checker.hh"
#include "cpu/base.hh"
#include "cpu/cpuevent.hh"
#include "cpu/profile.hh"
#include "cpu/thread_context.hh"
#include "debug/SyscallVerbose.hh"
#include "params/BaseCPU.hh"
#include "sim/full_system.hh"
#include "sim/process.hh"
#include "sim/sim_events.hh"
#include "sim/sim_exit.hh"
#include "sim/system.hh"

// The CheckerCPU is only available when gem5 is built with checker
// support, so guard the include on the generated config flag.
#if USE_CHECKER
#include "cpu/checker/cpu.hh"
#endif

#include "sim/stat_control.hh"

using namespace std;

vector<BaseCPU *> BaseCPU::cpuList;

// This variable reflects the max number of threads in any CPU.  Be
// careful to only use it once all the CPUs that you care about have
// been initialized.
int maxThreadsPerCPU = 1;

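// CPUProgressEvent periodically reports how many instructions a CPU has
// committed since the last report; while _repeatEvent is set it
// reschedules itself every _interval ticks.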
CPUProgressEvent::CPUProgressEvent(BaseCPU *_cpu, Tick ival)
    : Event(Event::Progress_Event_Pri), _interval(ival), lastNumInst(0),
      cpu(_cpu), _repeatEvent(true)
{
    if (_interval)
        cpu->schedule(this, curTick() + _interval);
}

void
CPUProgressEvent::process()
{
    Counter temp = cpu->totalInstructions();
#ifndef NDEBUG
    double ipc = double(temp - lastNumInst) / (_interval / cpu->ticks(1));
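    // The divisor (_interval / cpu->ticks(1)) is the interval expressed
    // in CPU cycles, so ipc is committed instructions per cycle over
    // the last interval.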

    DPRINTFN("%s progress event, total committed:%i, progress insts committed: "
             "%lli, IPC: %0.8d\n", cpu->name(), temp, temp - lastNumInst,
             ipc);
    ipc = 0.0;
#else
    cprintf("%lli: %s progress event, total committed:%i, progress insts "
            "committed: %lli\n", curTick(), cpu->name(), temp,
            temp - lastNumInst);
#endif
    lastNumInst = temp;

    if (_repeatEvent)
        cpu->schedule(this, curTick() + _interval);
}

const char *
CPUProgressEvent::description() const
{
    return "CPU Progress";
}

BaseCPU::BaseCPU(Params *p)
    : MemObject(p), clock(p->clock), instCnt(0), _cpuId(p->cpu_id),
      interrupts(p->interrupts),
      numThreads(p->numThreads), system(p->system),
      phase(p->phase)
{
//    currentTick = curTick();

    // if Python did not provide a valid ID, do it here
    if (_cpuId == -1) {
        _cpuId = cpuList.size();
    }

    // add self to global list of CPUs
    cpuList.push_back(this);

    DPRINTF(SyscallVerbose, "Constructing CPU with id %d\n", _cpuId);

    if (numThreads > maxThreadsPerCPU)
        maxThreadsPerCPU = numThreads;

    // allocate per-thread instruction-based event queues
    comInstEventQueue = new EventQueue *[numThreads];
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        comInstEventQueue[tid] =
            new EventQueue("instruction-based event queue");
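
    // Unlike the main tick-based queue, these queues advance with each
    // thread's committed instruction count; events scheduled on them
    // fire when that thread's count reaches the scheduled value.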

    // set up instruction-count-based termination events, if any
    if (p->max_insts_any_thread != 0) {
        const char *cause = "a thread reached the max instruction count";
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new SimLoopExitEvent(cause, 0);
            comInstEventQueue[tid]->schedule(event, p->max_insts_any_thread);
        }
    }
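
    // These limits normally come from the Python configuration; for
    // example, a script might set:
    //     system.cpu = TimingSimpleCPU(max_insts_any_thread=50000000)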

    if (p->max_insts_all_threads != 0) {
        const char *cause = "all threads reached the max instruction count";

        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            comInstEventQueue[tid]->schedule(event, p->max_insts_all_threads);
        }
    }
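
    // Each CountedExitEvent decrements this shared counter and exits the
    // simulation loop only when it reaches zero, i.e. once every thread
    // has hit its limit; the counter is heap-allocated so it outlives
    // this constructor.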

    // allocate per-thread load-based event queues
    comLoadEventQueue = new EventQueue *[numThreads];
    for (ThreadID tid = 0; tid < numThreads; ++tid)
        comLoadEventQueue[tid] = new EventQueue("load-based event queue");

    // set up load-count-based termination events, if any
    if (p->max_loads_any_thread != 0) {
        const char *cause = "a thread reached the max load count";
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new SimLoopExitEvent(cause, 0);
            comLoadEventQueue[tid]->schedule(event, p->max_loads_any_thread);
        }
    }

    if (p->max_loads_all_threads != 0) {
        const char *cause = "all threads reached the max load count";
        // allocate & initialize shared downcounter: each event will
        // decrement this when triggered; simulation will terminate
        // when counter reaches 0
        int *counter = new int;
        *counter = numThreads;
        for (ThreadID tid = 0; tid < numThreads; ++tid) {
            Event *event = new CountedExitEvent(cause, *counter);
            comLoadEventQueue[tid]->schedule(event, p->max_loads_all_threads);
        }
    }
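
    // Function tracing writes an "ftrace.<cpu name>" file to the
    // simulation output directory, either from tick 0 or starting at
    // function_trace_start.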
    functionTracingEnabled = false;
    if (p->function_trace) {
        const string fname = csprintf("ftrace.%s", name());
        functionTraceStream = simout.find(fname);
        if (!functionTraceStream)
            functionTraceStream = simout.create(fname);

        currentFunctionStart = currentFunctionEnd = 0;
        functionEntryTick = p->function_trace_start;

        if (p->function_trace_start == 0) {
            functionTracingEnabled = true;
        } else {
            typedef EventWrapper<BaseCPU, &BaseCPU::enableFunctionTrace> wrap;
            Event *event = new wrap(this, true);
            schedule(event, p->function_trace_start);
        }
    }

    // Check if CPU model has interrupts connected. The CheckerCPU
    // cannot take interrupts directly for example.
    if (interrupts)
        interrupts->setCPU(this);

    profileEvent = NULL;
    if (params()->profile)
        profileEvent = new ProfileEvent(this, params()->profile);

    tracer = params()->tracer;
}

void
BaseCPU::enableFunctionTrace()
{
    functionTracingEnabled = true;
}

void
BaseCPU::init()
{
    if (!params()->defer_registration)
        registerThreadContexts();
}

void
BaseCPU::startup()
{
    if (!params()->defer_registration && profileEvent)
        schedule(profileEvent, curTick());

    if (params()->progress_interval) {
        Tick num_ticks = ticks(params()->progress_interval);
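        // progress_interval is specified in CPU cycles; ticks() scales
        // it by the clock period so the event fires in tick time.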
        new CPUProgressEvent(this, num_ticks);
    }
}

void
BaseCPU::regStats()
{
    using namespace Stats;

    numCycles
        .name(name() + ".numCycles")
        .desc("number of cpu cycles simulated")
        ;

    numWorkItemsStarted
        .name(name() + ".numWorkItemsStarted")
        .desc("number of work items this cpu started")
        ;

    numWorkItemsCompleted
        .name(name() + ".numWorkItemsCompleted")
        .desc("number of work items this cpu completed")
        ;

    int size = threadContexts.size();
    if (size > 1) {
        for (int i = 0; i < size; ++i) {
            stringstream namestr;
            ccprintf(namestr, "%s.ctx%d", name(), i);
            threadContexts[i]->regStats(namestr.str());
        }
    } else if (size == 1)
        threadContexts[0]->regStats(name());
}

Tick
BaseCPU::nextCycle()
{
    Tick next_tick = curTick() - phase + clock - 1;
    next_tick -= (next_tick % clock);
    next_tick += phase;
    return next_tick;
}
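
// Both nextCycle() variants round a tick up to the CPU's next clock
// edge (offset by phase). For example, with clock == 500 and phase == 0,
// tick 1250 maps to 1500, while a tick already on an edge (such as
// 1500) maps to itself.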

Tick
BaseCPU::nextCycle(Tick begin_tick)
{
    Tick next_tick = begin_tick;
    if (next_tick % clock != 0)
        next_tick = next_tick - (next_tick % clock) + clock;
    next_tick += phase;

    assert(next_tick >= curTick());
    return next_tick;
}

void
BaseCPU::registerThreadContexts()
{
    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        ThreadContext *tc = threadContexts[tid];

        /** This is so that contextId and cpuId match where there is a
         * 1cpu:1context relationship.  Otherwise, the order of registration
         * could affect the assignment and cpu 1 could have context id 3, for
         * example.  We may even want to do something like this for SMT so
         * that cpu 0 has the lowest thread contexts and cpu N has the
         * highest, but I'll just do this for now
         */
        if (numThreads == 1)
            tc->setContextId(system->registerThreadContext(tc, _cpuId));
        else
            tc->setContextId(system->registerThreadContext(tc));

        if (!FullSystem)
            tc->getProcessPtr()->assignThreadContext(tc->contextId());
    }
}

int
BaseCPU::findContext(ThreadContext *tc)
{
    ThreadID size = threadContexts.size();
    for (ThreadID tid = 0; tid < size; ++tid) {
        if (tc == threadContexts[tid])
            return tid;
    }
    return 0;
}

void
BaseCPU::switchOut()
{
    if (profileEvent && profileEvent->scheduled())
        deschedule(profileEvent);
}
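
// takeOverFrom() runs on the incoming CPU when simulation switches CPU
// models (for example from a fast functional CPU used for warm-up to a
// detailed timing CPU): it transfers per-thread state and splices this
// CPU's unconnected ports into the old CPU's place in the memory system.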
void
BaseCPU::takeOverFrom(BaseCPU *oldCPU)
{
    Port *ic = getPort("icache_port");
    Port *dc = getPort("dcache_port");
    assert(threadContexts.size() == oldCPU->threadContexts.size());

    _cpuId = oldCPU->cpuId();

    ThreadID size = threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *newTC = threadContexts[i];
        ThreadContext *oldTC = oldCPU->threadContexts[i];

        newTC->takeOverFrom(oldTC);

        CpuEvent::replaceThreadContext(oldTC, newTC);

        assert(newTC->contextId() == oldTC->contextId());
        assert(newTC->threadId() == oldTC->threadId());
        system->replaceThreadContext(newTC, newTC->contextId());

        /* This code no longer works since the zero register (e.g.,
         * r31 on Alpha) doesn't necessarily contain zero at this
         * point.
         *
         *   if (DTRACE(Context))
         *       ThreadContext::compare(oldTC, newTC);
         */

        Port *old_itb_port, *old_dtb_port, *new_itb_port, *new_dtb_port;
        old_itb_port = oldTC->getITBPtr()->getPort();
        old_dtb_port = oldTC->getDTBPtr()->getPort();
        new_itb_port = newTC->getITBPtr()->getPort();
        new_dtb_port = newTC->getDTBPtr()->getPort();

        // Move over any table walker ports if they exist
        if (new_itb_port && !new_itb_port->isConnected()) {
            assert(old_itb_port);
            Port *peer = old_itb_port->getPeer();
            new_itb_port->setPeer(peer);
            peer->setPeer(new_itb_port);
        }
        if (new_dtb_port && !new_dtb_port->isConnected()) {
            assert(old_dtb_port);
            Port *peer = old_dtb_port->getPeer();
            new_dtb_port->setPeer(peer);
            peer->setPeer(new_dtb_port);
        }

#if USE_CHECKER
        Port *old_checker_itb_port, *old_checker_dtb_port;
        Port *new_checker_itb_port, *new_checker_dtb_port;

        CheckerCPU *oldChecker =
            dynamic_cast<CheckerCPU *>(oldTC->getCheckerCpuPtr());
        CheckerCPU *newChecker =
            dynamic_cast<CheckerCPU *>(newTC->getCheckerCpuPtr());
        old_checker_itb_port = oldChecker->getITBPtr()->getPort();
        old_checker_dtb_port = oldChecker->getDTBPtr()->getPort();
        new_checker_itb_port = newChecker->getITBPtr()->getPort();
        new_checker_dtb_port = newChecker->getDTBPtr()->getPort();

        // Move over any table walker ports if they exist for checker
        if (new_checker_itb_port && !new_checker_itb_port->isConnected()) {
            assert(old_checker_itb_port);
            Port *peer = old_checker_itb_port->getPeer();
            new_checker_itb_port->setPeer(peer);
            peer->setPeer(new_checker_itb_port);
        }
        if (new_checker_dtb_port && !new_checker_dtb_port->isConnected()) {
            assert(old_checker_dtb_port);
            Port *peer = old_checker_dtb_port->getPeer();
            new_checker_dtb_port->setPeer(peer);
            peer->setPeer(new_checker_dtb_port);
        }
#endif
    }

    interrupts = oldCPU->interrupts;
    interrupts->setCPU(this);

    if (FullSystem) {
        for (ThreadID i = 0; i < size; ++i)
            threadContexts[i]->profileClear();

        if (profileEvent)
            schedule(profileEvent, curTick());
    }

    // Connect new CPU to old CPU's memory only if new CPU isn't
    // connected to anything.  Also connect old CPU's memory to new
    // CPU.
    if (!ic->isConnected()) {
        Port *peer = oldCPU->getPort("icache_port")->getPeer();
        ic->setPeer(peer);
        peer->setPeer(ic);
    }

    if (!dc->isConnected()) {
        Port *peer = oldCPU->getPort("dcache_port")->getPeer();
        dc->setPeer(peer);
        peer->setPeer(dc);
    }
}

BaseCPU::ProfileEvent::ProfileEvent(BaseCPU *_cpu, Tick _interval)
    : cpu(_cpu), interval(_interval)
{
}

void
BaseCPU::ProfileEvent::process()
{
    ThreadID size = cpu->threadContexts.size();
    for (ThreadID i = 0; i < size; ++i) {
        ThreadContext *tc = cpu->threadContexts[i];
        tc->profileSample();
    }

    cpu->schedule(this, curTick() + interval);
}

void
BaseCPU::serialize(std::ostream &os)
{
    SERIALIZE_SCALAR(instCnt);
    interrupts->serialize(os);
}
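
// Checkpointing at this level covers only the instruction count and the
// interrupt controller; concrete CPU models serialize their own state
// in addition to calling these base implementations.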

void
BaseCPU::unserialize(Checkpoint *cp, const std::string &section)
{
    UNSERIALIZE_SCALAR(instCnt);
    interrupts->unserialize(cp, section);
}

void
BaseCPU::traceFunctionsInternal(Addr pc)
{
    if (!debugSymbolTable)
        return;

    // if pc enters different function, print new function symbol and
    // update saved range.  Otherwise do nothing.
    if (pc < currentFunctionStart || pc >= currentFunctionEnd) {
        string sym_str;
        bool found = debugSymbolTable->findNearestSymbol(pc, sym_str,
                                                         currentFunctionStart,
                                                         currentFunctionEnd);

        if (!found) {
            // no symbol found: use addr as label
            sym_str = csprintf("0x%x", pc);
            currentFunctionStart = pc;
            currentFunctionEnd = pc + 1;
        }

        ccprintf(*functionTraceStream, " (%d)\n%d: %s",
                 curTick() - functionEntryTick, curTick(), sym_str);
        functionEntryTick = curTick();
    }
}
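
// Default CpuPort callbacks: a CPU port originates requests, so in the
// common case nothing should arrive through these receive hooks; CPU
// models that do expect such callbacks override them.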
bool
BaseCPU::CpuPort::recvTiming(PacketPtr pkt)
{
    panic("BaseCPU doesn't expect recvTiming callback!");
    return true;
}

void
BaseCPU::CpuPort::recvRetry()
{
    panic("BaseCPU doesn't expect recvRetry callback!");
}

Tick
BaseCPU::CpuPort::recvAtomic(PacketPtr pkt)
{
    panic("BaseCPU doesn't expect recvAtomic callback!");
    return 0;
}

void
BaseCPU::CpuPort::recvFunctional(PacketPtr pkt)
{
    // No internal storage to update (in the general case). In the
    // long term this should never be called, but that assumed a split
    // into master/slave and request/response.
}

void
BaseCPU::CpuPort::recvRangeChange()
{
}