2 * Copyright (c) 2011-2013 ARM Limited
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * Copyright (c) 2011 Regents of the University of California
16 * All rights reserved.
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 * Authors: Steve Reinhardt
46 #ifndef __CPU_BASE_HH__
47 #define __CPU_BASE_HH__
51 // Before we do anything else, check if this build is the NULL ISA,
52 // and if so stop here
53 #include "config/the_isa.hh"
54 #if THE_ISA == NULL_ISA
55 #include "arch/null/cpu_dummy.hh"
57 #include "arch/interrupts.hh"
58 #include "arch/isa_traits.hh"
59 #include "arch/microcode_rom.hh"
60 #include "base/statistics.hh"
61 #include "mem/mem_object.hh"
62 #include "sim/eventq.hh"
63 #include "sim/full_system.hh"
64 #include "sim/insttracer.hh"
65 #include "sim/probe/pmu.hh"
66 #include "sim/system.hh"
67 #include "debug/Mwait.hh"
// NOTE(review): fragment of the mwait/monitor support structure; the enclosing
// struct declaration and its other members fall on lines missing from this
// view -- confirm against the full file.
77 bool doMonitor(PacketPtr pkt);
83 bool waiting; // 0=normal, 1=mwaiting
// Event that periodically reports a CPU's progress. Interior member
// declarations (e.g. the interval and repeat-flag fields read by the inline
// accessors below) fall on lines missing from this view.
87 class CPUProgressEvent : public Event
96 CPUProgressEvent(BaseCPU *_cpu, Tick ival = 0);
// Set / get the reporting interval, in ticks.
100 void interval(Tick ival) { _interval = ival; }
101 Tick interval() { return _interval; }
// Control whether the event re-arms itself after firing.
103 void repeatEvent(bool repeat) { _repeatEvent = repeat; }
105 virtual const char *description() const;
// Common base class for all CPU models. NOTE(review): many member
// declarations (e.g. _cpuId, _taskId, _pid, _switchedOut, instCnt) fall on
// lines missing from this view; the accessors below reference them.
108 class BaseCPU : public MemObject
112 /// Instruction count used for SPARC misc register
113 /// @todo unify this with the counters that cpus individually keep
116 // every cpu has an id, put it in the base cpu
117 // Set at initialization, only time a cpuId might change is during a
118 // takeover (which should be done from within the BaseCPU anyway,
119 // therefore no setCpuId() method is provided
122 /** Each cpu will have a socket ID that corresponds to its physical location
123 * in the system. This is usually used to bucket cpu cores under single DVFS
124 * domain. This information may also be required by the OS to identify the
125 * cpu core grouping (as in the case of ARM via MPIDR register)
 */
127 const uint32_t _socketId;
129 /** instruction side request id that must be placed in all requests */
130 MasterID _instMasterId;
132 /** data side request id that must be placed in all requests */
133 MasterID _dataMasterId;
135 /** An internal representation of a task identifier within gem5. This is
136 * used so the CPU can add which taskId (which is an internal representation
137 * of the OS process ID) to each request so components in the memory system
138 * can track which process IDs are ultimately interacting with them
 */
142 /** The current OS process ID that is executing on this processor. This is
143 * used to generate a taskId */
146 /** Is the CPU switched out or active? */
149 /** Cache the cache line size that we get from the system */
150 const unsigned int _cacheLineSize;
/**
155 * Purely virtual method that returns a reference to the data
156 * port. All subclasses must implement this method.
158 * @return a reference to the data port
 */
160 virtual MasterPort &getDataPort() = 0;
/**
163 * Purely virtual method that returns a reference to the instruction
164 * port. All subclasses must implement this method.
166 * @return a reference to the instruction port
 */
168 virtual MasterPort &getInstPort() = 0;
170 /** Reads this CPU's ID. */
171 int cpuId() const { return _cpuId; }
173 /** Reads this CPU's Socket ID. */
174 uint32_t socketId() const { return _socketId; }
176 /** Reads this CPU's unique data requestor ID */
177 MasterID dataMasterId() { return _dataMasterId; }
178 /** Reads this CPU's unique instruction requestor ID */
179 MasterID instMasterId() { return _instMasterId; }
/**
182 * Get a master port on this CPU. All CPUs have a data and
183 * instruction port, and this method uses getDataPort and
184 * getInstPort of the subclasses to resolve the two ports.
186 * @param if_name the port name
187 * @param idx ignored index
189 * @return a reference to the port with the given name
 */
191 BaseMasterPort &getMasterPort(const std::string &if_name,
192 PortID idx = InvalidPortID);
194 /** Get cpu task id */
195 uint32_t taskId() const { return _taskId; }
196 /** Set cpu task id */
197 void taskId(uint32_t id) { _taskId = id; }
// Accessors for the OS process ID currently running on this CPU.
199 uint32_t getPid() const { return _pid; }
200 void setPid(uint32_t pid) { _pid = pid; }
// Bump the work-item statistics counters at the begin/end of a work item.
202 inline void workItemBegin() { numWorkItemsStarted++; }
203 inline void workItemEnd() { numWorkItemsCompleted++; }
204 // @todo remove me after debugging with legion done
205 Tick instCount() { return instCnt; }
207 TheISA::MicrocodeRom microcodeRom;
// Per-CPU interrupt controller (ISA-specific type).
210 TheISA::Interrupts *interrupts;
// Accessor for the interrupt controller; its return type and body fall on
// lines missing from this view.
214 getInterruptController()
// Wake the CPU from a suspended/waiting state; model-specific, pure virtual.
219 virtual void wakeup() = 0;
// Post an interrupt by forwarding to the interrupt controller.
222 postInterrupt(int int_num, int index)
224 interrupts->post(int_num, index);
// Clear a single interrupt / all pending interrupts via the controller.
230 clearInterrupt(int int_num, int index)
232 interrupts->clear(int_num, index);
238 interrupts->clearAll();
// True only in full-system mode and when the controller reports a pending
// interrupt for this thread context.
242 checkInterrupts(ThreadContext *tc) const
244 return FullSystem && interrupts->checkInterrupts(tc);
// Periodic event used for profiling; interior members are on missing lines.
247 class ProfileEvent : public Event
254 ProfileEvent(BaseCPU *cpu, Tick interval);
257 ProfileEvent *profileEvent;
// Thread contexts hosted by this CPU, indexed by thread number.
260 std::vector<ThreadContext *> threadContexts;
// Instruction tracer used for generating execution traces.
262 Trace::InstTracer * tracer;
267 /** Invalid or unknown Pid. Possible when operating system is not present
268 * or has not assigned a pid yet */
269 static const uint32_t invldPid = std::numeric_limits<uint32_t>::max();
271 // Mask to align PCs to MachInst sized boundaries
272 static const Addr PCMask = ~((Addr)sizeof(TheISA::MachInst) - 1);
274 /// Provide access to the tracer pointer
275 Trace::InstTracer * getTracer() { return tracer; }
277 /// Notify the CPU that the indicated context is now active.
278 virtual void activateContext(ThreadID thread_num) {}
280 /// Notify the CPU that the indicated context is now suspended.
281 virtual void suspendContext(ThreadID thread_num) {}
283 /// Notify the CPU that the indicated context is now halted.
284 virtual void haltContext(ThreadID thread_num) {}
286 /// Given a Thread Context pointer return the thread num
287 int findContext(ThreadContext *tc);
289 /// Given a thread num get the thread context for it
290 virtual ThreadContext *getContext(int tn) { return threadContexts[tn]; }
292 /// Get the number of thread contexts available
293 unsigned numContexts() { return threadContexts.size(); }
// Parameter-struct accessor; _params is declared on a missing line
// (presumably in MemObject/SimObject -- confirm against full source).
296 typedef BaseCPUParams Params;
297 const Params *params() const
298 { return reinterpret_cast<const Params *>(_params); }
299 BaseCPU(Params *params, bool is_checker = false);
// Standard SimObject lifecycle hooks.
303 virtual void startup();
304 virtual void regStats();
306 void regProbePoints() M5_ATTR_OVERRIDE;
// Register this CPU's thread contexts with the system.
308 void registerThreadContexts();
/**
311 * Prepare for another CPU to take over execution.
313 * When this method exits, all internal state should have been
314 * flushed. After the method returns, the simulator calls
315 * takeOverFrom() on the new CPU with this CPU as its parameter.
 */
317 virtual void switchOut();
/**
320 * Load the state of a CPU from the previous CPU object, invoked
321 * on all new CPUs that are about to be switched in.
323 * A CPU model implementing this method is expected to initialize
324 * its state from the old CPU and connect its memory (unless they
325 * are already connected) to the memories connected to the old
328 * @param cpu CPU to initialize read state from.
 */
330 virtual void takeOverFrom(BaseCPU *cpu);
/**
333 * Flush all TLBs in the CPU.
335 * This method is mainly used to flush stale translations when
336 * switching CPUs. It is also exported to the Python world to
337 * allow it to request a TLB flush after draining the CPU to make
338 * it easier to compare traces when debugging
339 * handover/checkpointing.
 */
// NOTE(review): the flushTLBs() declaration itself falls on a line missing
// from this view.
/**
344 * Determine if the CPU is switched out.
346 * @return True if the CPU is switched out, false otherwise.
 */
348 bool switchedOut() const { return _switchedOut; }
/**
351 * Verify that the system is in a memory mode supported by the
354 * Implementations are expected to query the system for the
355 * current memory mode and ensure that it is what the CPU model
356 * expects. If the check fails, the implementation should
357 * terminate the simulation using fatal().
 */
359 virtual void verifyMemoryMode() const { };
/**
362 * Number of threads we're actually simulating (<= SMT_MAX_THREADS).
363 * This is a constant for the duration of the simulation.
 */
/**
368 * Vector of per-thread instruction-based event queues. Used for
369 * scheduling events based on number of instructions committed by
370 * a particular thread.
 */
372 EventQueue **comInstEventQueue;
/**
375 * Vector of per-thread load-based event queues. Used for
376 * scheduling events based on number of loads committed by
377 * a particular thread.
 */
379 EventQueue **comLoadEventQueue;
/**
384 * Get the cache line size of the system.
 */
386 inline unsigned int cacheLineSize() const { return _cacheLineSize; }
/**
389 * Serialize this object to the given output stream.
391 * @note CPU models should normally overload the serializeThread()
392 * method instead of the serialize() method as this provides a
393 * uniform data format for all CPU models and promotes better code
396 * @param os The stream to serialize to.
 */
398 void serialize(CheckpointOut &cp) const M5_ATTR_OVERRIDE;
/**
401 * Reconstruct the state of this object from a checkpoint.
403 * @note CPU models should normally overload the
404 * unserializeThread() method instead of the unserialize() method
405 * as this provides a uniform data format for all CPU models and
406 * promotes better code reuse.
408 * @param cp The checkpoint to use.
409 * @param section The section name of this object.
 */
411 void unserialize(CheckpointIn &cp) M5_ATTR_OVERRIDE;
/**
414 * Serialize a single thread.
416 * @param os The stream to serialize to.
417 * @param tid ID of the current thread.
 */
419 virtual void serializeThread(CheckpointOut &cp, ThreadID tid) const {};
/**
422 * Unserialize one thread.
424 * @param cp The checkpoint to use.
425 * @param section The section name of this thread.
426 * @param tid ID of the current thread.
 */
428 virtual void unserializeThread(CheckpointIn &cp, ThreadID tid) {};
// Total committed instruction / micro-op counts for this CPU; implemented by
// each CPU model.
430 virtual Counter totalInsts() const = 0;
432 virtual Counter totalOps() const = 0;
/**
435 * Schedule an event that exits the simulation loops after a
436 * predefined number of instructions.
438 * This method is usually called from the configuration script to
439 * get an exit event some time in the future. It is typically used
440 * when the script wants to simulate for a specific number of
441 * instructions rather than ticks.
443 * @param tid Thread to monitor.
444 * @param insts Number of instructions into the future.
445 * @param cause Cause to signal in the exit event.
 */
447 void scheduleInstStop(ThreadID tid, Counter insts, const char *cause);
/**
450 * Schedule an event that exits the simulation loops after a
451 * predefined number of load operations.
453 * This method is usually called from the configuration script to
454 * get an exit event some time in the future. It is typically used
455 * when the script wants to simulate for a specific number of
456 * loads rather than ticks.
458 * @param tid Thread to monitor.
459 * @param loads Number of load instructions into the future.
460 * @param cause Cause to signal in the exit event.
 */
462 void scheduleLoadStop(ThreadID tid, Counter loads, const char *cause);
/**
467 * @name PMU Probe points.
 */
/**
471 * Helper method to trigger PMU probes for a committed
474 * @param inst Instruction that just committed
 */
476 virtual void probeInstCommit(const StaticInstPtr &inst);
/**
479 * Helper method to instantiate probe points belonging to this
482 * @param name Name of the probe point.
483 * @return A unique_ptr to the new probe point.
 */
485 ProbePoints::PMUUPtr pmuProbePoint(const char *name);
487 /** CPU cycle counter */
488 ProbePoints::PMUUPtr ppCycles;
/**
491 * Instruction commit probe point.
493 * This probe point is triggered whenever one or more instructions
494 * are committed. It is normally triggered once for every
495 * instruction. However, CPU models committing bundles of
496 * instructions may call notify once for the entire bundle.
 */
498 ProbePoints::PMUUPtr ppRetiredInsts;
500 /** Retired load instructions */
501 ProbePoints::PMUUPtr ppRetiredLoads;
502 /** Retired store instructions */
503 ProbePoints::PMUUPtr ppRetiredStores;
505 /** Retired branches (any type) */
506 ProbePoints::PMUUPtr ppRetiredBranches;
// Function-tracing state. The Start/End pair presumably bounds the symbol
// currently being traced and functionEntryTick its entry time -- confirm
// against traceFunctionsInternal() in the .cc file.
514 bool functionTracingEnabled;
515 std::ostream *functionTraceStream;
516 Addr currentFunctionStart;
517 Addr currentFunctionEnd;
518 Tick functionEntryTick;
519 void enableFunctionTrace();
520 void traceFunctionsInternal(Addr pc);
523 static std::vector<BaseCPU *> cpuList; //!< Static global cpu list
// Fast-path wrapper: only pay for tracing when it is enabled.
526 void traceFunctions(Addr pc)
528 if (functionTracingEnabled)
529 traceFunctionsInternal(pc);
// Number of CPUs registered in the global list.
532 static int numSimulatedCPUs() { return cpuList.size(); }
// Sum of committed instructions over every simulated CPU.
// NOTE(review): the declaration of the accumulator and the return statement
// fall on lines missing from this view.
533 static Counter numSimulatedInsts()
537 int size = cpuList.size();
538 for (int i = 0; i < size; ++i)
539 total += cpuList[i]->totalInsts();
// Sum of committed micro-ops over every simulated CPU (same missing-line
// caveat as above).
544 static Counter numSimulatedOps()
548 int size = cpuList.size();
549 for (int i = 0; i < size; ++i)
550 total += cpuList[i]->totalOps();
556 // Number of CPU cycles simulated
557 Stats::Scalar numCycles;
558 Stats::Scalar numWorkItemsStarted;
559 Stats::Scalar numWorkItemsCompleted;
// mwait/monitor support: one address monitor per CPU, armed/queried below.
562 AddressMonitor addressMonitor;
565 void armMonitor(Addr address);
566 bool mwait(PacketPtr pkt);
567 void mwaitAtomic(ThreadContext *tc, TheISA::TLB *dtb);
568 AddressMonitor *getCpuAddrMonitor() { return &addressMonitor; }
569 void atomicNotify(Addr address);
572 #endif // THE_ISA == NULL_ISA
574 #endif // __CPU_BASE_HH__