2 * Copyright (c) 2011-2012 ARM Limited
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * Copyright (c) 2011 Regents of the University of California
16 * All rights reserved.
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are
20 * met: redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer;
22 * redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution;
25 * neither the name of the copyright holders nor the names of its
26 * contributors may be used to endorse or promote products derived from
27 * this software without specific prior written permission.
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
30 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
31 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
32 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
33 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
34 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
35 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
36 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
37 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
38 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
39 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 * Authors: Steve Reinhardt
46 #ifndef __CPU_BASE_HH__
47 #define __CPU_BASE_HH__
51 #include "arch/interrupts.hh"
52 #include "arch/isa_traits.hh"
53 #include "arch/microcode_rom.hh"
54 #include "base/statistics.hh"
55 #include "config/the_isa.hh"
56 #include "mem/mem_object.hh"
57 #include "sim/eventq.hh"
58 #include "sim/full_system.hh"
59 #include "sim/insttracer.hh"
67 class CPUProgressEvent : public Event
76 CPUProgressEvent(BaseCPU *_cpu, Tick ival = 0);
80 void interval(Tick ival) { _interval = ival; }
81 Tick interval() { return _interval; }
83 void repeatEvent(bool repeat) { _repeatEvent = repeat; }
85 virtual const char *description() const;
88 class BaseCPU : public MemObject
92 // @todo remove me after debugging with legion done
94 // every cpu has an id, put it in the base cpu
95 // Set at initialization, only time a cpuId might change is during a
96 // takeover (which should be done from within the BaseCPU anyway,
97 // therefore no setCpuId() method is provided
100 /** instruction side request id that must be placed in all requests */
101 MasterID _instMasterId;
103 /** data side request id that must be placed in all requests */
104 MasterID _dataMasterId;
/** An internal representation of a task identifier within gem5. This is
107 * used so the CPU can add which taskId (which is an internal representation
108 * of the OS process ID) to each request so components in the memory system
109 * can track which process IDs are ultimately interacting with them
113 /** The current OS process ID that is executing on this processor. This is
114 * used to generate a taskId */
117 /** Is the CPU switched out or active? */
121 * Define a base class for the CPU ports (instruction and data)
122 * that is refined in the subclasses. This class handles the
123 * common cases, i.e. the functional accesses and the status
124 * changes and address range queries. The default behaviour for
125 * both atomic and timing access is to panic and the corresponding
126 * subclasses have to override these methods.
128 class CpuPort : public MasterPort
133 * Create a CPU port with a name and a structural owner.
135 * @param _name port name including the owner
* @param _owner structural owner of this port
138 CpuPort(const std::string& _name, MemObject* _owner) :
139 MasterPort(_name, _owner)
144 virtual bool recvTimingResp(PacketPtr pkt);
146 virtual void recvRetry();
148 virtual void recvFunctionalSnoop(PacketPtr pkt);
155 * Purely virtual method that returns a reference to the data
156 * port. All subclasses must implement this method.
158 * @return a reference to the data port
160 virtual CpuPort &getDataPort() = 0;
163 * Purely virtual method that returns a reference to the instruction
164 * port. All subclasses must implement this method.
166 * @return a reference to the instruction port
168 virtual CpuPort &getInstPort() = 0;
170 /** Reads this CPU's ID. */
171 int cpuId() { return _cpuId; }
173 /** Reads this CPU's unique data requestor ID */
174 MasterID dataMasterId() { return _dataMasterId; }
175 /** Reads this CPU's unique instruction requestor ID */
176 MasterID instMasterId() { return _instMasterId; }
179 * Get a master port on this CPU. All CPUs have a data and
180 * instruction port, and this method uses getDataPort and
181 * getInstPort of the subclasses to resolve the two ports.
183 * @param if_name the port name
184 * @param idx ignored index
186 * @return a reference to the port with the given name
188 BaseMasterPort &getMasterPort(const std::string &if_name,
189 PortID idx = InvalidPortID);
191 /** Get cpu task id */
192 uint32_t taskId() const { return _taskId; }
193 /** Set cpu task id */
194 void taskId(uint32_t id) { _taskId = id; }
/** Get the OS process ID currently recorded as executing on this CPU. */
uint32_t getPid() const { return _pid; }
/** Record the OS process ID currently executing on this CPU. */
void setPid(uint32_t pid) { _pid = pid; }
/** Note that a work item has begun; bumps the numWorkItemsStarted stat. */
inline void workItemBegin() { numWorkItemsStarted++; }
/** Note that a work item has finished; bumps the numWorkItemsCompleted stat. */
inline void workItemEnd() { numWorkItemsCompleted++; }
201 // @todo remove me after debugging with legion done
202 Tick instCount() { return instCnt; }
204 TheISA::MicrocodeRom microcodeRom;
207 TheISA::Interrupts *interrupts;
211 getInterruptController()
216 virtual void wakeup() = 0;
219 postInterrupt(int int_num, int index)
221 interrupts->post(int_num, index);
227 clearInterrupt(int int_num, int index)
229 interrupts->clear(int_num, index);
235 interrupts->clearAll();
239 checkInterrupts(ThreadContext *tc) const
241 return FullSystem && interrupts->checkInterrupts(tc);
244 class ProfileEvent : public Event
251 ProfileEvent(BaseCPU *cpu, Tick interval);
254 ProfileEvent *profileEvent;
257 std::vector<ThreadContext *> threadContexts;
259 Trace::InstTracer * tracer;
263 // Mask to align PCs to MachInst sized boundaries
264 static const Addr PCMask = ~((Addr)sizeof(TheISA::MachInst) - 1);
266 /// Provide access to the tracer pointer
267 Trace::InstTracer * getTracer() { return tracer; }
269 /// Notify the CPU that the indicated context is now active. The
270 /// delay parameter indicates the number of ticks to wait before
271 /// executing (typically 0 or 1).
272 virtual void activateContext(ThreadID thread_num, Cycles delay) {}
274 /// Notify the CPU that the indicated context is now suspended.
275 virtual void suspendContext(ThreadID thread_num) {}
277 /// Notify the CPU that the indicated context is now deallocated.
278 virtual void deallocateContext(ThreadID thread_num) {}
280 /// Notify the CPU that the indicated context is now halted.
281 virtual void haltContext(ThreadID thread_num) {}
283 /// Given a Thread Context pointer return the thread num
284 int findContext(ThreadContext *tc);
286 /// Given a thread num get tho thread context for it
287 ThreadContext *getContext(int tn) { return threadContexts[tn]; }
typedef BaseCPUParams Params;
/** Cast the stored generic parameter pointer to this CPU's Params type.
 * NOTE(review): reinterpret_cast assumes _params really points at a
 * BaseCPUParams; a static_cast from the base params type would be
 * safer — confirm _params' declared type. */
const Params *params() const
{ return reinterpret_cast<const Params *>(_params); }
293 BaseCPU(Params *params, bool is_checker = false);
297 virtual void startup();
298 virtual void regStats();
300 virtual void activateWhenReady(ThreadID tid) {};
302 void registerThreadContexts();
305 * Prepare for another CPU to take over execution.
307 * When this method exits, all internal state should have been
308 * flushed. After the method returns, the simulator calls
309 * takeOverFrom() on the new CPU with this CPU as its parameter.
311 virtual void switchOut();
314 * Load the state of a CPU from the previous CPU object, invoked
315 * on all new CPUs that are about to be switched in.
317 * A CPU model implementing this method is expected to initialize
318 * its state from the old CPU and connect its memory (unless they
319 * are already connected) to the memories connected to the old
322 * @param cpu CPU to initialize read state from.
324 virtual void takeOverFrom(BaseCPU *cpu);
327 * Flush all TLBs in the CPU.
329 * This method is mainly used to flush stale translations when
330 * switching CPUs. It is also exported to the Python world to
331 * allow it to request a TLB flush after draining the CPU to make
332 * it easier to compare traces when debugging
333 * handover/checkpointing.
338 * Determine if the CPU is switched out.
340 * @return True if the CPU is switched out, false otherwise.
342 bool switchedOut() const { return _switchedOut; }
345 * Number of threads we're actually simulating (<= SMT_MAX_THREADS).
346 * This is a constant for the duration of the simulation.
351 * Vector of per-thread instruction-based event queues. Used for
352 * scheduling events based on number of instructions committed by
353 * a particular thread.
355 EventQueue **comInstEventQueue;
358 * Vector of per-thread load-based event queues. Used for
359 * scheduling events based on number of loads committed by
* a particular thread.
362 EventQueue **comLoadEventQueue;
367 * Serialize this object to the given output stream.
369 * @note CPU models should normally overload the serializeThread()
370 * method instead of the serialize() method as this provides a
371 * uniform data format for all CPU models and promotes better code
374 * @param os The stream to serialize to.
376 virtual void serialize(std::ostream &os);
379 * Reconstruct the state of this object from a checkpoint.
381 * @note CPU models should normally overload the
382 * unserializeThread() method instead of the unserialize() method
383 * as this provides a uniform data format for all CPU models and
384 * promotes better code reuse.
* @param cp The checkpoint to use.
387 * @param section The section name of this object.
389 virtual void unserialize(Checkpoint *cp, const std::string §ion);
392 * Serialize a single thread.
394 * @param os The stream to serialize to.
395 * @param tid ID of the current thread.
397 virtual void serializeThread(std::ostream &os, ThreadID tid) {};
400 * Unserialize one thread.
* @param cp The checkpoint to use.
403 * @param section The section name of this thread.
404 * @param tid ID of the current thread.
406 virtual void unserializeThread(Checkpoint *cp, const std::string §ion,
410 * Return pointer to CPU's branch predictor (NULL if none).
411 * @return Branch predictor pointer.
413 virtual BranchPred *getBranchPred() { return NULL; };
415 virtual Counter totalInsts() const = 0;
417 virtual Counter totalOps() const = 0;
421 bool functionTracingEnabled;
422 std::ostream *functionTraceStream;
423 Addr currentFunctionStart;
424 Addr currentFunctionEnd;
425 Tick functionEntryTick;
426 void enableFunctionTrace();
427 void traceFunctionsInternal(Addr pc);
430 static std::vector<BaseCPU *> cpuList; //!< Static global cpu list
/** Record a function-trace sample for pc; cheap no-op when function
 * tracing is disabled. */
void traceFunctions(Addr pc)
if (functionTracingEnabled)
traceFunctionsInternal(pc);
/** Number of CPUs in the simulation (size of the global cpu list). */
static int numSimulatedCPUs() { return cpuList.size(); }
/** Sum of totalInsts() over every CPU in the global cpu list. */
static Counter numSimulatedInsts()
int size = cpuList.size();
for (int i = 0; i < size; ++i)
total += cpuList[i]->totalInsts();
/** Sum of totalOps() over every CPU in the global cpu list. */
static Counter numSimulatedOps()
int size = cpuList.size();
for (int i = 0; i < size; ++i)
total += cpuList[i]->totalOps();
463 // Number of CPU cycles simulated
464 Stats::Scalar numCycles;
465 Stats::Scalar numWorkItemsStarted;
466 Stats::Scalar numWorkItemsCompleted;
469 #endif // __CPU_BASE_HH__