/** Explicit constructor assigning a value. */
explicit Cycles(uint64_t _c) : c(_c) { }
+ /** Default constructor for parameter classes. */
+ Cycles() : c(0) { }
+
#ifndef SWIG // keep the operators away from SWIG
/** Converting back to the value type. */
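A minimal sketch (not part of the patch, variable names illustrative) of how the explicit constructor and the conversion operator documented above are assumed to interact:
    Cycles lat(3);                    // explicit construction from a raw integer
    uint64_t raw = lat;               // operator uint64_t() converts back implicitly
    Cycles next = Cycles(lat + 1);    // arithmetic decays to uint64_t, so re-wrap the result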
class OpDesc(SimObject):
type = 'OpDesc'
- issueLat = Param.Int(1, "cycles until another can be issued")
+ issueLat = Param.Cycles(1, "cycles until another can be issued")
opClass = Param.OpClass("type of operation")
- opLat = Param.Int(1, "cycles until result is available")
+ opLat = Param.Cycles(1, "cycles until result is available")
class FUDesc(SimObject):
type = 'FUDesc'
{
public:
OpClass opClass;
- unsigned opLat;
- unsigned issueLat;
+ Cycles opLat;
+ Cycles issueLat;
OpDesc(const OpDescParams *p)
: SimObject(p), opClass(p->opClass), opLat(p->opLat),
stageTracing = Param.Bool(False, "Enable tracing of each stage in CPU")
- multLatency = Param.Unsigned(1, "Latency for Multiply Operations")
- multRepeatRate = Param.Unsigned(1, "Repeat Rate for Multiply Operations")
- div8Latency = Param.Unsigned(1, "Latency for 8-bit Divide Operations")
- div8RepeatRate = Param.Unsigned(1, "Repeat Rate for 8-bit Divide Operations")
- div16Latency = Param.Unsigned(1, "Latency for 16-bit Divide Operations")
- div16RepeatRate = Param.Unsigned(1, "Repeat Rate for 16-bit Divide Operations")
- div24Latency = Param.Unsigned(1, "Latency for 24-bit Divide Operations")
- div24RepeatRate = Param.Unsigned(1, "Repeat Rate for 24-bit Divide Operations")
- div32Latency = Param.Unsigned(1, "Latency for 32-bit Divide Operations")
- div32RepeatRate = Param.Unsigned(1, "Repeat Rate for 32-bit Divide Operations")
+ multLatency = Param.Cycles(1, "Latency for Multiply Operations")
+ multRepeatRate = Param.Cycles(1, "Repeat Rate for Multiply Operations")
+ div8Latency = Param.Cycles(1, "Latency for 8-bit Divide Operations")
+ div8RepeatRate = Param.Cycles(1, "Repeat Rate for 8-bit Divide Operations")
+ div16Latency = Param.Cycles(1, "Latency for 16-bit Divide Operations")
+ div16RepeatRate = Param.Cycles(1, "Repeat Rate for 16-bit Divide Operations")
+ div24Latency = Param.Cycles(1, "Latency for 24-bit Divide Operations")
+ div24RepeatRate = Param.Cycles(1, "Repeat Rate for 24-bit Divide Operations")
+ div32Latency = Param.Cycles(1, "Latency for 32-bit Divide Operations")
+ div32RepeatRate = Param.Cycles(1, "Repeat Rate for 32-bit Divide Operations")
// then MDU must be defined as its own SimObject so that an arbitrary # can
// be defined with different parameters
/** Latency & Repeat Rate for Multiply Insts */
- unsigned multLatency;
- unsigned multRepeatRate;
+ Cycles multLatency;
+ Cycles multRepeatRate;
/** Latency & Repeat Rate for 8-bit Divide Insts */
- unsigned div8Latency;
- unsigned div8RepeatRate;
+ Cycles div8Latency;
+ Cycles div8RepeatRate;
/** Latency & Repeat Rate for 16-bit Divide Insts */
- unsigned div16Latency;
- unsigned div16RepeatRate;
+ Cycles div16Latency;
+ Cycles div16RepeatRate;
/** Latency & Repeat Rate for 24-bit Divide Insts */
- unsigned div24Latency;
- unsigned div24RepeatRate;
+ Cycles div24Latency;
+ Cycles div24RepeatRate;
/** Latency & Repeat Rate for 32-bit Divide Insts */
- unsigned div32Latency;
- unsigned div32RepeatRate;
+ Cycles div32Latency;
+ Cycles div32RepeatRate;
};
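The comment above argues that the MDU should become its own SimObject; a hypothetical constructor sketch (MDUUnit and MDUUnitParams are assumed names, mirroring the OpDesc pattern earlier) showing the Cycles members being copy-initialized from the generated params:
    // Hypothetical sketch only; MDUUnit and MDUUnitParams are assumed names.
    MDUUnit::MDUUnit(const MDUUnitParams *p)
        : SimObject(p),
          multLatency(p->multLatency), multRepeatRate(p->multRepeatRate),
          div8Latency(p->div8Latency), div8RepeatRate(p->div8RepeatRate)
          // ... the 16/24/32-bit divide latencies follow the same pattern
    {
    }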
cachePorts = Param.Unsigned(200, "Cache Ports")
- decodeToFetchDelay = Param.Unsigned(1, "Decode to fetch delay")
- renameToFetchDelay = Param.Unsigned(1 ,"Rename to fetch delay")
- iewToFetchDelay = Param.Unsigned(1, "Issue/Execute/Writeback to fetch "
- "delay")
- commitToFetchDelay = Param.Unsigned(1, "Commit to fetch delay")
+ decodeToFetchDelay = Param.Cycles(1, "Decode to fetch delay")
+    renameToFetchDelay = Param.Cycles(1, "Rename to fetch delay")
+ iewToFetchDelay = Param.Cycles(1, "Issue/Execute/Writeback to fetch "
+ "delay")
+ commitToFetchDelay = Param.Cycles(1, "Commit to fetch delay")
fetchWidth = Param.Unsigned(8, "Fetch width")
- renameToDecodeDelay = Param.Unsigned(1, "Rename to decode delay")
- iewToDecodeDelay = Param.Unsigned(1, "Issue/Execute/Writeback to decode "
- "delay")
- commitToDecodeDelay = Param.Unsigned(1, "Commit to decode delay")
- fetchToDecodeDelay = Param.Unsigned(1, "Fetch to decode delay")
+ renameToDecodeDelay = Param.Cycles(1, "Rename to decode delay")
+ iewToDecodeDelay = Param.Cycles(1, "Issue/Execute/Writeback to decode "
+ "delay")
+ commitToDecodeDelay = Param.Cycles(1, "Commit to decode delay")
+ fetchToDecodeDelay = Param.Cycles(1, "Fetch to decode delay")
decodeWidth = Param.Unsigned(8, "Decode width")
- iewToRenameDelay = Param.Unsigned(1, "Issue/Execute/Writeback to rename "
- "delay")
- commitToRenameDelay = Param.Unsigned(1, "Commit to rename delay")
- decodeToRenameDelay = Param.Unsigned(1, "Decode to rename delay")
+ iewToRenameDelay = Param.Cycles(1, "Issue/Execute/Writeback to rename "
+ "delay")
+ commitToRenameDelay = Param.Cycles(1, "Commit to rename delay")
+ decodeToRenameDelay = Param.Cycles(1, "Decode to rename delay")
renameWidth = Param.Unsigned(8, "Rename width")
- commitToIEWDelay = Param.Unsigned(1, "Commit to "
+ commitToIEWDelay = Param.Cycles(1, "Commit to "
"Issue/Execute/Writeback delay")
- renameToIEWDelay = Param.Unsigned(2, "Rename to "
+ renameToIEWDelay = Param.Cycles(2, "Rename to "
"Issue/Execute/Writeback delay")
- issueToExecuteDelay = Param.Unsigned(1, "Issue to execute delay (internal "
+ issueToExecuteDelay = Param.Cycles(1, "Issue to execute delay (internal "
"to the IEW stage)")
dispatchWidth = Param.Unsigned(8, "Dispatch width")
issueWidth = Param.Unsigned(8, "Issue width")
wbDepth = Param.Unsigned(1, "Writeback depth")
fuPool = Param.FUPool(DefaultFUPool(), "Functional Unit pool")
- iewToCommitDelay = Param.Unsigned(1, "Issue/Execute/Writeback to commit "
+ iewToCommitDelay = Param.Cycles(1, "Issue/Execute/Writeback to commit "
"delay")
- renameToROBDelay = Param.Unsigned(1, "Rename to reorder buffer delay")
+ renameToROBDelay = Param.Cycles(1, "Rename to reorder buffer delay")
commitWidth = Param.Unsigned(8, "Commit width")
squashWidth = Param.Unsigned(8, "Squash width")
- trapLatency = Param.Unsigned(13, "Trap latency")
- fetchTrapLatency = Param.Unsigned(1, "Fetch trap latency")
+ trapLatency = Param.Cycles(13, "Trap latency")
+ fetchTrapLatency = Param.Cycles(1, "Fetch trap latency")
backComSize = Param.Unsigned(5, "Time buffer size for backwards communication")
forwardComSize = Param.Unsigned(5, "Time buffer size for forward communication")
/** Priority List used for Commit Policy */
std::list<ThreadID> priority_list;
- /** IEW to Commit delay, in ticks. */
- unsigned iewToCommitDelay;
+ /** IEW to Commit delay. */
+ Cycles iewToCommitDelay;
- /** Commit to IEW delay, in ticks. */
- unsigned commitToIEWDelay;
+ /** Commit to IEW delay. */
+ Cycles commitToIEWDelay;
- /** Rename to ROB delay, in ticks. */
- unsigned renameToROBDelay;
+ /** Rename to ROB delay. */
+ Cycles renameToROBDelay;
- unsigned fetchToCommitDelay;
+ Cycles fetchToCommitDelay;
/** Rename width, in instructions. Used so ROB knows how many
* instructions to get from the rename instruction queue.
/** Tracks which stages are telling decode to stall. */
Stalls stalls[Impl::MaxThreads];
- /** Rename to decode delay, in ticks. */
- unsigned renameToDecodeDelay;
+ /** Rename to decode delay. */
+ Cycles renameToDecodeDelay;
- /** IEW to decode delay, in ticks. */
- unsigned iewToDecodeDelay;
+ /** IEW to decode delay. */
+ Cycles iewToDecodeDelay;
- /** Commit to decode delay, in ticks. */
- unsigned commitToDecodeDelay;
+ /** Commit to decode delay. */
+ Cycles commitToDecodeDelay;
- /** Fetch to decode delay, in ticks. */
- unsigned fetchToDecodeDelay;
+ /** Fetch to decode delay. */
+ Cycles fetchToDecodeDelay;
/** The width of decode, in instructions. */
unsigned decodeWidth;
/** Tracks which stages are telling fetch to stall. */
Stalls stalls[Impl::MaxThreads];
- /** Decode to fetch delay, in ticks. */
- unsigned decodeToFetchDelay;
+ /** Decode to fetch delay. */
+ Cycles decodeToFetchDelay;
- /** Rename to fetch delay, in ticks. */
- unsigned renameToFetchDelay;
+ /** Rename to fetch delay. */
+ Cycles renameToFetchDelay;
- /** IEW to fetch delay, in ticks. */
- unsigned iewToFetchDelay;
+ /** IEW to fetch delay. */
+ Cycles iewToFetchDelay;
- /** Commit to fetch delay, in ticks. */
- unsigned commitToFetchDelay;
+ /** Commit to fetch delay. */
+ Cycles commitToFetchDelay;
/** The width of fetch in instructions. */
unsigned fetchWidth;
funcUnits.clear();
for (int i = 0; i < Num_OpClasses; ++i) {
- maxOpLatencies[i] = 0;
- maxIssueLatencies[i] = 0;
+ maxOpLatencies[i] = Cycles(0);
+ maxIssueLatencies[i] = Cycles(0);
}
//
}
void
-FUPool::annotateMemoryUnits(unsigned hit_latency)
+FUPool::annotateMemoryUnits(Cycles hit_latency)
{
maxOpLatencies[MemReadOp] = hit_latency;
{
private:
/** Maximum op execution latencies, per op class. */
- unsigned maxOpLatencies[Num_OpClasses];
+ Cycles maxOpLatencies[Num_OpClasses];
/** Maximum issue latencies, per op class. */
- unsigned maxIssueLatencies[Num_OpClasses];
+ Cycles maxIssueLatencies[Num_OpClasses];
/** Bitvector listing capabilities of this FU pool. */
std::bitset<Num_OpClasses> capabilityList;
/** Annotates units that provide memory operations. Included only because
* old FU pool provided this function.
*/
- void annotateMemoryUnits(unsigned hit_latency);
+ void annotateMemoryUnits(Cycles hit_latency);
/**
* Gets a FU providing the requested capability. Will mark the unit as busy,
void dump();
/** Returns the operation execution latency of the given capability. */
- unsigned getOpLatency(OpClass capability) {
+ Cycles getOpLatency(OpClass capability) {
return maxOpLatencies[capability];
}
/** Returns the issue latency of the given capability. */
- unsigned getIssueLatency(OpClass capability) {
+ Cycles getIssueLatency(OpClass capability) {
return maxIssueLatencies[capability];
}
*/
bool updatedQueues;
- /** Commit to IEW delay, in ticks. */
- unsigned commitToIEWDelay;
+ /** Commit to IEW delay. */
+ Cycles commitToIEWDelay;
- /** Rename to IEW delay, in ticks. */
- unsigned renameToIEWDelay;
+ /** Rename to IEW delay. */
+ Cycles renameToIEWDelay;
/**
- * Issue to execute delay, in ticks. What this actually represents is
+ * Issue to execute delay. What this actually represents is
* the amount of time it takes for an instruction to wake up, be
* scheduled, and sent to a FU for execution.
*/
- unsigned issueToExecuteDelay;
+ Cycles issueToExecuteDelay;
/** Width of dispatch, in instructions. */
unsigned dispatchWidth;
/** Delay between commit stage and the IQ.
* @todo: Make there be a distinction between the delays within IEW.
*/
- unsigned commitToIEWDelay;
+ Cycles commitToIEWDelay;
/** Is the IQ switched out. */
bool switchedOut;
}
int idx = -2;
- int op_latency = 1;
+ Cycles op_latency = Cycles(1);
ThreadID tid = issuing_inst->threadNumber;
if (op_class != No_OpClass) {
// If we have an instruction that doesn't require a FU, or a
// valid FU, then schedule for execution.
if (idx == -2 || idx != -1) {
- if (op_latency == 1) {
+ if (op_latency == Cycles(1)) {
i2e_info->size++;
instsToExecute.push_back(issuing_inst);
if (idx >= 0)
fuPool->freeUnitNextCycle(idx);
} else {
- int issue_latency = fuPool->getIssueLatency(op_class);
+ Cycles issue_latency = fuPool->getIssueLatency(op_class);
// Generate completion event for the FU
FUCompletion *execution = new FUCompletion(issuing_inst,
idx, this);
cpu->clockEdge(Cycles(op_latency - 1)));
// @todo: Enforce that issue_latency == 1 or op_latency
- if (issue_latency > 1) {
+ if (issue_latency > Cycles(1)) {
// If FU isn't pipelined, then it must be freed
// upon the execution completing.
execution->setFreeFU();
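For context, the truncated call above is assumed to schedule the completion event at an absolute tick computed from the remaining latency; a hedged sketch of the full statement:
    // Assumed form of the scheduling call above: clockEdge() converts the
    // remaining Cycles of latency into an absolute tick at schedule time.
    cpu->schedule(execution, cpu->clockEdge(Cycles(op_latency - 1)));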
master = VectorMasterPort("vector port for connecting slaves")
# Override the default clock
clock = '1GHz'
- header_cycles = Param.Int(1, "cycles of overhead per transaction")
+ header_cycles = Param.Cycles(1, "cycles of overhead per transaction")
width = Param.Int(8, "bus width (bytes)")
block_size = Param.Int(64, "The default block size if not set by " \
"any connected module")
assoc = Param.Int("associativity")
block_size = Param.Int("block size in bytes")
latency = Param.Latency("Latency")
- hash_delay = Param.Int(1, "time in cycles of hash access")
+ hash_delay = Param.Cycles(1, "time in cycles of hash access")
max_miss_count = Param.Counter(0,
"number of misses to handle before calling exit")
mshrs = Param.Int("number of MSHRs (max outstanding requests)")
}
}
+ // @todo: if hashDelay is really in cycles, then
+ // multiply by the clock period
set_lat = set_lat * hashDelay + hitLatency;
if (tag_ptr != NULL) {
// IIC replacement: if this is not the first element of
const unsigned subMask;
/** The latency of a hash lookup. */
- const unsigned hashDelay;
+ const Cycles hashDelay;
/** The total number of tags in primary and secondary. */
const unsigned numTags;
/** The number of tags in the secondary tag store. */
#include "mem/ruby/system/BankedArray.hh"
#include "sim/eventq.hh"
-BankedArray::BankedArray(unsigned int banks, unsigned int accessLatency, unsigned int startIndexBit) :
+BankedArray::BankedArray(unsigned int banks, Cycles accessLatency, unsigned int startIndexBit) :
EventManager(&mainEventQueue)
{
this->banks = banks;
{
private:
unsigned int banks;
- unsigned int accessLatency;
+ Cycles accessLatency;
unsigned int bankBits;
unsigned int startIndexBit;
unsigned int mapIndexToBank(Index idx);
public:
- BankedArray(unsigned int banks, unsigned int accessLatency, unsigned int startIndexBit);
+ BankedArray(unsigned int banks, Cycles accessLatency, unsigned int startIndexBit);
// Note: We try the access based on the cache index, not the address
// This is so we don't get aliasing on blocks being replaced
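A hypothetical instantiation of the updated constructor (bank count, latency, and start bit are assumed values, not defaults from this patch):
    // Hypothetical example: 4 banks, a 2-cycle access latency, and the bank
    // index taken from bit 6 upward (assuming 64-byte blocks).
    BankedArray dataArray(4, Cycles(2), 6);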
dataArrayBanks = Param.Int(1, "Number of banks for the data array")
tagArrayBanks = Param.Int(1, "Number of banks for the tag array")
- dataAccessLatency = Param.Int(1, "Gem5 cycles for the data array")
- tagAccessLatency = Param.Int(1, "Gem5 cycles for the tag array")
+ dataAccessLatency = Param.Cycles(1, "cycles for a data array access")
+ tagAccessLatency = Param.Cycles(1, "cycles for a tag array access")
resourceStalls = Param.Bool(False, "stall if there is a resource failure")
private:
int m_max_outstanding_requests;
- int m_deadlock_threshold;
+ Cycles m_deadlock_threshold;
CacheMemory* m_dataCache_ptr;
CacheMemory* m_instCache_ptr;
dcache = Param.RubyCache("")
max_outstanding_requests = Param.Int(16,
"max requests (incl. prefetches) outstanding")
- deadlock_threshold = Param.Int(500000,
+ deadlock_threshold = Param.Cycles(500000,
"max outstanding cycles for a request before deadlock/livelock declared")
class DMASequencer(RubyPort):
# most derived types require this, so we just do it here once
code('%import "stdint.i"')
code('%import "base/types.hh"')
- # ignore the case operator for Cycles
- code('%ignore *::operator uint64_t() const;')
def getValue(self):
return long(self.value)
class UInt64(CheckedInt): cxx_type = 'uint64_t'; size = 64; unsigned = True
class Counter(CheckedInt): cxx_type = 'Counter'; size = 64; unsigned = True
-class Cycles(CheckedInt): cxx_type = 'Cycles'; size = 64; unsigned = True
class Tick(CheckedInt): cxx_type = 'Tick'; size = 64; unsigned = True
class TcpPort(CheckedInt): cxx_type = 'uint16_t'; size = 16; unsigned = True
class UdpPort(CheckedInt): cxx_type = 'uint16_t'; size = 16; unsigned = True
class Percent(CheckedInt): cxx_type = 'int'; min = 0; max = 100
+class Cycles(CheckedInt):
+ cxx_type = 'Cycles'
+ size = 64
+ unsigned = True
+
+ def getValue(self):
+ from m5.internal.core import Cycles
+ return Cycles(self.value)
+
class Float(ParamValue, float):
cxx_type = 'double'