Result of running 'hg m5style --skip-all --fix-control -a'.
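The pass is purely mechanical: wherever a control keyword (if, else if, for, while, and the trailing while of a do/while, including the } while (0) macro idiom) abuts its opening parenthesis, it inserts a single space. Conditions and bodies are left untouched, so every hunk below is whitespace-only and none changes program behavior; spacing nits outside the pass's scope, such as the switch(size) context lines, are left as-is. A representative before/after pair, taken from one of the hunks below:

    - if(fault != NoFault)
    + if (fault != NoFault)
          return fault;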
std::vector<auxv_t> auxv;
ElfObject * elfObject = dynamic_cast<ElfObject *>(objFile);
- if(elfObject)
+ if (elfObject)
{
// modern glibc uses a bunch of auxiliary vectors to set up
// TLS as well as do a bunch of other stuff
// 32-bit memory operation
// Find register for operation
unsigned reg_idx;
- while(!bits(regs, reg)) reg++;
+ while (!bits(regs, reg)) reg++;
replaceBits(regs, reg, 0);
reg_idx = force_user ? intRegInMode(MODE_USER, reg) : reg;
TLB::AllowUnaligned;
int i = 0;
- for(; i < numMemMicroops - 1; ++i) {
+ for (; i < numMemMicroops - 1; ++i) {
microOps[uopIdx++] = new MicroNeonLoad64(
machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags,
baseIsSP, 16 /* accSize */, eSize);
microOps = new StaticInstPtr[numMicroops];
unsigned uopIdx = 0;
- for(int i = 0; i < numMarshalMicroops; ++i) {
+ for (int i = 0; i < numMarshalMicroops; ++i) {
switch (numRegs) {
case 1: microOps[uopIdx++] = new MicroIntNeon64_1Reg(
machInst, vx + (RegIndex) (2 * i), vd, eSize, dataSize,
TLB::AllowUnaligned;
int i = 0;
- for(; i < numMemMicroops - 1; ++i) {
+ for (; i < numMemMicroops - 1; ++i) {
microOps[uopIdx++] = new MicroNeonStore64(
machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags,
baseIsSP, 16 /* accSize */, eSize);
}
}
- for(int i = 0; i < numMarshalMicroops; ++i) {
+ for (int i = 0; i < numMarshalMicroops; ++i) {
microOps[uopIdx++] = new MicroUnpackNeon64(
machInst, vd + (RegIndex) (2 * i), vx, eSize, dataSize,
numStructElems, index, i /* step */, replicate);
microOps = new StaticInstPtr[numMicroops];
unsigned uopIdx = 0;
- for(int i = 0; i < numMarshalMicroops; ++i) {
+ for (int i = 0; i < numMarshalMicroops; ++i) {
microOps[uopIdx++] = new MicroPackNeon64(
machInst, vx + (RegIndex) (2 * i), vd, eSize, dataSize,
numStructElems, index, i /* step */, replicate);
TLB::AllowUnaligned;
int i = 0;
- for(; i < numMemMicroops - 1; ++i) {
+ for (; i < numMemMicroops - 1; ++i) {
microOps[uopIdx++] = new MicroNeonStore64(
machInst, vx + (RegIndex) i, rnsp, 16 * i, memaccessFlags,
baseIsSP, 16 /* accSize */, eSize);
bool zero1 = (std::fpclassify(a) == FP_ZERO);
bool zero2 = (std::fpclassify(b) == FP_ZERO);
if ((inf1 && zero2) || (zero1 && inf2)) {
- if(sign1 ^ sign2)
+ if (sign1 ^ sign2)
return (T)(-2.0);
else
return (T)(2.0);
}
aXb = a*b;
fpClassAxB = std::fpclassify(aXb);
- if(fpClassAxB == FP_SUBNORMAL) {
+ if (fpClassAxB == FP_SUBNORMAL) {
feraiseexcept(FeUnderflow);
return 1.5;
}
}
aXb = a*b;
fpClassAxB = std::fpclassify(aXb);
- if(fpClassAxB == FP_SUBNORMAL) {
+ if (fpClassAxB == FP_SUBNORMAL) {
feraiseexcept(FeUnderflow);
return 2.0;
}
}
aXb = a*b;
fpClassAxB = std::fpclassify(aXb);
- if(fpClassAxB == FP_SUBNORMAL) {
+ if (fpClassAxB == FP_SUBNORMAL) {
feraiseexcept(FeUnderflow);
return 1.5;
}
}
aXb = a*b;
fpClassAxB = std::fpclassify(aXb);
- if(fpClassAxB == FP_SUBNORMAL) {
+ if (fpClassAxB == FP_SUBNORMAL) {
feraiseexcept(FeUnderflow);
return 2.0;
}
default:
return NUM_MISCREGS;
}
- } else if(is_reg64) {
+ } else if (is_reg64) {
return NUM_MISCREGS;
} else {
warn("Unhandled register length, register (0x%x) ignored.\n");
// newer kernels use __loop_udelay and __loop_const_udelay symbols
uDelaySkipEvent = addKernelFuncEvent<UDelayEvent>(
"__loop_udelay", "__udelay", 1000, 0);
- if(!uDelaySkipEvent)
+ if (!uDelaySkipEvent)
uDelaySkipEvent = addKernelFuncEventOrPanic<UDelayEvent>(
"__udelay", "__udelay", 1000, 0);
// time. Constant comes from code.
constUDelaySkipEvent = addKernelFuncEvent<UDelayEvent>(
"__loop_const_udelay", "__const_udelay", 1000, 107374);
- if(!constUDelaySkipEvent)
+ if (!constUDelaySkipEvent)
constUDelaySkipEvent = addKernelFuncEventOrPanic<UDelayEvent>(
"__const_udelay", "__const_udelay", 1000, 107374);
// than rangeMRU
if (x > rangeMRU && !functional) {
TlbEntry tmp_entry = table[x];
- for(int i = x; i > 0; i--)
+ for (int i = x; i > 0; i--)
table[i] = table[i - 1];
table[0] = tmp_entry;
retval = &table[0];
int num_entries = size;
SERIALIZE_SCALAR(num_entries);
- for(int i = 0; i < size; i++)
+ for (int i = 0; i < size; i++)
table[i].serializeSection(cp, csprintf("TlbEntry%d", i));
}
int num_entries;
UNSERIALIZE_SCALAR(num_entries);
- for(int i = 0; i < min(size, num_entries); i++)
+ for (int i = 0; i < min(size, num_entries); i++)
table[i].unserializeSection(cp, csprintf("TlbEntry%d", i));
}
Process * p = tc->getProcessPtr();
Fault fault = p->pTable->translate(req);
- if(fault != NoFault)
+ if (fault != NoFault)
return fault;
return NoFault;
void
ISA::clear()
{
- for(int i = 0; i < NumMiscRegs; i++) {
+ for (int i = 0; i < NumMiscRegs; i++) {
for (int j = 0; j < miscRegFile[i].size(); j++)
miscRegFile[i][j] = 0;
proxy.writeBlob(addr, (uint8_t *)(&guestVal), sizeof(T));
uint8_t checkSum = 0;
- while(guestVal) {
+ while (guestVal) {
checkSum += guestVal;
guestVal >>= 8;
}
funcNum);
return false;
}
- } else if(family == 0x0000) {
+ } else if (family == 0x0000) {
// The standard functions
switch (funcNum) {
case VendorAndLargestStdFunc:
//Figure out the effective address size. This can be overridden to
//a fixed value at the decoder level.
int logAddrSize;
- if(emi.legacy.addr)
+ if (emi.legacy.addr)
logAddrSize = altAddr;
else
logAddrSize = defAddr;
if (modrmTable[opcode]) {
nextState = ModRMState;
} else {
- if(immediateSize) {
+ if (immediateSize) {
nextState = ImmediateState;
} else {
instDone = true;
//Figure out the effective address size. This can be overridden to
//a fixed value at the decoder level.
int logAddrSize;
- if(emi.legacy.addr)
+ if (emi.legacy.addr)
logAddrSize = altAddr;
else
logAddrSize = defAddr;
if (modRM.rm == 4 && modRM.mod != 3) {
// && in 32/64 bit mode)
nextState = SIBState;
- } else if(displacementSize) {
+ } else if (displacementSize) {
nextState = DisplacementState;
- } else if(immediateSize) {
+ } else if (immediateSize) {
nextState = ImmediateState;
} else {
instDone = true;
displacementSize = 4;
if (displacementSize) {
nextState = DisplacementState;
- } else if(immediateSize) {
+ } else if (immediateSize) {
nextState = ImmediateState;
} else {
instDone = true;
DPRINTF(Decoder, "Collecting %d byte displacement, got %d bytes.\n",
displacementSize, immediateCollected);
- if(displacementSize == immediateCollected) {
+ if (displacementSize == immediateCollected) {
//Reset this for other immediates.
immediateCollected = 0;
//Sign extend the displacement
}
DPRINTF(Decoder, "Collected displacement %#x.\n",
emi.displacement);
- if(immediateSize) {
+ if (immediateSize) {
nextState = ImmediateState;
} else {
instDone = true;
DPRINTF(Decoder, "Collecting %d byte immediate, got %d bytes.\n",
immediateSize, immediateCollected);
- if(immediateSize == immediateCollected)
+ if (immediateSize == immediateCollected)
{
//Reset this for other immediates.
immediateCollected = 0;
std::stringstream response;
printMnemonic(response, instMnem, mnemonic);
- if(flags[IsLoad])
+ if (flags[IsLoad])
printDestReg(response, 0, dataSize);
else
printSrcReg(response, 2, dataSize);
{
DPRINTF(X86, "flagMask = %#x\n", flagMask);
uint64_t flags = oldFlags & ~flagMask;
- if(flagMask & (ECFBit | CFBit))
+ if (flagMask & (ECFBit | CFBit))
{
- if(findCarry(dataSize*8, _dest, _src1, _src2))
+ if (findCarry(dataSize*8, _dest, _src1, _src2))
flags |= (flagMask & (ECFBit | CFBit));
- if(subtract)
+ if (subtract)
flags ^= (flagMask & (ECFBit | CFBit));
}
- if(flagMask & PFBit && !findParity(8, _dest))
+ if (flagMask & PFBit && !findParity(8, _dest))
flags |= PFBit;
- if(flagMask & AFBit)
+ if (flagMask & AFBit)
{
- if(findCarry(4, _dest, _src1, _src2))
+ if (findCarry(4, _dest, _src1, _src2))
flags |= AFBit;
- if(subtract)
+ if (subtract)
flags ^= AFBit;
}
- if(flagMask & (EZFBit | ZFBit) && findZero(dataSize*8, _dest))
+ if (flagMask & (EZFBit | ZFBit) && findZero(dataSize*8, _dest))
flags |= (flagMask & (EZFBit | ZFBit));
- if(flagMask & SFBit && findNegative(dataSize*8, _dest))
+ if (flagMask & SFBit && findNegative(dataSize*8, _dest))
flags |= SFBit;
- if(flagMask & OFBit && findOverflow(dataSize*8, _dest, _src1, _src2))
+ if (flagMask & OFBit && findOverflow(dataSize*8, _dest, _src1, _src2))
flags |= OFBit;
return flags;
}
void
X86StaticInst::printSrcReg(std::ostream &os, int reg, int size) const
{
- if(_numSrcRegs > reg)
+ if (_numSrcRegs > reg)
printReg(os, _srcRegIdx[reg], size);
}
void
X86StaticInst::printDestReg(std::ostream &os, int reg, int size) const
{
- if(_numDestRegs > reg)
+ if (_numDestRegs > reg)
printReg(os, _destRegIdx[reg], size);
}
bool fold = rel_reg & IntFoldBit;
rel_reg &= ~IntFoldBit;
- if(fold)
+ if (fold)
suffix = "h";
- else if(rel_reg < 8 && size == 1)
+ else if (rel_reg < 8 && size == 1)
suffix = "l";
switch (rel_reg) {
} else {
if (scale != 0 && index != ZeroReg)
{
- if(scale != 1)
+ if (scale != 1)
ccprintf(os, "%d*", scale);
printReg(os, index, addressSize);
someAddr = true;
}
if (base != ZeroReg)
{
- if(someAddr)
+ if (someAddr)
os << " + ";
printReg(os, base, addressSize);
someAddr = true;
}
if (disp != 0)
{
- if(someAddr)
+ if (someAddr)
os << " + ";
ccprintf(os, "%#x", disp);
someAddr = true;
inline uint64_t merge(uint64_t into, uint64_t val, int size) const
{
X86IntReg reg = into;
- if(_destRegIdx[0] & IntFoldBit)
+ if (_destRegIdx[0] & IntFoldBit)
{
reg.H = val;
return reg;
{
X86IntReg reg = from;
DPRINTF(X86, "Picking with size %d\n", size);
- if(_srcRegIdx[idx] & IntFoldBit)
+ if (_srcRegIdx[idx] & IntFoldBit)
return reg.H;
switch(size)
{
{
X86IntReg reg = from;
DPRINTF(X86, "Picking with size %d\n", size);
- if(_srcRegIdx[idx] & IntFoldBit)
+ if (_srcRegIdx[idx] & IntFoldBit)
return reg.SH;
switch(size)
{
bool
X86NativeTrace::checkRcxReg(const char * name, uint64_t &mVal, uint64_t &nVal)
{
- if(!checkRcx)
+ if (!checkRcx)
checkRcx = (mVal != oldRcxVal || nVal != oldRealRcxVal);
- if(checkRcx)
+ if (checkRcx)
return checkReg(name, mVal, nVal);
return true;
}
bool
X86NativeTrace::checkR11Reg(const char * name, uint64_t &mVal, uint64_t &nVal)
{
- if(!checkR11)
+ if (!checkR11)
checkR11 = (mVal != oldR11Val || nVal != oldRealR11Val);
- if(checkR11)
+ if (checkR11)
return checkReg(name, mVal, nVal);
return true;
}
nState.update(this);
mState.update(record->getThread());
- if(record->getStaticInst()->isSyscall())
+ if (record->getStaticInst()->isSyscall())
{
checkRcx = false;
checkR11 = false;
nextState = Ready;
if (write)
walker->port.sendAtomic(write);
- } while(read);
+ } while (read);
state = Ready;
nextState = Waiting;
}
assert(fault == NoFault || read == NULL);
state = nextState;
nextState = Ready;
- } while(read);
+ } while (read);
logBytes = entry.logBytes;
addr = entry.paddr;
dataAttr.system = 1;
//Initialize the segment registers.
- for(int seg = 0; seg < NUM_SEGMENTREGS; seg++) {
+ for (int seg = 0; seg < NUM_SEGMENTREGS; seg++) {
tc->setMiscRegNoEffect(MISCREG_SEG_BASE(seg), 0);
tc->setMiscRegNoEffect(MISCREG_SEG_EFF_BASE(seg), 0);
tc->setMiscRegNoEffect(MISCREG_SEG_ATTR(seg), dataAttr);
dataAttr.system = 1;
//Initialize the segment registers.
- for(int seg = 0; seg < NUM_SEGMENTREGS; seg++) {
+ for (int seg = 0; seg < NUM_SEGMENTREGS; seg++) {
tc->setMiscRegNoEffect(MISCREG_SEG_BASE(seg), 0);
tc->setMiscRegNoEffect(MISCREG_SEG_EFF_BASE(seg), 0);
tc->setMiscRegNoEffect(MISCREG_SEG_ATTR(seg), dataAttr);
std::vector<auxv_t> auxv = extraAuxvs;
string filename;
- if(argv.size() < 1)
+ if (argv.size() < 1)
filename = "";
else
filename = argv[0];
inline static bool
operator == (const ExtMachInst &emi1, const ExtMachInst &emi2)
{
- if(emi1.legacy != emi2.legacy)
+ if (emi1.legacy != emi2.legacy)
return false;
- if(emi1.rex != emi2.rex)
+ if (emi1.rex != emi2.rex)
return false;
- if(emi1.opcode.type != emi2.opcode.type)
+ if (emi1.opcode.type != emi2.opcode.type)
return false;
- if(emi1.opcode.op != emi2.opcode.op)
+ if (emi1.opcode.op != emi2.opcode.op)
return false;
- if(emi1.modRM != emi2.modRM)
+ if (emi1.modRM != emi2.modRM)
return false;
- if(emi1.sib != emi2.sib)
+ if (emi1.sib != emi2.sib)
return false;
- if(emi1.immediate != emi2.immediate)
+ if (emi1.immediate != emi2.immediate)
return false;
- if(emi1.displacement != emi2.displacement)
+ if (emi1.displacement != emi2.displacement)
return false;
- if(emi1.mode != emi2.mode)
+ if (emi1.mode != emi2.mode)
return false;
- if(emi1.opSize != emi2.opSize)
+ if (emi1.opSize != emi2.opSize)
return false;
- if(emi1.addrSize != emi2.addrSize)
+ if (emi1.addrSize != emi2.addrSize)
return false;
- if(emi1.stackSize != emi2.stackSize)
+ if (emi1.stackSize != emi2.stackSize)
return false;
- if(emi1.dispSize != emi2.dispSize)
+ if (emi1.dispSize != emi2.dispSize)
return false;
return true;
}
do { \
static const char msg[] = m; \
atomic_write(fd, msg, sizeof(msg) - 1); \
- } while(0)
+ } while (0)
/**
* Statically allocate a string and write it to STDERR.
}
// qData (vector<AnnotateList>)
- for(x = 0; x < qData.size(); x++) {
+ for (x = 0; x < qData.size(); x++) {
if (!qData[x].size())
continue;
y = 0;
{
int x;
int rm = fegetround();
- for(x = 0; x < 4; x++)
+ for (x = 0; x < 4; x++)
if (m5_round_ops[x] == rm)
return x;
abort();
// 2 == solaris, 3 == freebsd
data = elf_rawdata(section, NULL);
assert(data->d_buf);
- if(ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
+ if (ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
osAbi = htole(((uint32_t*)data->d_buf)[4]);
else
osAbi = htobe(((uint32_t*)data->d_buf)[4]);
result->_programHeaderCount = ehdr.e_phnum;
//Record the size of each entry
result->_programHeaderSize = ehdr.e_phentsize;
- if(result->_programHeaderCount) //If there is a program header table
+ if (result->_programHeaderCount) //If there is a program header table
{
//Figure out the virtual address of the header table in the
//final memory image. We use the program headers themselves
GElf_Phdr phdr;
uint64_t e_phoff = ehdr.e_phoff;
result->_programHeaderTable = 0;
- for(int hdrnum = 0; hdrnum < result->_programHeaderCount; hdrnum++)
+ for (int hdrnum = 0; hdrnum < result->_programHeaderCount; hdrnum++)
{
gelf_getphdr(elf, hdrnum, &phdr);
//Check if we've found the segment with the headers in it
- if(phdr.p_offset <= e_phoff &&
+ if (phdr.p_offset <= e_phoff &&
phdr.p_offset + phdr.p_filesz > e_phoff)
{
result->_programHeaderTable =
squares += hs->squares;
samples += hs->samples;
- while(bucket_size > hs->bucket_size)
+ while (bucket_size > hs->bucket_size)
hs->grow_up();
- while(bucket_size < hs->bucket_size)
+ while (bucket_size < hs->bucket_size)
grow_up();
for (uint32_t i = 0; i < b_size; i++)
if (_repeatEvent)
cpu->schedule(this, curTick() + _interval);
- if(cpu->switchedOut()) {
+ if (cpu->switchedOut()) {
return;
}
assert(tid < numThreads);
AddressMonitor &monitor = addressMonitor[tid];
- if(monitor.gotWakeup == false) {
+ if (monitor.gotWakeup == false) {
int block_size = cacheLineSize();
uint64_t mask = ~((uint64_t)(block_size - 1));
bool AddressMonitor::doMonitor(PacketPtr pkt) {
assert(pkt->req->hasPaddr());
- if(armed && waiting) {
- if(pAddr == pkt->getAddr()) {
+ if (armed && waiting) {
+ if (pAddr == pkt->getAddr()) {
DPRINTF(Mwait,"pAddr=0x%lx invalidated: waking up core\n",
pkt->getAddr());
waiting = false;
_buf += ret;
break;
}
- } while(_size);
+ } while (_size);
}
APPLY_IREG(r13, INTREG_R13); \
APPLY_IREG(r14, INTREG_R14); \
APPLY_IREG(r15, INTREG_R15); \
- } while(0)
+ } while (0)
#define FOREACH_SREG() \
do { \
APPLY_SREG(cr8, MISCREG_CR8); \
APPLY_SREG(efer, MISCREG_EFER); \
APPLY_SREG(apic_base, MISCREG_APIC_BASE); \
- } while(0)
+ } while (0)
#define FOREACH_DREG() \
do { \
APPLY_DREG(db[3], MISCREG_DR3); \
APPLY_DREG(dr6, MISCREG_DR6); \
APPLY_DREG(dr7, MISCREG_DR7); \
- } while(0)
+ } while (0)
#define FOREACH_SEGMENT() \
do { \
APPLY_SEGMENT(ss, MISCREG_SS - MISCREG_SEG_SEL_BASE); \
APPLY_SEGMENT(tr, MISCREG_TR - MISCREG_SEG_SEL_BASE); \
APPLY_SEGMENT(ldt, MISCREG_TSL - MISCREG_SEG_SEL_BASE); \
- } while(0)
+ } while (0)
#define FOREACH_DTABLE() \
do { \
APPLY_DTABLE(gdt, MISCREG_TSG - MISCREG_SEG_SEL_BASE); \
APPLY_DTABLE(idt, MISCREG_IDTR - MISCREG_SEG_SEL_BASE); \
- } while(0)
+ } while (0)
template<typename STRUCT, typename ENTRY>
static STRUCT *newVarStruct(size_t entries)
execSeqNum++;
/* Correctly size the output before writing */
- if(output_index == 0) insts_out.resize(outputWidth);
+ if (output_index == 0) insts_out.resize(outputWidth);
/* Push into output */
insts_out.insts[output_index] = output_inst;
output_index++;
fatal("All listeners are disabled!");
int port = 8000;
- while(!native_listener.listen(port, true))
+ while (!native_listener.listen(port, true))
{
DPRINTF(GDBMisc, "Can't bind port %d\n", port);
port++;
bool
checkReg(const char * regName, T &val, T &realVal)
{
- if(val != realVal)
+ if (val != realVal)
{
DPRINTFN("Register %s should be %#x but is %#x.\n",
regName, realVal, val);
if (iqPolicy == Partitioned) {
maxEntries[tid] = numEntries / active_threads;
- } else if(iqPolicy == Threshold && active_threads == 1) {
+ } else if (iqPolicy == Threshold && active_threads == 1) {
maxEntries[tid] = numEntries;
}
}
{
typename InstQueue::iterator it;
warn("Skidbuffer contents:\n");
- for(it = skidBuffer[tid].begin(); it != skidBuffer[tid].end(); it++)
+ for (it = skidBuffer[tid].begin(); it != skidBuffer[tid].end(); it++)
{
warn("[tid:%u]: %s [sn:%i].\n", tid,
(*it)->staticInst->disassemble(inst->instAddr()),
RAS[tid].restore(pred_hist.front().RASIndex,
pred_hist.front().RASTarget);
- } else if(pred_hist.front().wasCall && pred_hist.front().pushedRAS) {
+ } else if (pred_hist.front().wasCall && pred_hist.front().pushedRAS) {
// Was a call but predicated false. Pop RAS here
DPRINTF(Branch, "[tid: %i] Squashing"
" Call [sn:%i] PC: %s Popping RAS\n", tid,
for (ThreadID tid = 0; tid < numThreads; tid++) {
if (tid != sender) {
- if(getCpuAddrMonitor(tid)->doMonitor(pkt)) {
+ if (getCpuAddrMonitor(tid)->doMonitor(pkt)) {
wakeup(tid);
}
// X86 ISA: Snooping an invalidation for monitor/mwait
AtomicSimpleCPU *cpu = (AtomicSimpleCPU *)(&owner);
for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
- if(cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
+ if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
cpu->wakeup(tid);
}
}
//across a cache line boundary.
Addr secondAddr = roundDown(addr + size - 1, cacheLineSize());
- if(secondAddr > addr)
+ if (secondAddr > addr)
size = secondAddr - addr;
dcache_latency = 0;
req->taskId(taskId());
- while(1) {
+ while (1) {
req->setVirt(0, addr, size, flags, dataMasterId(), thread->pcState().instAddr());
// translate to physical address
// like the I cache. It should be flushed, and when that works
// this code should be uncommented.
//Fetch more instruction memory if necessary
- //if(decoder.needMoreBytes())
+ //if (decoder.needMoreBytes())
//{
icache_access = true;
Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq);
}
}
- if(fault != NoFault || !t_info.stayAtPC)
+ if (fault != NoFault || !t_info.stayAtPC)
advancePC(fault);
}
//Predecode, ie bundle up an ExtMachInst
//If more fetch data is needed, pass it in.
Addr fetchPC = (pcState.instAddr() & PCMask) + t_info.fetchOffset;
- //if(decoder->needMoreBytes())
+ //if (decoder->needMoreBytes())
decoder->moreBytes(pcState, fetchPC, inst);
//else
// decoder->process();
{
for (ThreadID tid = 0; tid < numThreads; tid++) {
if (tid != sender) {
- if(getCpuAddrMonitor(tid)->doMonitor(pkt)) {
+ if (getCpuAddrMonitor(tid)->doMonitor(pkt)) {
wakeup(tid);
}
TheISA::handleLockedSnoop(threadInfo[tid]->thread, pkt,
TimingSimpleCPU::DcachePort::recvFunctionalSnoop(PacketPtr pkt)
{
for (ThreadID tid = 0; tid < cpu->numThreads; tid++) {
- if(cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
+ if (cpu->getCpuAddrMonitor(tid)->doMonitor(pkt)) {
cpu->wakeup(tid);
}
}
olddir = dir[number];
dim[number] = pkt->get<uint64_t>();
dir[number] = dim[number] & drir;
- for(int x = 0; x < Tsunami::Max_CPUs; x++)
+ for (int x = 0; x < Tsunami::Max_CPUs; x++)
{
bitvector = ULL(1) << x;
// Figure out which bits have changed
if ((dim[number] & bitvector) != (olddim & bitvector))
{
// The bit is now set and it wasn't before (set)
- if((dim[number] & bitvector) && (dir[number] & bitvector))
+ if ((dim[number] & bitvector) && (dir[number] & bitvector))
{
tsunami->intrctrl->post(number, TheISA::INTLEVEL_IRQ1, x);
DPRINTF(Tsunami, "dim write resulting in posting dir"
if (pkt->get<uint64_t>() & 0x10000000)
supportedWrite = true;
- if(!supportedWrite)
+ if (!supportedWrite)
panic("TSDEV_CC_MISC write not implemented\n");
break;
case TSDEV_CC_DIM2:
case TSDEV_CC_DIM3:
int number;
- if(regnum == TSDEV_CC_DIM0)
+ if (regnum == TSDEV_CC_DIM0)
number = 0;
- else if(regnum == TSDEV_CC_DIM1)
+ else if (regnum == TSDEV_CC_DIM1)
number = 1;
- else if(regnum == TSDEV_CC_DIM2)
+ else if (regnum == TSDEV_CC_DIM2)
number = 2;
else
number = 3;
olddir = dir[number];
dim[number] = pkt->get<uint64_t>();
dir[number] = dim[number] & drir;
- for(int x = 0; x < 64; x++)
+ for (int x = 0; x < 64; x++)
{
bitvector = ULL(1) << x;
// Figure out which bits have changed
if ((dim[number] & bitvector) != (olddim & bitvector))
{
// The bit is now set and it wasn't before (set)
- if((dim[number] & bitvector) && (dir[number] & bitvector))
+ if ((dim[number] & bitvector) && (dir[number] & bitvector))
{
tsunami->intrctrl->post(number, TheISA::INTLEVEL_IRQ1, x);
DPRINTF(Tsunami, "posting dir interrupt to cpu 0\n");
assert(size <= Tsunami::Max_CPUs);
drir |= bitvector;
- for(int i=0; i < size; i++) {
+ for (int i=0; i < size; i++) {
dir[i] = dim[i] & drir;
if (dim[i] & bitvector) {
tsunami->intrctrl->post(i, TheISA::INTLEVEL_IRQ1, interrupt);
if (drir & bitvector)
{
drir &= ~bitvector;
- for(int i=0; i < size; i++) {
+ for (int i=0; i < size; i++) {
if (dir[i] & bitvector) {
tsunami->intrctrl->clear(i, TheISA::INTLEVEL_IRQ1, interrupt);
DPRINTF(Tsunami, "clearing dir interrupt to cpu %d,"
* bitwise AND with those two numbers results in an integer with all bits
* cleared.
*/
- if(numPlanes & planeMask)
+ if (numPlanes & planeMask)
fatal("Number of planes is not a power of 2 in flash device.\n");
}
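As an aside, the check above is the standard power-of-two test the comment describes: assuming planeMask holds numPlanes - 1 (an assumption, since planeMask's definition is outside this hunk), the bitwise AND is zero exactly when numPlanes is a power of two. A minimal standalone sketch of the idiom, with a hypothetical helper name:

    #include <cstdint>

    // Hypothetical helper, not part of the flash device code: n and n - 1
    // share no set bits iff n is a nonzero power of two.
    static bool isPowerOfTwo(uint32_t n)
    {
        return n != 0 && (n & (n - 1)) == 0;
    }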
DPRINTF(FlashDevice, "Plane %d is busy for %d ticks\n", count,
time[count]);
- if(time[count] != 0) {
+ if (time[count] != 0) {
struct CallBackEntry cbe;
/**
uint32_t size)
{
/** read from image, and get to memory */
- for(int count = 0; count < (size / SectorSize); count++)
+ for (int count = 0; count < (size / SectorSize); count++)
flashDisk->read(&(readaddr[SectorSize*count]), (offset /
SectorSize) + count);
}
uint32_t size)
{
/** Get from fifo and write to image*/
- for(int count = 0; count < (size / SectorSize); count++)
+ for (int count = 0; count < (size / SectorSize); count++)
flashDisk->write(&(writeaddr[SectorSize * count]),
(offset / SectorSize) + count);
}
memReadCallback = new MakeCallback<UFSHostDevice,
&UFSHostDevice::readCallback>(this);
- for(int count = 0; count < lunAvail; count++) {
+ for (int count = 0; count < lunAvail; count++) {
UFSDevice[count] = new UFSSCSIDevice(p, count, transferDoneCallback,
memReadCallback);
}
uint8_t this_lun = 0;
//while we haven't found the right lun, keep searching
- while((this_lun < lunAvail) && !UFSDevice[this_lun]->finishedCommand())
+ while ((this_lun < lunAvail) && !UFSDevice[this_lun]->finishedCommand())
++this_lun;
if (this_lun < lunAvail) {
}
/**done, generate interrupt if we haven't got one already*/
- if(!(UFSHCIMem.ORInterruptStatus & 0x01)) {
+ if (!(UFSHCIMem.ORInterruptStatus & 0x01)) {
UFSHCIMem.ORInterruptStatus |= UTPTransferREQCOMPL;
generateInterrupt();
}
- if(!readDoneEvent.empty()) {
+ if (!readDoneEvent.empty()) {
readDoneEvent.pop_front();
}
}
if (toDisk) {
++writePendingNum;
- while(!writeDoneEvent.empty() && (writeDoneEvent.front().when()
+ while (!writeDoneEvent.empty() && (writeDoneEvent.front().when()
< curTick()))
writeDoneEvent.pop_front();
uint8_t this_lun = 0;
//while we haven't found the right lun, keep searching
- while((this_lun < lunAvail) && !UFSDevice[this_lun]->finishedRead())
+ while ((this_lun < lunAvail) && !UFSDevice[this_lun]->finishedRead())
++this_lun;
DPRINTF(UFSHostDevice, "Found LUN %d messages pending for clean: %d\n",
Intel8254Timer::Counter::latchCount()
{
// behave like a real latch
- if(!latch_on) {
+ if (!latch_on) {
latch_on = true;
read_byte = LSB;
latched_count = currentCount();
void
Intel8254Timer::Counter::setMode(int mode_val)
{
- if(mode_val != InitTc && mode_val != RateGen &&
+ if (mode_val != InitTc && mode_val != RateGen &&
mode_val != SquareWave)
panic("PIT mode %#x is not implemented: \n", mode_val);
olddir = dir[number];
dim[number] = pkt->get<uint64_t>();
dir[number] = dim[number] & drir;
- for(int x = 0; x < Malta::Max_CPUs; x++)
+ for (int x = 0; x < Malta::Max_CPUs; x++)
{
bitvector = ULL(1) << x;
// Figure out which bits have changed
if ((dim[number] & bitvector) != (olddim & bitvector))
{
// The bit is now set and it wasn't before (set)
- if((dim[number] & bitvector) && (dir[number] & bitvector))
+ if ((dim[number] & bitvector) && (dir[number] & bitvector))
{
malta->intrctrl->post(number, TheISA::INTLEVEL_IRQ1, x);
DPRINTF(Malta, "dim write resulting in posting dir"
if (pkt->get<uint64_t>() & 0x10000000)
supportedWrite = true;
- if(!supportedWrite)
+ if (!supportedWrite)
panic("TSDEV_CC_MISC write not implemented\n");
break;
case TSDEV_CC_DIM2:
case TSDEV_CC_DIM3:
int number;
- if(regnum == TSDEV_CC_DIM0)
+ if (regnum == TSDEV_CC_DIM0)
number = 0;
- else if(regnum == TSDEV_CC_DIM1)
+ else if (regnum == TSDEV_CC_DIM1)
number = 1;
- else if(regnum == TSDEV_CC_DIM2)
+ else if (regnum == TSDEV_CC_DIM2)
number = 2;
else
number = 3;
olddir = dir[number];
dim[number] = pkt->get<uint64_t>();
dir[number] = dim[number] & drir;
- for(int x = 0; x < 64; x++)
+ for (int x = 0; x < 64; x++)
{
bitvector = ULL(1) << x;
// Figure out which bits have changed
if ((dim[number] & bitvector) != (olddim & bitvector))
{
// The bit is now set and it wasn't before (set)
- if((dim[number] & bitvector) && (dir[number] & bitvector))
+ if ((dim[number] & bitvector) && (dir[number] & bitvector))
{
malta->intrctrl->post(number, TheISA::INTLEVEL_IRQ1, x);
DPRINTF(Malta, "posting dir interrupt to cpu 0\n");
uint64_t size = sys->threadContexts.size();
assert(size <= Malta::Max_CPUs);
- for(int i=0; i < size; i++) {
+ for (int i=0; i < size; i++) {
//Note: Malta does not use index, but this was added to use the pre-existing implementation
malta->intrctrl->post(i, interrupt, 0);
DPRINTF(Malta, "posting interrupt to cpu %d,"
uint64_t size = sys->threadContexts.size();
assert(size <= Malta::Max_CPUs);
- for(int i=0; i < size; i++) {
+ for (int i=0; i < size; i++) {
//Note: Malta does not use index, but this was added to use the pre-existing implementation
malta->intrctrl->clear(i, interrupt, 0);
DPRINTF(Malta, "clearing interrupt to cpu %d,"
VirtDescriptor *desc(this);
do {
desc->update();
- } while((desc = desc->next()) != NULL && desc != this);
+ } while ((desc = desc->next()) != NULL && desc != this);
if (desc == this)
panic("Loop in descriptor chain!\n");
const VirtDescriptor *desc(this);
do {
desc->dump();
- } while((desc = desc->next()) != NULL);
+ } while ((desc = desc->next()) != NULL);
}
VirtDescriptor *
} else {
offset -= desc->size();
}
- } while((desc = desc->next()) != NULL && desc->isIncoming() && size > 0);
+ } while ((desc = desc->next()) != NULL && desc->isIncoming() && size > 0);
if (size != 0) {
panic("Failed to read %i bytes from chain of %i bytes @ offset %i\n",
} else {
offset -= desc->size();
}
- } while((desc = desc->next()) != NULL && size > 0);
+ } while ((desc = desc->next()) != NULL && size > 0);
if (size != 0) {
panic("Failed to write %i bytes into chain of %i bytes @ offset %i\n",
const VirtDescriptor *desc(this);
do {
size += desc->size();
- } while((desc = desc->next()) != NULL);
+ } while ((desc = desc->next()) != NULL);
return size;
}
// Consume all pending descriptors from the input queue.
VirtDescriptor *d;
- while((d = consumeDescriptor()) != NULL)
+ while ((d = consumeDescriptor()) != NULL)
onNotifyDescriptor(d);
}
bool found = false;
auto i = transmitList.begin();
- while(i != transmitList.end() && !found) {
+ while (i != transmitList.end() && !found) {
if (pkt->checkFunctional((*i).pkt)) {
pkt->makeResponse();
found = true;
// Lookup pc-based information
StrideEntry *entry;
- if(pcTableHit(pc, is_secure, master_id, entry)) {
+ if (pcTableHit(pc, is_secure, master_id, entry)) {
// Hit in table
int new_stride = pkt_addr - entry->lastAddr;
bool stride_match = (new_stride == entry->stride);
if (memSchedPolicy == Enums::fcfs) {
// check if there is a packet going to a free rank
- for(auto i = queue.begin(); i != queue.end() ; ++i) {
+ for (auto i = queue.begin(); i != queue.end() ; ++i) {
DRAMPacket* dram_pkt = *i;
if (ranks[dram_pkt->rank]->isAvailable()) {
queue.erase(i);
bank_ref.colAllowedAt = std::max(act_tick + tRCD, bank_ref.colAllowedAt);
// start by enforcing tRRD
- for(int i = 0; i < banksPerRank; i++) {
+ for (int i = 0; i < banksPerRank; i++) {
// next activate to any bank in this rank must not happen
// before tRRD
if (bankGroupArch && (bank_ref.bankgr == rank_ref.banks[i].bankgr)) {
DPRINTF(DRAM, "Enforcing tXAW with X = %d, next activate "
"no earlier than %llu\n", activationLimit,
rank_ref.actTicks.back() + tXAW);
- for(int j = 0; j < banksPerRank; j++)
+ for (int j = 0; j < banksPerRank; j++)
// next activate must not happen before end of window
rank_ref.banks[j].actAllowedAt =
std::max(rank_ref.actTicks.back() + tXAW,
// update the time for the next read/write burst for each
// bank (add a max with tCCD/tCCD_L here)
Tick cmd_dly;
- for(int j = 0; j < ranksPerChannel; j++) {
- for(int i = 0; i < banksPerRank; i++) {
+ for (int j = 0; j < ranksPerChannel; j++) {
+ for (int i = 0; i < banksPerRank; i++) {
// next burst to same bank group in this rank must not happen
// before tCCD_L. Different bank group timing requirement is
// tBURST; Add tCS for different ranks
// bank in question
vector<bool> got_waiting(ranksPerChannel * banksPerRank, false);
for (const auto& p : queue) {
- if(p->rankRef.isAvailable())
+ if (p->rankRef.isAvailable())
got_waiting[p->bankId] = true;
}
vector<ContextID> lal_cid;
UNSERIALIZE_CONTAINER(lal_addr);
UNSERIALIZE_CONTAINER(lal_cid);
- for(size_t i = 0; i < lal_addr.size(); ++i) {
+ for (size_t i = 0; i < lal_addr.size(); ++i) {
const auto& m = addrMap.find(lal_addr[i]);
m->second->addLockedAddr(LockedAddr(lal_addr[i], lal_cid[i]));
}
BaseSlavePort&
BaseMasterPort::getSlavePort() const
{
- if(_baseSlavePort == NULL)
+ if (_baseSlavePort == NULL)
panic("Cannot getSlavePort on master port %s that is not connected\n",
name());
BaseMasterPort&
BaseSlavePort::getMasterPort() const
{
- if(_baseMasterPort == NULL)
+ if (_baseMasterPort == NULL)
panic("Cannot getMasterPort on slave port %s that is not connected\n",
name());
// check second section
zero = false;
- for(int i = m_filter_size / 2; i < m_filter_size; ++i) {
+ for (int i = m_filter_size / 2; i < m_filter_size; ++i) {
// get intersection of signatures
m_temp_filter[i] = m_temp_filter[i] && m_filter[i];
zero = zero || m_temp_filter[i];
{
// assumes both filters are the same size!
H3BloomFilter * temp = (H3BloomFilter*) other_filter;
- for(int i = 0; i < m_filter_size; ++i){
+ for (int i = 0; i < m_filter_size; ++i){
m_filter[i] |= (*temp)[i];
}
}
int result = 0;
for (int i = 0; i < 64; i++) {
- if(val&mask) result ^= H3[i][index];
+ if (val&mask) result ^= H3[i][index];
val = val >> 1;
}
return result;
{
// assumes both filters are the same size!
MultiBitSelBloomFilter * temp = (MultiBitSelBloomFilter*) other_filter;
- for(int i = 0; i < m_filter_size; ++i){
+ for (int i = 0; i < m_filter_size; ++i){
m_filter[i] |= (*temp)[i];
}
}
for (int i = 0; i < m_filter_size; i++) {
m_filter[i] = 0;
}
- for(int i=0; i < m_page_filter_size; ++i){
+ for (int i=0; i < m_page_filter_size; ++i){
m_page_filter[i] = 0;
}
}
count += m_filter[i];
}
- for(int i=0; i < m_page_filter_size; ++i) {
+ for (int i=0; i < m_page_filter_size; ++i) {
count += m_page_filter[i] = 0;
}
{
// assumes both filters are the same size!
NonCountingBloomFilter * temp = (NonCountingBloomFilter*) other_filter;
- for(int i = 0; i < m_filter_size; ++i){
+ for (int i = 0; i < m_filter_size; ++i){
m_filter[i] |= (*temp)[i];
}
}
void
MessageBuffer::reanalyzeList(list<MsgPtr> &lt, Tick schdTick)
{
- while(!lt.empty()) {
+ while (!lt.empty()) {
m_msg_counter++;
MsgPtr m = lt.front();
m->setLastEnqueueTime(schdTick);
m_vnet_type.resize(m_virtual_networks);
- for(int i = 0 ; i < m_virtual_networks ; i++)
+ for (int i = 0 ; i < m_virtual_networks ; i++)
{
if (m_vnet_type_names[i] == "response")
m_vnet_type[i] = DATA_VNET_; // carries data (and ctrl) packets
m_topology_ptr->createLinks(this);
// FaultModel: declare each router to the fault model
- if(isFaultModelEnabled()){
+ if (isFaultModelEnabled()){
for (vector<Router_d*>::const_iterator i= m_routers.begin();
i != m_routers.end(); ++i) {
Router_d* router = safe_cast<Router_d*>(*i);
for (int i = 0; i < m_vc_per_vnet; i++) {
int delta = m_vc_allocator[vnet];
m_vc_allocator[vnet]++;
- if(m_vc_allocator[vnet] == m_vc_per_vnet)
+ if (m_vc_allocator[vnet] == m_vc_per_vnet)
m_vc_allocator[vnet] = 0;
if (m_out_vc_state[(vnet*m_vc_per_vnet) + delta]->isInState(
{
m_network_ptr = network_ptr;
- for(int i = 0;i < m_virtual_networks;++i) {
+ for (int i = 0;i < m_virtual_networks;++i) {
m_pending_message_count.push_back(0);
}
}
m_round_robin_start = 0;
}
- if(m_pending_message_count[vnet] > 0) {
+ if (m_pending_message_count[vnet] > 0) {
// for all input ports, use round robin scheduling
for (int counter = 0; counter < m_in.size(); counter++) {
// Round robin scheduling
{
m_touched_by.add(cpu);
m_total++;
- if(type == RubyRequestType_ATOMIC) {
+ if (type == RubyRequestType_ATOMIC) {
m_atomics++;
- } else if(type == RubyRequestType_LD){
+ } else if (type == RubyRequestType_LD){
m_loads++;
} else if (type == RubyRequestType_ST){
m_stores++;
std::vector<MsgVecType*> wokeUpMsgVecs;
MsgBufType wokeUpMsgBufs;
- if(m_waiting_buffers.size() > 0) {
+ if (m_waiting_buffers.size() > 0) {
for (WaitingBufType::iterator buf_iter = m_waiting_buffers.begin();
buf_iter != m_waiting_buffers.end();
++buf_iter) {
m_num_sets = p->size/p->block_size/p->assoc;
m_assoc = p->assoc;
m_last_ref_ptr = new Tick*[m_num_sets];
- for(unsigned i = 0; i < m_num_sets; i++){
+ for (unsigned i = 0; i < m_num_sets; i++){
m_last_ref_ptr[i] = new Tick[m_assoc];
- for(unsigned j = 0; j < m_assoc; j++){
+ for (unsigned j = 0; j < m_assoc; j++){
m_last_ref_ptr[i][j] = 0;
}
}
unsigned int bank = mapIndexToBank(idx);
assert(bank < banks);
- if(busyBanks[bank].endAccess >= curTick()) {
+ if (busyBanks[bank].endAccess >= curTick()) {
if (busyBanks[bank].startAccess == curTick() &&
busyBanks[bank].idx == idx) {
// this is the same reservation (can happen when
assert(address == makeLineAddress(address));
int64_t cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
- if(loc == -1) return NULL;
+ if (loc == -1) return NULL;
return m_cache[cacheSet][loc];
}
assert(address == makeLineAddress(address));
int64_t cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
- if(loc == -1) return NULL;
+ if (loc == -1) return NULL;
return m_cache[cacheSet][loc];
}
int64_t cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
- if(loc != -1)
+ if (loc != -1)
m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
}
int64_t cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
- if(loc != -1) {
+ if (loc != -1) {
if (m_replacementPolicy_ptr->useOccupancy()) {
(static_cast<WeightedLRUPolicy*>(m_replacementPolicy_ptr))->
touch(cacheSet, loc, curTick(), occupancy);
assert(set < m_cache_num_sets);
assert(loc < m_cache_assoc);
int ret = 0;
- if(m_cache[set][loc] != NULL) {
+ if (m_cache[set][loc] != NULL) {
ret = m_cache[set][loc]->getNumValidBlocks();
assert(ret >= 0);
}
int tmp_assoc = m_effective_assoc;
while (true) {
tmp_assoc /= 2;
- if(!tmp_assoc) break;
+ if (!tmp_assoc) break;
m_num_levels++;
}
assert(m_num_levels < sizeof(unsigned int)*4);
RubyMemoryControl::drain()
{
DPRINTF(RubyMemory, "MemoryController drain\n");
- if(m_event.scheduled()) {
+ if (m_event.scheduled()) {
deschedule(m_event);
}
return DrainState::Drained;
inline ENTRY*
TBETable<ENTRY>::lookup(Addr address)
{
- if(m_map.find(address) != m_map.end()) return &(m_map.find(address)->second);
+ if (m_map.find(address) != m_map.end()) return &(m_map.find(address)->second);
return NULL;
}
return RequestStatus_BufferFull;
}
- if(m_controller->isBlocked(line_addr) &&
+ if (m_controller->isBlocked(line_addr) &&
request_type != RubyRequestType_Locked_RMW_Write) {
return RequestStatus_Aliased;
}
// Not valid for Network_test protocol
//
bool success = true;
- if(!m_usingNetworkTester)
+ if (!m_usingNetworkTester)
success = handleLlsc(address, request);
if (request->m_type == RubyRequestType_Locked_RMW_Read) {
// This is a Kernel Begin leave handling to
// virtual xCoalescer::makeRequest
return RequestStatus_Issued;
- }else if(pkt->req->isRelease()) {
+ } else if (pkt->req->isRelease()) {
// This is a Kernel End leave handling to
// virtual xCoalescer::makeRequest
// If we are here then we didn't call
std::pair<int,AtomicOpFunctor *> tmpAtomicOp(tmpOffset,
tmpPkt->getAtomicOp());
atomicOps.push_back(tmpAtomicOp);
- } else if(tmpPkt->isWrite()) {
+ } else if (tmpPkt->isWrite()) {
dataBlock.setData(tmpPkt->getPtr<uint8_t>(),
tmpOffset, tmpSize);
}
void
GPUCoalescer::recordCPReadCallBack(MachineID myMachID, MachineID senderMachID)
{
- if(myMachID == senderMachID) {
+ if (myMachID == senderMachID) {
CP_TCPLdHits++;
- } else if(machineIDToMachineType(senderMachID) == MachineType_TCP) {
+ } else if (machineIDToMachineType(senderMachID) == MachineType_TCP) {
CP_TCPLdTransfers++;
- } else if(machineIDToMachineType(senderMachID) == MachineType_TCC) {
+ } else if (machineIDToMachineType(senderMachID) == MachineType_TCC) {
CP_TCCLdHits++;
} else {
CP_LdMiss++;
void
GPUCoalescer::recordCPWriteCallBack(MachineID myMachID, MachineID senderMachID)
{
- if(myMachID == senderMachID) {
+ if (myMachID == senderMachID) {
CP_TCPStHits++;
- } else if(machineIDToMachineType(senderMachID) == MachineType_TCP) {
+ } else if (machineIDToMachineType(senderMachID) == MachineType_TCP) {
CP_TCPStTransfers++;
- } else if(machineIDToMachineType(senderMachID) == MachineType_TCC) {
+ } else if (machineIDToMachineType(senderMachID) == MachineType_TCC) {
CP_TCCStHits++;
} else {
CP_StMiss++;
// Not valid for Network_test protocol
//
bool success = true;
- if(!m_usingNetworkTester)
+ if (!m_usingNetworkTester)
success = handleLlsc(address, request);
if (request->m_type == RubyRequestType_Locked_RMW_Read) {
// isKernel + isRelease
insertKernel(pkt->req->contextId(), pkt);
wbL1();
- if(m_outstanding_wb == 0) {
+ if (m_outstanding_wb == 0) {
for (auto it = kernelEndList.begin(); it != kernelEndList.end(); it++) {
newKernelEnds.push_back(it->first);
}
{
int size = m_dataCache_ptr->getNumBlocks();
// Walk the cache
- for(int i = 0; i < size; i++) {
+ for (int i = 0; i < size; i++) {
Addr addr = m_dataCache_ptr->getAddressAtIdx(i);
// Evict Read-only data
std::shared_ptr<RubyRequest> msg = std::make_shared<RubyRequest>(
m_outstanding_inv++;
}
// Walk the cache
- for(int i = 0; i< size; i++) {
+ for (int i = 0; i< size; i++) {
Addr addr = m_dataCache_ptr->getAddressAtIdx(i);
// Write dirty data back
std::shared_ptr<RubyRequest> msg = std::make_shared<RubyRequest>(
: AbstractReplacementPolicy(p), m_cache(p->cache)
{
m_last_occ_ptr = new int*[m_num_sets];
- for(unsigned i = 0; i < m_num_sets; i++){
+ for (unsigned i = 0; i < m_num_sets; i++){
m_last_occ_ptr[i] = new int[m_assoc];
- for(unsigned j = 0; j < m_assoc; j++){
+ for (unsigned j = 0; j < m_assoc; j++){
m_last_occ_ptr[i][j] = 0;
}
}
bool found = false;
auto i = transmitList.begin();
- while(i != transmitList.end() && !found) {
+ while (i != transmitList.end() && !found) {
if (pkt->checkFunctional((*i).pkt)) {
pkt->makeResponse();
found = true;
++level;
// Variable stack_dist is updated only
// when arriving from Left.
- if(from_left) {
+ if (from_left) {
stack_dist += node->sumRight;
}
// Recursively call the getSum operation till the
// root node is reached
- if(node->parent) {
+ if (node->parent) {
stack_dist = getSum(node->parent, node->isLeftNode,
node->sumLeft + node->sumRight,
stack_dist, level);
mo1 = dynamic_cast<MemObject*>(o1);
mo2 = dynamic_cast<MemObject*>(o2);
- if(mo1 == NULL || mo2 == NULL) {
+ if (mo1 == NULL || mo2 == NULL) {
panic ("Error casting SimObjects %s and %s to MemObject", o1->name(),
o2->name());
}
do { \
static const char msg[] = m; \
atomic_write(STDERR_FILENO, msg, sizeof(msg) - 1); \
- } while(0)
+ } while (0)
void
print_backtrace()
{
// Check supplied list of domains for sanity and add them to the
// domain ID -> domain* hash
- for(auto dit = p->domains.begin(); dit != p->domains.end(); ++dit) {
+ for (auto dit = p->domains.begin(); dit != p->domains.end(); ++dit) {
SrcClockDomain *d = *dit;
DomainID domain_id = d->domainID();
UNSERIALIZE_SCALAR(enableHandler);
- if(temp != enableHandler) {
+ if (temp != enableHandler) {
warn("DVFS: Forcing enable handler status to unserialized value of %d",
enableHandler);
}
do { \
event.unserializeSection(cp, #event); \
eventQueue()->checkpointReschedule(&event); \
- } while(0)
+ } while (0)
#define SERIALIZE_OBJ(obj) obj.serializeSection(cp, #obj)
#define UNSERIALIZE_OBJ(obj) obj.unserializeSection(cp, #obj)
int mem_val = *((int *)buf);
delete buf;
- if(val != mem_val) {
+ if (val != mem_val) {
DPRINTF(SyscallVerbose, "sys_futex: FUTEX_WAKE, read: %d, "
"expected: %d\n", mem_val, val);
return -OS::TGT_EWOULDBLOCK;
tcWaitList->pop_front();
wokenUp++;
}
- if(tcWaitList->empty()) {
+ if (tcWaitList->empty()) {
futex_map.erase(uaddr);
delete tcWaitList;
}
int index = 0;
Addr taddr = (Addr)process->getSyscallArg(tc, index);
- if(taddr != 0) {
+ if (taddr != 0) {
typename OS::time_t t = sec;
t = TheISA::htog(t);
SETranslatingPortProxy &p = tc->getMemProxy();
barrier(CLK_LOCAL_MEM_FENCE);
- if(get_local_id(0) == 0) {
+ if (get_local_id(0) == 0) {
int _lcount = atomic_load(&lcount);
atomic_fetch_add((atomic_int *)chars_decoded, _lcount);
}
setupDataStructs()
{
msg = (char *)memalign(CACHE_LINE_SIZE, (grid_size + 1) * sizeof(char));
- if(msg == NULL) {
+ if (msg == NULL) {
printf("%s:%d: error: %s\n", __FILE__, __LINE__,
"could not allocate host buffers\n");
exit(-1);
delete platforms;
}
- if(NULL == platform) {
+ if (NULL == platform) {
printf("NULL platform found so Exiting Application.\n");
return FAILURE;
}
// invalidate flags in this cpu's cache
pthread_create(&threads[0], NULL, DoWork1, NULL);
- while(wait[0]);
+ while (wait[0]);
// launch thread to invalidate address being monitored
pthread_create(&threads[0], NULL, DoWork2, NULL);
int mwait_cnt = 0;
do {
pthread_mutex_lock (&mutex);
- if(flags[0] != 2) {
+ if (flags[0] != 2) {
pthread_mutex_unlock (&mutex);
__builtin_ia32_mwait(0, 0);
} else {
pthread_mutex_unlock (&mutex);
}
mwait_cnt++;
- } while(flags[0] != 2 && mwait_cnt < NUM_TRIES);
+ } while (flags[0] != 2 && mwait_cnt < NUM_TRIES);
// test may hang if mwait is not working
- if(flags[0]==2) {
+ if (flags[0]==2) {
printf("mwait regression PASSED, flags[0] = %d\n", flags[0]);
} else {
printf("mwait regression FAILED, flags[0] = %d\n", flags[0]);
highestInfo = cargv;
os << obuf;
sp += 8;
- } while(cargv);
+ } while (cargv);
//Output the envp pointers
int envCount = 0;
sp, envCount++, cenvp);
os << obuf;
sp += 8;
- } while(cenvp);
+ } while (cenvp);
uint64_t auxType, auxVal;
do {
auxType = ptrace(PTRACE_PEEKDATA, pid, sp, 0);
sprintf(obuf, "0x%016lx: Auxiliary vector = {0x%016lx, 0x%016lx}\n",
sp - 16, auxType, auxVal);
os << obuf;
- } while(auxType != 0 || auxVal != 0);
+ } while (auxType != 0 || auxVal != 0);
//Print out the argument strings, environment strings, and file name.
string current;
uint64_t buf;
for (int i = 0; i < sizeof(uint64_t); i++) {
unsigned char byte = buf & 0xFF;
if (!foundOpcode) {
- if(!(byte == 0x66 || //operand override
+ if (!(byte == 0x66 || //operand override
byte == 0x67 || //address override
byte == 0x2E || //cs
byte == 0x3E || //ds
do {
ptraceSingleStep();
newPC = getPC();
- } while(newPC == origPC);
+ } while (newPC == origPC);
}
}
cargv = ptrace(PTRACE_PEEKDATA, pid, sp, 0);
sprintf(obuf, "0x%08x: argv[%d] = 0x%08x\n",
sp, argCount++, cargv);
- if(cargv)
- if(highestInfo < cargv)
+ if (cargv)
+ if (highestInfo < cargv)
highestInfo = cargv;
os << obuf;
sp += 4;
- } while(cargv);
+ } while (cargv);
//Output the envp pointers
int envCount = 0;
sp, envCount++, cenvp);
os << obuf;
sp += 4;
- } while(cenvp);
+ } while (cenvp);
uint32_t auxType, auxVal;
do {
auxType = ptrace(PTRACE_PEEKDATA, pid, sp, 0);
sprintf(obuf, "0x%08x: Auxiliary vector = {0x%08x, 0x%08x}\n",
sp - 8, auxType, auxVal);
os << obuf;
- } while(auxType != 0 || auxVal != 0);
+ } while (auxType != 0 || auxVal != 0);
//Print out the argument strings, environment strings, and file name.
string current;
uint32_t buf;
}
sp += 4;
clearedInitialPadding = clearedInitialPadding || buf != 0;
- } while(!clearedInitialPadding || buf != 0 || sp <= highestInfo);
+ } while (!clearedInitialPadding || buf != 0 || sp <= highestInfo);
return os;
}
target1 = npc;
target2 = npc + 4;
return 2;
- } else if(ba) {
+ } else if (ba) {
//This branches immediately to the effective address of the branch
//which we'll have to calculate.
uint64_t disp = 0;
//smart enough to turn this into a shift.
disp *= 4;
target1 = pc + disp;
- } else if(bn)
+ } else if (bn)
target1 = npc + 4;
else
target1 = npc;
sp, argCount++, cargv);
os << obuf;
sp += v8 ? 4 : 8;
- } while(cargv);
+ } while (cargv);
//Output the envp pointers
int envCount = 0;
uint64_t cenvp;
{
GlobalSimLoopExitEvent *exit_event = NULL;
- if(sim_end == 0) {
+ if (sim_end == 0) {
exit_event = simulate();
} else {
exit_event = simulate(sim_end);
MemoryManager::~MemoryManager()
{
- for(gp* payload: freePayloads) {
+ for (gp* payload: freePayloads) {
delete payload;
numberOfFrees++;
}
gp*
MemoryManager::allocate()
{
- if(freePayloads.empty()) {
+ if (freePayloads.empty()) {
numberOfAllocations++;
return new gp(this);
} else {
/* NOTE: normal tlm is blocking here. But in our case we return false
* and tell gem5 when a retry can be done. This is the main difference
* in the protocol:
- * if(requestInProgress)
+ * if (requestInProgress)
* {
* wait(endRequestEvent);
* }
tlm::tlm_sync_enum status;
status = iSocket->nb_transport_fw(*trans, phase, delay);
/* Check returned value: */
- if(status == tlm::TLM_ACCEPTED) {
+ if (status == tlm::TLM_ACCEPTED) {
sc_assert(phase == tlm::BEGIN_REQ);
/* Accepted but is now blocking until END_REQ (exclusion rule)*/
blockingRequest = trans;
- } else if(status == tlm::TLM_UPDATED) {
+ } else if (status == tlm::TLM_UPDATED) {
/* The Timing annotation must be honored: */
sc_assert(phase == tlm::END_REQ || phase == tlm::BEGIN_RESP);
pe = new payloadEvent<sc_transactor>(*this,
&sc_transactor::pec, "PEQ");
pe->notify(*trans, phase, delay);
- } else if(status == tlm::TLM_COMPLETED) {
+ } else if (status == tlm::TLM_COMPLETED) {
/* Transaction is over; nothing has to be done. */
sc_assert(phase == tlm::END_RESP);
trans->release();
{
sc_time delay;
- if(phase == tlm::END_REQ ||
+ if (phase == tlm::END_REQ ||
&trans == blockingRequest && phase == tlm::BEGIN_RESP) {
sc_assert(&trans == blockingRequest);
blockingRequest = NULL;
iSocket.sendRetryReq();
}
}
- else if(phase == tlm::BEGIN_RESP)
+ else if (phase == tlm::BEGIN_RESP)
{
CAUGHT_UP;
/* Load / Store the access: */
if ( cmd == tlm::TLM_READ_COMMAND ) {
- if(debug) {
+ if (debug) {
SC_REPORT_INFO("target", "tlm::TLM_READ_COMMAND");
}
std::memcpy(ptr, mem_array_ptr, len);
} else if ( cmd == tlm::TLM_WRITE_COMMAND ) {
- if(debug) {
+ if (debug) {
SC_REPORT_INFO("target", "tlm::TLM_WRITE_COMMAND");
}
std::memcpy(mem_array_ptr, ptr, len);
{
sc_time delay;
- if(phase == tlm::BEGIN_REQ) {
- if(debug) SC_REPORT_INFO("target", "tlm::BEGIN_REQ");
+ if (phase == tlm::BEGIN_REQ) {
+ if (debug) SC_REPORT_INFO("target", "tlm::BEGIN_REQ");
/* Increment the transaction reference count */
trans.acquire();
/* Load / Store the access: */
if ( cmd == tlm::TLM_READ_COMMAND ) {
- if(debug) {
+ if (debug) {
SC_REPORT_INFO("target", "tlm::TLM_READ_COMMAND");
}
std::memcpy(ptr, mem_array_ptr, len);
} else if ( cmd == tlm::TLM_WRITE_COMMAND ) {
- if(debug) {
+ if (debug) {
SC_REPORT_INFO("target", "tlm::TLM_WRITE_COMMAND");
}
std::memcpy(mem_array_ptr, ptr, len);