void
-AtomicSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
+AtomicSimpleCPU::takeOverFrom(BaseCPU *old_cpu)
{
- BaseSimpleCPU::takeOverFrom(oldCPU);
+ BaseSimpleCPU::takeOverFrom(old_cpu);
// The tick event should have been descheduled by drain()
assert(!tickEvent.scheduled());
void
AtomicSimpleCPU::verifyMemoryMode() const
{
- if (!system->isAtomicMode()) {
- fatal("The atomic CPU requires the memory system to be in "
- "'atomic' mode.\n");
- }
+ fatal_if(!system->isAtomicMode(),
+ "The atomic CPU requires the memory system to be in "
+ "'atomic' mode.");
}
void
schedule(tickEvent, clockEdge(Cycles(0)));
}
_status = BaseSimpleCPU::Running;
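+ // Add this thread to the active list if it is not already there.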
- if (std::find(activeThreads.begin(), activeThreads.end(), thread_num)
- == activeThreads.end()) {
+ if (std::find(activeThreads.begin(), activeThreads.end(), thread_num) ==
+ activeThreads.end()) {
activeThreads.push_back(thread_num);
}
}
bool
-AtomicSimpleCPU::genMemFragmentRequest(const RequestPtr& req, Addr frag_addr,
+AtomicSimpleCPU::genMemFragmentRequest(const RequestPtr &req, Addr frag_addr,
int size, Request::Flags flags,
- const std::vector<bool>& byte_enable,
- int& frag_size, int& size_left) const
+ const std::vector<bool> &byte_enable,
+ int &frag_size, int &size_left) const
{
bool predicate = true;
Addr inst_addr = threadInfo[curThread]->thread->pcState().instAddr();
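+ // Clamp the fragment to the end of the cache line or to size_left,
+ // whichever is smaller.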
frag_size = std::min(
cacheLineSize() - addrBlockOffset(frag_addr, cacheLineSize()),
- (Addr) size_left);
+ (Addr)size_left);
size_left -= frag_size;
// Set up byte-enable mask for the current fragment
}
Fault
-AtomicSimpleCPU::readMem(Addr addr, uint8_t * data, unsigned size,
+AtomicSimpleCPU::readMem(Addr addr, uint8_t *data, unsigned size,
Request::Flags flags,
- const std::vector<bool>& byte_enable)
+ const std::vector<bool> &byte_enable)
{
- SimpleExecContext& t_info = *threadInfo[curThread];
- SimpleThread* thread = t_info.thread;
+ SimpleExecContext &t_info = *threadInfo[curThread];
+ SimpleThread *thread = t_info.thread;
// use the CPU's statically allocated read request and packet objects
const RequestPtr &req = data_read_req;
}
//If there's a fault, return it
- if (fault != NoFault) {
- if (req->isPrefetch()) {
- return NoFault;
- } else {
- return fault;
- }
- }
+ if (fault != NoFault)
+ return req->isPrefetch() ? NoFault : fault;
// If we don't need to access further cache lines, stop now.
if (size_left == 0) {
Request::Flags flags, uint64_t *res,
- const std::vector<bool>& byte_enable)
+ const std::vector<bool> &byte_enable)
{
- SimpleExecContext& t_info = *threadInfo[curThread];
- SimpleThread* thread = t_info.thread;
+ SimpleExecContext &t_info = *threadInfo[curThread];
+ SimpleThread *thread = t_info.thread;
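+ // Zero-filled source buffer used when the caller passes a null data
+ // pointer.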
static uint8_t zero_array[64] = {};
if (data == NULL) {
//If there's a fault or we don't need to access a second cache line,
//stop now.
- if (fault != NoFault || size_left == 0)
- {
+ if (fault != NoFault || size_left == 0) {
if (req->isLockedRMW() && fault == NoFault) {
assert(!req->isMasked());
locked = false;
}
- if (fault != NoFault && req->isPrefetch()) {
- return NoFault;
- } else {
- return fault;
- }
+ // Suppress faults from prefetches.
+ return req->isPrefetch() ? NoFault : fault;
}
/*
-AtomicSimpleCPU::amoMem(Addr addr, uint8_t* data, unsigned size,
+AtomicSimpleCPU::amoMem(Addr addr, uint8_t *data, unsigned size,
Request::Flags flags, AtomicOpFunctorPtr amo_op)
{
- SimpleExecContext& t_info = *threadInfo[curThread];
- SimpleThread* thread = t_info.thread;
+ SimpleExecContext &t_info = *threadInfo[curThread];
+ SimpleThread *thread = t_info.thread;
// use the CPU's statically allocated amo request and packet objects
const RequestPtr &req = data_amo_req;
// accesses that cross cache-line boundaries, the cache needs to be
// modified to support locking both cache lines to guarantee the
// atomicity.
- if (secondAddr > addr) {
- panic("AMO request should not access across a cache line boundary\n");
- }
+ panic_if(secondAddr > addr,
+ "AMO request should not access across a cache line boundary.");
dcache_latency = 0;
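+ // AMO accesses are treated as writes; data will hold the value
+ // returned by the access.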
Packet pkt(req, Packet::makeWriteCmd(req));
pkt.dataStatic(data);
- if (req->isLocalAccess())
+ if (req->isLocalAccess()) {
dcache_latency += req->localAccessor(thread->getTC(), &pkt);
- else {
+ } else {
dcache_latency += sendPacket(dcachePort, &pkt);
}
// Change thread if multi-threaded
swapActiveThread();
- // Set memroy request ids to current thread
+ // Set memory request ids to current thread
if (numThreads > 1) {
ContextID cid = threadContexts[curThread]->contextId();
data_amo_req->setContext(cid);
}
- SimpleExecContext& t_info = *threadInfo[curThread];
- SimpleThread* thread = t_info.thread;
+ SimpleExecContext &t_info = *threadInfo[curThread];
+ SimpleThread *thread = t_info.thread;
Tick latency = 0;
assert(!ifetch_pkt.isError());
- // ifetch_req is initialized to read the instruction directly
- // into the CPU object's inst field.
+ // ifetch_req is initialized to read the instruction
+ // directly into the CPU object's inst field.
//}
}
// @todo remove me after debugging with legion done
if (curStaticInst && (!curStaticInst->isMicroop() ||
- curStaticInst->isFirstMicroop()))
+ curStaticInst->isFirstMicroop())) {
instCnt++;
+ }
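+ // Charge the icache latency as stall time when instruction stalls
+ // are modeled.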
if (simulate_inst_stalls && icache_access)
stall_ticks += icache_latency;
* <li>Stay at PC is true.
* </ul>
*/
- bool isCpuDrained() const {
+ bool
+ isCpuDrained() const
+ {
SimpleExecContext &t_info = *threadInfo[curThread];
-
return t_info.thread->microPC() == 0 &&
- !locked &&
- !t_info.stayAtPC;
+ !locked && !t_info.stayAtPC;
}
/**
protected:
- bool recvTimingResp(PacketPtr pkt)
+ bool
+ recvTimingResp(PacketPtr pkt)
{
panic("Atomic CPU doesn't expect recvTimingResp!\n");
- return true;
}
- void recvReqRetry()
+ void
+ recvReqRetry()
{
panic("Atomic CPU doesn't expect recvRetry!\n");
}
{
public:
- AtomicCPUDPort(const std::string &_name, BaseSimpleCPU* _cpu)
+ AtomicCPUDPort(const std::string &_name, BaseSimpleCPU *_cpu)
: AtomicCPUPort(_name, _cpu), cpu(_cpu)
{
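+ // Mask that clears the block-offset bits of an address.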
cacheBlockMask = ~(cpu->cacheLineSize() - 1);
Tick dcache_latency;
/** Probe Points. */
- ProbePointArg<std::pair<SimpleThread*, const StaticInstPtr>> *ppCommit;
+ ProbePointArg<std::pair<SimpleThread *, const StaticInstPtr>> *ppCommit;
protected:
void drainResume() override;
void switchOut() override;
- void takeOverFrom(BaseCPU *oldCPU) override;
+ void takeOverFrom(BaseCPU *old_cpu) override;
void verifyMemoryMode() const override;
* @param[in,out] size_left Size left to be processed in the memory access.
* @return True if the byte-enable mask for the fragment is not all-false.
*/
- bool genMemFragmentRequest(const RequestPtr& req, Addr frag_addr,
+ bool genMemFragmentRequest(const RequestPtr &req, Addr frag_addr,
int size, Request::Flags flags,
- const std::vector<bool>& byte_enable,
- int& frag_size, int& size_left) const;
+ const std::vector<bool> &byte_enable,
+ int &frag_size, int &size_left) const;
Fault readMem(Addr addr, uint8_t *data, unsigned size,
Request::Flags flags,
- const std::vector<bool>& byte_enable = std::vector<bool>())
+ const std::vector<bool> &byte_enable=std::vector<bool>())
override;
- Fault initiateHtmCmd(Request::Flags flags) override
+ Fault
+ initiateHtmCmd(Request::Flags flags) override
{
panic("initiateHtmCmd() is for timing accesses, and should "
"never be called on AtomicSimpleCPU.\n");
}
- void htmSendAbortSignal(HtmFailureFaultCause cause) override
+ void
+ htmSendAbortSignal(HtmFailureFaultCause cause) override
{
panic("htmSendAbortSignal() is for timing accesses, and should "
"never be called on AtomicSimpleCPU.\n");
Fault writeMem(uint8_t *data, unsigned size,
Addr addr, Request::Flags flags, uint64_t *res,
- const std::vector<bool>& byte_enable = std::vector<bool>())
+ const std::vector<bool> &byte_enable=std::vector<bool>())
override;
- Fault amoMem(Addr addr, uint8_t* data, unsigned size,
+ Fault amoMem(Addr addr, uint8_t *data, unsigned size,
Request::Flags flags, AtomicOpFunctorPtr amo_op) override;
void regProbePoints() override;