template <class XC, class MemT>
Fault
amoMemAtomic(XC *xc, Trace::InstRecord *traceData, MemT &mem, Addr addr,
- Request::Flags flags, AtomicOpFunctor *amo_op)
+ Request::Flags flags, AtomicOpFunctor *_amo_op)
{
- assert(amo_op);
+ assert(_amo_op);
// mem will hold the previous value at addr after the AMO completes
memset(&mem, 0, sizeof(mem));
+ AtomicOpFunctorPtr amo_op = AtomicOpFunctorPtr(_amo_op);
Fault fault = xc->amoMem(addr, (uint8_t *)&mem, sizeof(MemT), flags,
- amo_op);
+ std::move(amo_op));
if (fault == NoFault) {
mem = TheISA::gtoh(mem);
template <class XC, class MemT>
Fault
initiateMemAMO(XC *xc, Trace::InstRecord *traceData, Addr addr, MemT& mem,
- Request::Flags flags, AtomicOpFunctor *amo_op)
+ Request::Flags flags, AtomicOpFunctor *_amo_op)
{
- assert(amo_op);
- return xc->initiateMemAMO(addr, sizeof(MemT), flags, amo_op);
+ assert(_amo_op);
+ AtomicOpFunctorPtr amo_op = AtomicOpFunctorPtr(_amo_op);
+ return xc->initiateMemAMO(addr, sizeof(MemT), flags, std::move(amo_op));
}
#endif
virtual void execute(T * p) = 0;
};
+typedef std::unique_ptr<AtomicOpFunctor> AtomicOpFunctorPtr;
+
enum ByteOrder {
BigEndianByteOrder,
LittleEndianByteOrder
const std::vector<bool>& byteEnable = std::vector<bool>());
Fault initiateMemAMO(Addr addr, unsigned size, Request::Flags flags,
- AtomicOpFunctor *amo_op);
+ AtomicOpFunctorPtr amo_op);
/** True if the DTB address translation has started. */
bool translationStarted() const { return instFlags[TranslationStarted]; }
Fault
BaseDynInst<Impl>::initiateMemAMO(Addr addr, unsigned size,
Request::Flags flags,
- AtomicOpFunctor *amo_op)
+ AtomicOpFunctorPtr amo_op)
{
// atomic memory instructions do not have data to be written to memory yet
// since the atomic operations will be executed directly in cache/memory.
// memory
return cpu->pushRequest(
dynamic_cast<typename DynInstPtr::PtrType>(this),
- /* atomic */ false, nullptr, size, addr, flags, nullptr, amo_op);
+ /* atomic */ false, nullptr, size, addr, flags, nullptr,
+ std::move(amo_op));
}
#endif // __CPU_BASE_DYN_INST_HH__
override;
Fault amoMem(Addr addr, uint8_t* data, unsigned size,
- Request::Flags flags, AtomicOpFunctor *amo_op) override
+ Request::Flags flags, AtomicOpFunctorPtr amo_op) override
{
panic("AMO is not supported yet in CPU checker\n");
}
*/
virtual Fault amoMem(Addr addr, uint8_t *data, unsigned int size,
Request::Flags flags,
- AtomicOpFunctor *amo_op)
+ AtomicOpFunctorPtr amo_op)
{
panic("ExecContext::amoMem() should be overridden\n");
}
*/
virtual Fault initiateMemAMO(Addr addr, unsigned int size,
Request::Flags flags,
- AtomicOpFunctor *amo_op)
+ AtomicOpFunctorPtr amo_op)
{
panic("ExecContext::initiateMemAMO() should be overridden\n");
}
Fault
initiateMemAMO(Addr addr, unsigned int size, Request::Flags flags,
- AtomicOpFunctor *amo_op) override
+ AtomicOpFunctorPtr amo_op) override
{
// AMO requests are pushed through the store path
return execute.getLSQ().pushRequest(inst, false /* amo */, nullptr,
- size, addr, flags, nullptr, amo_op);
+ size, addr, flags, nullptr, std::move(amo_op));
}
RegVal
Fault
LSQ::pushRequest(MinorDynInstPtr inst, bool isLoad, uint8_t *data,
unsigned int size, Addr addr, Request::Flags flags,
- uint64_t *res, AtomicOpFunctor *amo_op,
+ uint64_t *res, AtomicOpFunctorPtr amo_op,
const std::vector<bool>& byteEnable)
{
assert(inst->translationFault == NoFault || inst->inLSQ);
request->request->setVirt(0 /* asid */,
addr, size, flags, cpu.dataMasterId(),
/* I've no idea why we need the PC, but give it */
- inst->pc.instAddr(), amo_op);
+ inst->pc.instAddr(), std::move(amo_op));
request->request->setByteEnable(byteEnable);
requests.push(request);
* the LSQ */
Fault pushRequest(MinorDynInstPtr inst, bool isLoad, uint8_t *data,
unsigned int size, Addr addr, Request::Flags flags,
- uint64_t *res, AtomicOpFunctor *amo_op,
+ uint64_t *res, AtomicOpFunctorPtr amo_op,
const std::vector<bool>& byteEnable =
std::vector<bool>());
/** CPU pushRequest function, forwards request to LSQ. */
Fault pushRequest(const DynInstPtr& inst, bool isLoad, uint8_t *data,
unsigned int size, Addr addr, Request::Flags flags,
- uint64_t *res, AtomicOpFunctor *amo_op = nullptr,
+ uint64_t *res, AtomicOpFunctorPtr amo_op = nullptr,
const std::vector<bool>& byteEnable =
std::vector<bool>())
{
return iew.ldstQueue.pushRequest(inst, isLoad, data, size, addr,
- flags, res, amo_op, byteEnable);
+ flags, res, std::move(amo_op), byteEnable);
}
/** CPU read function, forwards read to LSQ. */
const Request::Flags _flags;
std::vector<bool> _byteEnable;
uint32_t _numOutstandingPackets;
- AtomicOpFunctor *_amo_op;
+ AtomicOpFunctorPtr _amo_op;
protected:
LSQUnit* lsqUnit() { return &_port; }
LSQRequest(LSQUnit* port, const DynInstPtr& inst, bool isLoad) :
const Addr& addr, const uint32_t& size,
const Request::Flags& flags_,
PacketDataPtr data = nullptr, uint64_t* res = nullptr,
- AtomicOpFunctor* amo_op = nullptr)
+ AtomicOpFunctorPtr amo_op = nullptr)
: _state(State::NotIssued), _senderState(nullptr),
numTranslatedFragments(0),
numInTranslationFragments(0),
_res(res), _addr(addr), _size(size),
_flags(flags_),
_numOutstandingPackets(0),
- _amo_op(amo_op)
+ _amo_op(std::move(amo_op))
{
flags.set(Flag::IsLoad, isLoad);
flags.set(Flag::WbStore,
isAnyActiveElement(byteEnable.begin(), byteEnable.end())) {
auto request = std::make_shared<Request>(_inst->getASID(),
addr, size, _flags, _inst->masterId(),
- _inst->instAddr(), _inst->contextId(), _amo_op);
+ _inst->instAddr(), _inst->contextId(),
+ std::move(_amo_op));
if (!byteEnable.empty()) {
request->setByteEnable(byteEnable);
}
const Request::Flags& flags_,
PacketDataPtr data = nullptr,
uint64_t* res = nullptr,
- AtomicOpFunctor* amo_op = nullptr) :
+ AtomicOpFunctorPtr amo_op = nullptr) :
LSQRequest(port, inst, isLoad, addr, size, flags_, data, res,
- amo_op) {}
+ std::move(amo_op)) {}
inline virtual ~SingleDataRequest() {}
virtual void initiateTranslation();
Fault pushRequest(const DynInstPtr& inst, bool isLoad, uint8_t *data,
unsigned int size, Addr addr, Request::Flags flags,
- uint64_t *res, AtomicOpFunctor *amo_op,
+ uint64_t *res, AtomicOpFunctorPtr amo_op,
const std::vector<bool>& byteEnable);
/** The CPU pointer. */
Fault
LSQ<Impl>::pushRequest(const DynInstPtr& inst, bool isLoad, uint8_t *data,
unsigned int size, Addr addr, Request::Flags flags,
- uint64_t *res, AtomicOpFunctor *amo_op,
+ uint64_t *res, AtomicOpFunctorPtr amo_op,
const std::vector<bool>& byteEnable)
{
// This incoming request can be either load, store or atomic.
size, flags, data, res);
} else {
req = new SingleDataRequest(&thread[tid], inst, isLoad, addr,
- size, flags, data, res, amo_op);
+ size, flags, data, res, std::move(amo_op));
}
assert(req);
if (!byteEnable.empty()) {
Fault
AtomicSimpleCPU::amoMem(Addr addr, uint8_t* data, unsigned size,
- Request::Flags flags, AtomicOpFunctor *amo_op)
+ Request::Flags flags, AtomicOpFunctorPtr amo_op)
{
SimpleExecContext& t_info = *threadInfo[curThread];
SimpleThread* thread = t_info.thread;
req->taskId(taskId());
req->setVirt(0, addr, size, flags, dataMasterId(),
- thread->pcState().instAddr(), amo_op);
+ thread->pcState().instAddr(), std::move(amo_op));
// translate to physical address
Fault fault = thread->dtb->translateAtomic(req, thread->getTC(),
override;
Fault amoMem(Addr addr, uint8_t* data, unsigned size,
- Request::Flags flags, AtomicOpFunctor *amo_op) override;
+ Request::Flags flags, AtomicOpFunctorPtr amo_op) override;
void regProbePoints() override;
virtual Fault amoMem(Addr addr, uint8_t* data, unsigned size,
Request::Flags flags,
- AtomicOpFunctor *amo_op)
+ AtomicOpFunctorPtr amo_op)
{ panic("amoMem() is not implemented\n"); }
virtual Fault initiateMemAMO(Addr addr, unsigned size,
Request::Flags flags,
- AtomicOpFunctor *amo_op)
+ AtomicOpFunctorPtr amo_op)
{ panic("initiateMemAMO() is not implemented\n"); }
void countInst();
}
Fault amoMem(Addr addr, uint8_t *data, unsigned int size,
- Request::Flags flags, AtomicOpFunctor *amo_op) override
+ Request::Flags flags, AtomicOpFunctorPtr amo_op) override
{
- return cpu->amoMem(addr, data, size, flags, amo_op);
+ return cpu->amoMem(addr, data, size, flags, std::move(amo_op));
}
Fault initiateMemAMO(Addr addr, unsigned int size,
Request::Flags flags,
- AtomicOpFunctor *amo_op) override
+ AtomicOpFunctorPtr amo_op) override
{
- return cpu->initiateMemAMO(addr, size, flags, amo_op);
+ return cpu->initiateMemAMO(addr, size, flags, std::move(amo_op));
}
/**
Fault
TimingSimpleCPU::initiateMemAMO(Addr addr, unsigned size,
Request::Flags flags,
- AtomicOpFunctor *amo_op)
+ AtomicOpFunctorPtr amo_op)
{
SimpleExecContext &t_info = *threadInfo[curThread];
SimpleThread* thread = t_info.thread;
traceData->setMem(addr, size, flags);
RequestPtr req = make_shared<Request>(asid, addr, size, flags,
- dataMasterId(), pc, thread->contextId(), amo_op);
+ dataMasterId(), pc, thread->contextId(),
+ std::move(amo_op));
assert(req->hasAtomicOpFunctor());
override;
Fault initiateMemAMO(Addr addr, unsigned size, Request::Flags flags,
- AtomicOpFunctor *amo_op) override;
+ AtomicOpFunctorPtr amo_op) override;
void fetch();
void sendFetch(const Fault &fault,
InstSeqNum _reqInstSeqNum;
/** A pointer to an atomic operation */
- AtomicOpFunctor *atomicOpFunctor;
+ AtomicOpFunctorPtr atomicOpFunctor;
public:
Request(uint64_t asid, Addr vaddr, unsigned size, Flags flags,
MasterID mid, Addr pc, ContextID cid,
- AtomicOpFunctor *atomic_op)
+ AtomicOpFunctorPtr atomic_op)
{
- setVirt(asid, vaddr, size, flags, mid, pc, atomic_op);
+ setVirt(asid, vaddr, size, flags, mid, pc, std::move(atomic_op));
setContext(cid);
}
translateDelta(other.translateDelta),
accessDelta(other.accessDelta), depth(other.depth)
{
- if (other.atomicOpFunctor)
- atomicOpFunctor = (other.atomicOpFunctor)->clone();
- else
- atomicOpFunctor = nullptr;
- }
- ~Request()
- {
- if (hasAtomicOpFunctor()) {
- delete atomicOpFunctor;
- }
+ atomicOpFunctor.reset(other.atomicOpFunctor ?
+ other.atomicOpFunctor->clone() : nullptr);
}
+ ~Request() {}
+
/**
* Set up Context numbers.
*/
*/
void
setVirt(uint64_t asid, Addr vaddr, unsigned size, Flags flags,
- MasterID mid, Addr pc, AtomicOpFunctor *amo_op = nullptr)
+ MasterID mid, Addr pc, AtomicOpFunctorPtr amo_op = nullptr)
{
_asid = asid;
_vaddr = vaddr;
depth = 0;
accessDelta = 0;
translateDelta = 0;
- atomicOpFunctor = amo_op;
+ atomicOpFunctor = std::move(amo_op);
}
/**
bool
hasAtomicOpFunctor()
{
- return atomicOpFunctor != NULL;
+ return (bool)atomicOpFunctor;
}
AtomicOpFunctor *
getAtomicOpFunctor()
{
- assert(atomicOpFunctor != NULL);
- return atomicOpFunctor;
+ assert(atomicOpFunctor);
+ return atomicOpFunctor.get();
}
/** Accessor for flags. */