def makeCowDisks(disk_paths):
    """Create one copy-on-write IDE disk per disk-image path.

    Each disk is backed (via childImage) by the image at the
    corresponding path and configured as 'device0' on its IDE channel.

    :param disk_paths: iterable of disk-image file paths
    :returns: list of CowIdeDisk objects, one per path
    """
    disks = []
    for disk_path in disk_paths:
        disk = CowIdeDisk(driveID='device0')
        # Attach the backing image; writes go to the COW layer only.
        disk.childImage(disk_path)
        disks.append(disk)
    return disks
def AMBA_INITIATOR_ROLE(width):
    """Return the port-role string for an AMBA initiator of *width* bits."""
    role = 'AMBA INITIATOR %d' % width
    return role
def SC_REQUEST_PORT_ROLE(port_type):
    """Return the port-role string for an SC request port of *port_type*."""
    return 'SC REQUEST PORT for %s' % port_type
def SC_RESPONSE_PORT_ROLE(port_type):
    """Return the port-role string for an SC response port of *port_type*."""
    return 'SC RESPONSE PORT for %s' % port_type
class AmbaTargetSocket(Port):
    # NOTE(review): this fragment appears garbled — super() names
    # VectorAmbaInitiatorSocket rather than AmbaTargetSocket, `width`
    # is never used, and `my_role` is not defined anywhere in the
    # visible body. Verify against the complete source file before
    # relying on (or changing) this class.
    def __init__(self, width, desc):
        super(VectorAmbaInitiatorSocket, self).__init__(
            my_role, desc, is_source=True)
class ScRequestPort(Port):
    """Request-side port for binding gem5 to a SystemC/TLM peer.

    `port_type` names the transported payload type; it is folded into
    the role strings so only matching request/response pairs connect.
    """
    def __init__(self, desc, port_type):
        my_role = SC_REQUEST_PORT_ROLE(port_type)
        peer_role = SC_RESPONSE_PORT_ROLE(port_type)
        # Declare the role pair compatible before constructing the port.
        Port.compat(my_role, peer_role)
        super(ScRequestPort, self).__init__(my_role, desc)
class ScResponsePort(Port):
    """Response-side port for binding gem5 to a SystemC/TLM peer.

    Mirror of ScRequestPort: the same role pair is registered as
    compatible, but this port takes the response-side role.
    """
    def __init__(self, desc, port_type):
        my_role = SC_RESPONSE_PORT_ROLE(port_type)
        peer_role = SC_REQUEST_PORT_ROLE(port_type)
        # Declare the role pair compatible before constructing the port.
        Port.compat(my_role, peer_role)
        super(ScResponsePort, self).__init__(my_role, desc)
class AmbaToTlmBridge64(SystemC_ScModule):
type = 'AmbaToTlmBridge64'
output_attributes = Param.String("ExtendedID[62:55]=MPAM_PMG, "
"ExtendedID[54:39]=MPAM_PARTID, ExtendedID[38]=MPAM_NS",
"User-defined transform to be applied to bus attributes like "
- "MasterID, ExtendedID or UserFlags. Currently, only works for "
+ "RequestorID, ExtendedID or UserFlags. Currently, only works for "
"MPAM Attributes encoding into bus attributes.")
has_DirtyVLPIOnLoad = Param.Bool(False, "GICR_VPENDBASER.Dirty reflects "
"transient loading state when valid=1")
warn_once("Doing AT (address translation) in functional mode! Fix Me!\n");
auto req = std::make_shared<Request>(
- val, 0, flags, Request::funcMasterId,
+ val, 0, flags, Request::funcRequestorId,
tc->pcState().pc(), tc->contextId());
Fault fault = getDTBPtr(tc)->translateFunctional(
warn_once("Doing AT (address translation) in functional mode! Fix Me!\n");
auto req = std::make_shared<Request>(
- val, 0, flags, Request::funcMasterId,
+ val, 0, flags, Request::funcRequestorId,
tc->pcState().pc(), tc->contextId());
Fault fault = getDTBPtr(tc)->translateFunctional(
{
req = std::make_shared<Request>();
req->setVirt(s1Te.pAddr(s1Req->getVaddr()), s1Req->getSize(),
- s1Req->getFlags(), s1Req->masterId(), 0);
+ s1Req->getFlags(), s1Req->requestorId(), 0);
}
Fault getTe(ThreadContext *tc, TlbEntry *destTe);
Stage2MMU::Stage2MMU(const Params *p)
: SimObject(p), _stage1Tlb(p->tlb), _stage2Tlb(p->stage2_tlb),
port(_stage1Tlb->getTableWalker(), p->sys),
- masterId(p->sys->getMasterId(_stage1Tlb->getTableWalker()))
+ requestorId(p->sys->getRequestorId(_stage1Tlb->getTableWalker()))
{
// we use the stage-one table walker as the parent of the port,
- // and to get our master id, this is done to keep things
+ // and to get our requestor id, this is done to keep things
// symmetrical with other ISAs in terms of naming and stats
- stage1Tlb()->setMMU(this, masterId);
- stage2Tlb()->setMMU(this, masterId);
+ stage1Tlb()->setMMU(this, requestorId);
+ stage2Tlb()->setMMU(this, requestorId);
}
Fault
// translate to physical address using the second stage MMU
auto req = std::make_shared<Request>();
- req->setVirt(descAddr, numBytes, flags | Request::PT_WALK, masterId, 0);
+ req->setVirt(descAddr, numBytes, flags | Request::PT_WALK,
+ requestorId, 0);
if (isFunctional) {
fault = stage2Tlb()->translateFunctional(req, tc, BaseTLB::Read);
} else {
{
// translate to physical address using the second stage MMU
translation->setVirt(
- descAddr, numBytes, flags | Request::PT_WALK, masterId);
+ descAddr, numBytes, flags | Request::PT_WALK, requestorId);
translation->translateTiming(tc);
}
DmaPort port;
/** Request id for requests generated by this MMU */
- MasterID masterId;
+ RequestorID requestorId;
public:
/** This translation class is used to trigger the data fetch once a timing
finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc,
BaseTLB::Mode mode);
- void setVirt(Addr vaddr, int size, Request::Flags flags, int masterId)
+ void setVirt(Addr vaddr, int size, Request::Flags flags,
+ int requestorId)
{
numBytes = size;
- req->setVirt(vaddr, size, flags, masterId, 0);
+ req->setVirt(vaddr, size, flags, requestorId, 0);
}
void translateTiming(ThreadContext *tc)
TableWalker::TableWalker(const Params *p)
: ClockedObject(p),
- stage2Mmu(NULL), port(NULL), masterId(Request::invldMasterId),
+ stage2Mmu(NULL), port(NULL), requestorId(Request::invldRequestorId),
isStage2(p->is_stage2), tlb(NULL),
currState(NULL), pending(false),
numSquashable(p->num_squash_per_cycle),
}
void
-TableWalker::setMMU(Stage2MMU *m, MasterID master_id)
+TableWalker::setMMU(Stage2MMU *m, RequestorID requestor_id)
{
stage2Mmu = m;
port = &m->getDMAPort();
- masterId = master_id;
+ requestorId = requestor_id;
}
void
(this->*doDescriptor)();
} else {
RequestPtr req = std::make_shared<Request>(
- descAddr, numBytes, flags, masterId);
+ descAddr, numBytes, flags, requestorId);
req->taskId(ContextSwitchTaskId::DMA);
PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
/** Port shared by the two table walkers. */
DmaPort* port;
- /** Master id assigned by the MMU. */
- MasterID masterId;
+ /** Requestor id assigned by the MMU. */
+ RequestorID requestorId;
/** Indicates whether this table walker is part of the stage 2 mmu */
const bool isStage2;
void setTlb(TLB *_tlb) { tlb = _tlb; }
TLB* getTlb() { return tlb; }
- void setMMU(Stage2MMU *m, MasterID master_id);
+ void setMMU(Stage2MMU *m, RequestorID requestor_id);
void memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr,
uint8_t texcb, bool s);
void memAttrsLPAE(ThreadContext *tc, TlbEntry &te,
}
void
-TLB::setMMU(Stage2MMU *m, MasterID master_id)
+TLB::setMMU(Stage2MMU *m, RequestorID requestor_id)
{
stage2Mmu = m;
- tableWalker->setMMU(m, master_id);
+ tableWalker->setMMU(m, requestor_id);
}
bool
TableWalker *getTableWalker() { return tableWalker; }
- void setMMU(Stage2MMU *m, MasterID master_id);
+ void setMMU(Stage2MMU *m, RequestorID requestor_id);
int getsize() const { return size; }
* reference. For ARM this method will always return a valid port
* pointer.
*
- * @return A pointer to the walker master port
+ * @return A pointer to the walker request port
*/
Port *getTableWalkerPort() override;
ArmISA::TLB* dtb = static_cast<TLB*>(thread->getDTBPtr());
req->setVirt(addr, size, flags, thread->pcState().instAddr(),
- Request::funcMasterId);
+ Request::funcRequestorId);
// Translate to physical address
Fault fault = dtb->translateAtomic(req, thread, BaseTLB::Read);
assert(!misaligned_acc);
req = std::make_shared<Request>(vaddr, sizeof(T), 0,
- gpuDynInst->computeUnit()->masterId(), 0,
+ gpuDynInst->computeUnit()->requestorId(), 0,
gpuDynInst->wfDynId,
gpuDynInst->makeAtomicOpFunctor<T>(
&(reinterpret_cast<T*>(gpuDynInst->a_data))[lane],
&(reinterpret_cast<T*>(gpuDynInst->x_data))[lane]));
} else {
req = std::make_shared<Request>(vaddr, req_size, 0,
- gpuDynInst->computeUnit()->masterId(), 0,
+ gpuDynInst->computeUnit()->requestorId(), 0,
gpuDynInst->wfDynId);
}
bool misaligned_acc = split_addr > vaddr;
RequestPtr req = std::make_shared<Request>(vaddr, req_size, 0,
- gpuDynInst->computeUnit()->masterId(), 0,
+ gpuDynInst->computeUnit()->requestorId(), 0,
gpuDynInst->wfDynId);
if (misaligned_acc) {
gpuDynInst->setStatusVector(0, 1);
RequestPtr req = std::make_shared<Request>(0, 0, 0,
gpuDynInst->computeUnit()->
- masterId(), 0,
+ requestorId(), 0,
gpuDynInst->wfDynId);
gpuDynInst->setRequestFlags(req);
gpuDynInst->computeUnit()->
abstract = True
cxx_header = "arch/generic/tlb.hh"
# Ports to connect with other TLB levels
- slave = VectorSlavePort("Port closer to the CPU side")
- master = RequestPort("Port closer to memory side")
+ cpu_side_ports = VectorResponsePort("Ports closer to the CPU side")
+ slave = DeprecatedParam(cpu_side_ports,
+ '`slave` is now called `cpu_side_ports`')
+ mem_side_port = RequestPort("Port closer to memory side")
+ master = DeprecatedParam(mem_side_port,
+ '`master` is now called `mem_side_port`')
class SubOperandList(OperandList):
'''Find all the operands in the given code block. Returns an operand
descriptor list (instance of class OperandList).'''
- def __init__(self, parser, code, master_list):
+ def __init__(self, parser, code, requestor_list):
self.items = []
self.bases = {}
# delete strings and comments so we don't match on operands inside
if op_base in parser.elemToVector:
elem_op = op_base
op_base = parser.elemToVector[elem_op]
- # find this op in the master list
- op_desc = master_list.find_base(op_base)
+ # find this op in the requestor list
+ op_desc = requestor_list.find_base(op_base)
if not op_desc:
- error('Found operand %s which is not in the master list!'
+ error('Found operand %s which is not in the requestor list!'
% op_base)
else:
# See if we've already found this operand
op_desc = self.find_base(op_base)
if not op_desc:
# if not, add a reference to it to this sub list
- self.append(master_list.bases[op_base])
+ self.append(requestor_list.bases[op_base])
# start next search after end of current match
next_pos = match.end()
else {
//If we didn't return, we're setting up another read.
RequestPtr request = std::make_shared<Request>(
- nextRead, oldRead->getSize(), flags, walker->masterId);
+ nextRead, oldRead->getSize(), flags, walker->requestorId);
read = new Packet(request, MemCmd::ReadReq);
read->allocate();
Request::Flags flags = Request::PHYSICAL;
RequestPtr request = std::make_shared<Request>(
- topAddr, sizeof(PTESv39), flags, walker->masterId);
+ topAddr, sizeof(PTESv39), flags, walker->requestorId);
read = new Packet(request, MemCmd::ReadReq);
read->allocate();
// The TLB we're supposed to load.
TLB * tlb;
System * sys;
- MasterID masterId;
+ RequestorID requestorId;
// The number of outstanding walks that can be squashed per cycle.
unsigned numSquashable;
Walker(const Params *params) :
ClockedObject(params), port(name() + ".port", this),
funcState(this, NULL, NULL, true), tlb(NULL), sys(params->system),
- masterId(sys->getMasterId(this)),
+ requestorId(sys->getRequestorId(this)),
numSquashable(params->num_squash_per_cycle),
startWalkWrapperEvent([this]{ startWalkWrapper(); }, name())
{
type = 'X86LocalApic'
cxx_class = 'X86ISA::Interrupts'
cxx_header = 'arch/x86/interrupts.hh'
- int_master = RequestPort("Port for sending interrupt messages")
- int_slave = ResponsePort("Port for receiving interrupt messages")
+
+ int_requestor = RequestPort("Port for sending interrupt messages")
+ int_master = DeprecatedParam(int_requestor,
+ '`int_master` is now called `int_requestor`')
+
+ int_responder = ResponsePort("Port for receiving interrupt messages")
+ int_slave = DeprecatedParam(int_responder,
+ '`int_slave` is now called `int_responder`')
+
int_latency = Param.Latency('1ns', \
"Latency for an interrupt to propagate through this device.")
pio = ResponsePort("Programmed I/O port")
void
X86ISA::Interrupts::init()
{
- panic_if(!intMasterPort.isConnected(),
+ panic_if(!intRequestPort.isConnected(),
"Int port not connected to anything!");
panic_if(!pioPort.isConnected(),
"Pio port of %s not connected to anything!", name());
- intSlavePort.sendRangeChange();
+ intResponsePort.sendRangeChange();
pioPort.sendRangeChange();
}
regs[APIC_INTERRUPT_COMMAND_LOW] = low;
for (auto id: apics) {
PacketPtr pkt = buildIntTriggerPacket(id, message);
- intMasterPort.sendMessage(pkt, sys->isTimingMode(),
+ intRequestPort.sendMessage(pkt, sys->isTimingMode(),
[this](PacketPtr pkt) { completeIPI(pkt); });
}
newVal = regs[APIC_INTERRUPT_COMMAND_LOW];
pendingStartup(false), startupVector(0),
startedUp(false), pendingUnmaskableInt(false),
pendingIPIs(0),
- intSlavePort(name() + ".int_slave", this, this),
- intMasterPort(name() + ".int_master", this, this, p->int_latency),
+ intResponsePort(name() + ".int_responder", this, this),
+ intRequestPort(name() + ".int_requestor", this, this, p->int_latency),
pioPort(this), pioDelay(p->pio_latency)
{
memset(regs, 0, sizeof(regs));
int initialApicId;
// Ports for interrupts.
- IntSlavePort<Interrupts> intSlavePort;
- IntMasterPort<Interrupts> intMasterPort;
+ IntResponsePort<Interrupts> intResponsePort;
+ IntRequestPort<Interrupts> intRequestPort;
// Port for memory mapped register accesses.
PioPort<Interrupts> pioPort;
Port &getPort(const std::string &if_name,
PortID idx=InvalidPortID) override
{
- if (if_name == "int_master") {
- return intMasterPort;
- } else if (if_name == "int_slave") {
- return intSlavePort;
+ if (if_name == "int_requestor") {
+ return intRequestPort;
+ } else if (if_name == "int_responder") {
+ return intResponsePort;
} else if (if_name == "pio") {
return pioPort;
}
Request::Flags flags = oldRead->req->getFlags();
flags.set(Request::UNCACHEABLE, uncacheable);
RequestPtr request = std::make_shared<Request>(
- nextRead, oldRead->getSize(), flags, walker->masterId);
+ nextRead, oldRead->getSize(), flags, walker->requestorId);
read = new Packet(request, MemCmd::ReadReq);
read->allocate();
// If we need to write, adjust the read packet to write the modified
flags.set(Request::UNCACHEABLE);
RequestPtr request = std::make_shared<Request>(
- topAddr, dataSize, flags, walker->masterId);
+ topAddr, dataSize, flags, walker->requestorId);
read = new Packet(request, MemCmd::ReadReq);
read->allocate();
// The TLB we're supposed to load.
TLB * tlb;
System * sys;
- MasterID masterId;
+ RequestorID requestorId;
// The number of outstanding walks that can be squashed per cycle.
unsigned numSquashable;
Walker(const Params *params) :
ClockedObject(params), port(name() + ".port", this),
funcState(this, NULL, NULL, true), tlb(NULL), sys(params->system),
- masterId(sys->getMasterId(this)),
+ requestorId(sys->getRequestorId(this)),
numSquashable(params->num_squash_per_cycle),
startWalkWrapperEvent([this]{ startWalkWrapper(); }, name())
{
if buildEnv['TARGET_ISA'] in ['x86', 'arm', 'riscv']:
_cached_ports += ["itb.walker.port", "dtb.walker.port"]
- _uncached_slave_ports = []
- _uncached_master_ports = []
+ _uncached_interrupt_response_ports = []
+ _uncached_interrupt_request_ports = []
if buildEnv['TARGET_ISA'] == 'x86':
- _uncached_slave_ports += ["interrupts[0].pio",
- "interrupts[0].int_slave"]
- _uncached_master_ports += ["interrupts[0].int_master"]
+ _uncached_interrupt_response_ports += ["interrupts[0].pio",
+ "interrupts[0].int_responder"]
+ _uncached_interrupt_request_ports += ["interrupts[0].int_requestor"]
def createInterruptController(self):
self.interrupts = [ArchInterrupts() for i in range(self.numThreads)]
def connectCachedPorts(self, bus):
for p in self._cached_ports:
- exec('self.%s = bus.slave' % p)
+ exec('self.%s = bus.cpu_side_ports' % p)
def connectUncachedPorts(self, bus):
- for p in self._uncached_slave_ports:
- exec('self.%s = bus.master' % p)
- for p in self._uncached_master_ports:
- exec('self.%s = bus.slave' % p)
+ for p in self._uncached_interrupt_response_ports:
+ exec('self.%s = bus.mem_side_ports' % p)
+ for p in self._uncached_interrupt_request_ports:
+ exec('self.%s = bus.cpu_side_ports' % p)
def connectAllPorts(self, cached_bus, uncached_bus = None):
self.connectCachedPorts(cached_bus)
self.toL2Bus = xbar if xbar else L2XBar()
self.connectCachedPorts(self.toL2Bus)
self.l2cache = l2c
- self.toL2Bus.master = self.l2cache.cpu_side
+ self.toL2Bus.mem_side_ports = self.l2cache.cpu_side
self._cached_ports = ['l2cache.mem_side']
def createThreads(self):
BaseCPU::BaseCPU(Params *p, bool is_checker)
: ClockedObject(p), instCnt(0), _cpuId(p->cpu_id), _socketId(p->socket_id),
- _instMasterId(p->system->getMasterId(this, "inst")),
- _dataMasterId(p->system->getMasterId(this, "data")),
+ _instRequestorId(p->system->getRequestorId(this, "inst")),
+ _dataRequestorId(p->system->getRequestorId(this, "data")),
_taskId(ContextSwitchTaskId::Unknown), _pid(invldPid),
_switchedOut(p->switched_out), _cacheLineSize(p->system->cacheLineSize()),
interrupts(p->interrupts), numThreads(p->numThreads), system(p->system),
if (secondAddr > addr)
size = secondAddr - addr;
- req->setVirt(addr, size, 0x0, dataMasterId(), tc->instAddr());
+ req->setVirt(addr, size, 0x0, dataRequestorId(), tc->instAddr());
// translate to physical address
Fault fault = dtb->translateAtomic(req, tc, BaseTLB::Read);
const uint32_t _socketId;
/** instruction side request id that must be placed in all requests */
- MasterID _instMasterId;
+ RequestorID _instRequestorId;
/** data side request id that must be placed in all requests */
- MasterID _dataMasterId;
+ RequestorID _dataRequestorId;
/** An intrenal representation of a task identifier within gem5. This is
* used so the CPU can add which taskId (which is an internal representation
uint32_t socketId() const { return _socketId; }
/** Reads this CPU's unique data requestor ID */
- MasterID dataMasterId() const { return _dataMasterId; }
+ RequestorID dataRequestorId() const { return _dataRequestorId; }
/** Reads this CPU's unique instruction requestor ID */
- MasterID instMasterId() const { return _instMasterId; }
+ RequestorID instRequestorId() const { return _instRequestorId; }
/**
* Get a port on this CPU. All CPUs have a data and
uint32_t socketId() const { return cpu->socketId(); }
/** Read this CPU's data requestor ID */
- MasterID masterId() const { return cpu->dataMasterId(); }
+ RequestorID requestorId() const { return cpu->dataRequestorId(); }
/** Read this context's system-wide ID **/
ContextID contextId() const { return thread->contextId(); }
void
CheckerCPU::init()
{
- masterId = systemPtr->getMasterId(this);
+ requestorId = systemPtr->getRequestorId(this);
}
CheckerCPU::CheckerCPU(Params *p)
auto it_end = byte_enable.cbegin() + (size - size_left);
if (isAnyActiveElement(it_start, it_end)) {
mem_req = std::make_shared<Request>(frag_addr, frag_size,
- flags, masterId, thread->pcState().instAddr(),
+ flags, requestorId, thread->pcState().instAddr(),
tc->contextId());
mem_req->setByteEnable(std::vector<bool>(it_start, it_end));
}
} else {
mem_req = std::make_shared<Request>(frag_addr, frag_size,
- flags, masterId, thread->pcState().instAddr(),
+ flags, requestorId, thread->pcState().instAddr(),
tc->contextId());
}
using VecRegContainer = TheISA::VecRegContainer;
/** id attached to all issued requests */
- MasterID masterId;
+ RequestorID requestorId;
public:
void init() override;
if (!curMacroStaticInst) {
// set up memory request for instruction fetch
auto mem_req = std::make_shared<Request>(
- fetch_PC, sizeof(MachInst), 0, masterId, fetch_PC,
+ fetch_PC, sizeof(MachInst), 0, requestorId, fetch_PC,
thread->contextId());
mem_req->setVirt(fetch_PC, sizeof(MachInst),
- Request::INST_FETCH, masterId,
+ Request::INST_FETCH, requestorId,
thread->instAddr());
fault = itb->translateFunctional(
syncThreadContext();
RequestPtr mmio_req = std::make_shared<Request>(
- paddr, size, Request::UNCACHEABLE, dataMasterId());
+ paddr, size, Request::UNCACHEABLE, dataRequestorId());
mmio_req->setContext(tc->contextId());
// Some architectures do need to massage physical addresses a bit
for (int i = 0; i < count; ++i) {
RequestPtr io_req = std::make_shared<Request>(
pAddr, kvm_run.io.size,
- Request::UNCACHEABLE, dataMasterId());
+ Request::UNCACHEABLE, dataRequestorId());
io_req->setContext(tc->contextId());
request->request->setContext(cpu.threads[tid]->getTC()->contextId());
request->request->setVirt(
- aligned_pc, request_size, Request::INST_FETCH, cpu.instMasterId(),
+ aligned_pc, request_size, Request::INST_FETCH, cpu.instRequestorId(),
/* I've no idea why we need the PC, but give it */
thread.pc.instAddr());
if (byte_enable.empty()) {
fragment->setVirt(
fragment_addr, fragment_size, request->getFlags(),
- request->masterId(), request->getPC());
+ request->requestorId(), request->getPC());
} else {
// Set up byte-enable mask for the current fragment
auto it_start = byte_enable.begin() +
if (isAnyActiveElement(it_start, it_end)) {
fragment->setVirt(
fragment_addr, fragment_size, request->getFlags(),
- request->masterId(), request->getPC());
+ request->requestorId(), request->getPC());
fragment->setByteEnable(std::vector<bool>(it_start, it_end));
} else {
disabled_fragment = true;
int cid = cpu.threads[inst->id.threadId]->getTC()->contextId();
request->request->setContext(cid);
request->request->setVirt(
- addr, size, flags, cpu.dataMasterId(),
+ addr, size, flags, cpu.dataRequestorId(),
/* I've no idea why we need the PC, but give it */
inst->pc.instAddr(), std::move(amo_op));
request->request->setByteEnable(byte_enable);
{
// Verify that we still have an interrupt to handle
if (!cpu->checkInterrupts(0)) {
- DPRINTF(Commit, "Pending interrupt is cleared by master before "
+ DPRINTF(Commit, "Pending interrupt is cleared by requestor before "
"it got handled. Restart fetching from the orig path.\n");
toIEW->commitInfo[0].clearInterrupt = true;
interrupt = NoFault;
// notify l1 d-cache (ruby) that core has aborted transaction
RequestPtr req =
- std::make_shared<Request>(addr, size, flags, _dataMasterId);
+ std::make_shared<Request>(addr, size, flags, _dataRequestorId);
req->taskId(taskId());
req->setContext(this->thread[tid]->contextId());
// Build request here.
RequestPtr mem_req = std::make_shared<Request>(
fetchBufferBlockPC, fetchBufferSize,
- Request::INST_FETCH, cpu->instMasterId(), pc,
+ Request::INST_FETCH, cpu->instRequestorId(), pc,
cpu->thread[tid]->contextId());
mem_req->taskId(cpu->taskId());
if (byte_enable.empty() ||
isAnyActiveElement(byte_enable.begin(), byte_enable.end())) {
auto request = std::make_shared<Request>(
- addr, size, _flags, _inst->masterId(),
+ addr, size, _flags, _inst->requestorId(),
_inst->instAddr(), _inst->contextId(),
std::move(_amo_op));
if (!byte_enable.empty()) {
*/
void
setVirt(Addr vaddr, unsigned size, Request::Flags flags_,
- MasterID mid, Addr pc)
+ RequestorID requestor_id, Addr pc)
{
- request()->setVirt(vaddr, size, flags_, mid, pc);
+ request()->setVirt(vaddr, size, flags_, requestor_id, pc);
}
void
uint32_t size_so_far = 0;
mainReq = std::make_shared<Request>(base_addr,
- _size, _flags, _inst->masterId(),
+ _size, _flags, _inst->requestorId(),
_inst->instAddr(), _inst->contextId());
if (!_byteEnable.empty()) {
mainReq->setByteEnable(_byteEnable);
auto it_start = byte_enable.begin() + (size - (frag_size + size_left));
auto it_end = byte_enable.begin() + (size - size_left);
if (isAnyActiveElement(it_start, it_end)) {
- req->setVirt(frag_addr, frag_size, flags, dataMasterId(),
+ req->setVirt(frag_addr, frag_size, flags, dataRequestorId(),
inst_addr);
req->setByteEnable(std::vector<bool>(it_start, it_end));
} else {
predicate = false;
}
} else {
- req->setVirt(frag_addr, frag_size, flags, dataMasterId(),
+ req->setVirt(frag_addr, frag_size, flags, dataRequestorId(),
inst_addr);
req->setByteEnable(std::vector<bool>());
}
dcache_latency = 0;
req->taskId(taskId());
- req->setVirt(addr, size, flags, dataMasterId(),
+ req->setVirt(addr, size, flags, dataRequestorId(),
thread->pcState().instAddr(), std::move(amo_op));
// translate to physical address
DPRINTF(Fetch, "Fetch: Inst PC:%08p, Fetch PC:%08p\n", instAddr, fetchPC);
req->setVirt(fetchPC, sizeof(MachInst), Request::INST_FETCH,
- instMasterId(), instAddr);
+ instRequestorId(), instAddr);
}
auto map_itr = bbMap.find(currentBBV);
if (map_itr == bbMap.end()){
// If a new (previously unseen) basic block is found,
- // add a new unique id, record num of insts and insert into bbMap.
+ // add a new unique id, record num of insts and insert
+ // into bbMap.
BBInfo info;
info.id = bbMap.size() + 1;
info.insts = currentBBVInstCount;
traceData->setMem(addr, size, flags);
RequestPtr req = std::make_shared<Request>(
- addr, size, flags, dataMasterId(), pc, thread->contextId());
+ addr, size, flags, dataRequestorId(), pc, thread->contextId());
if (!byte_enable.empty()) {
req->setByteEnable(byte_enable);
}
traceData->setMem(addr, size, flags);
RequestPtr req = std::make_shared<Request>(
- addr, size, flags, dataMasterId(), pc, thread->contextId());
+ addr, size, flags, dataRequestorId(), pc, thread->contextId());
if (!byte_enable.empty()) {
req->setByteEnable(byte_enable);
}
traceData->setMem(addr, size, flags);
RequestPtr req = make_shared<Request>(addr, size, flags,
- dataMasterId(), pc, thread->contextId(),
+ dataRequestorId(), pc, thread->contextId(),
std::move(amo_op));
assert(req->hasAtomicOpFunctor());
traceData->setMem(addr, size, flags);
RequestPtr req = std::make_shared<Request>(
- addr, size, flags, dataMasterId());
+ addr, size, flags, dataRequestorId());
req->setPC(pc);
req->setContext(thread->contextId());
// notify l1 d-cache (ruby) that core has aborted transaction
RequestPtr req = std::make_shared<Request>(
- addr, size, flags, dataMasterId());
+ addr, size, flags, dataRequestorId());
req->setPC(pc);
req->setContext(thread->contextId());
DirectedGenerator::DirectedGenerator(const Params *p)
: SimObject(p),
- masterId(p->system->getMasterId(this))
+ requestorId(p->system->getRequestorId(this))
{
m_num_cpus = p->num_cpus;
m_directed_tester = NULL;
protected:
int m_num_cpus;
- MasterID masterId;
+ RequestorID requestorId;
RubyDirectedTester* m_directed_tester;
};
Packet::Command cmd;
// For simplicity, requests are assumed to be 1 byte-sized
- RequestPtr req = std::make_shared<Request>(m_address, 1, flags, masterId);
+ RequestPtr req = std::make_shared<Request>(m_address, 1, flags,
+ requestorId);
//
// Based on the current state, issue a load or a store
class RubyDirectedTester(ClockedObject):
    """Tester that drives Ruby caches with requests from a directed generator."""
    type = 'RubyDirectedTester'
    cxx_header = "cpu/testers/directedtest/RubyDirectedTester.hh"
    # One request port per simulated CPU.
    cpuPort = VectorRequestPort("the cpu ports")
    requests_to_complete = Param.Int("checks to complete")
    generator = Param.DirectedGenerator("the request generator")
Request::Flags flags;
// For simplicity, requests are assumed to be 1 byte-sized
- RequestPtr req = std::make_shared<Request>(m_address, 1, flags, masterId);
+ RequestPtr req = std::make_shared<Request>(m_address, 1, flags,
+ requestorId);
Packet::Command cmd;
bool do_write = (random_mt.random(0, 100) < m_percent_writes);
injVnet(p->inj_vnet),
precision(p->precision),
responseLimit(p->response_limit),
- masterId(p->system->getMasterId(this))
+ requestorId(p->system->getRequestorId(this))
{
// set up counters
noResponseCycles = 0;
if (injReqType == 0) {
// generate packet for virtual network 0
requestType = MemCmd::ReadReq;
- req = std::make_shared<Request>(paddr, access_size, flags, masterId);
+ req = std::make_shared<Request>(paddr, access_size, flags,
+ requestorId);
} else if (injReqType == 1) {
// generate packet for virtual network 1
requestType = MemCmd::ReadReq;
flags.set(Request::INST_FETCH);
req = std::make_shared<Request>(
- 0x0, access_size, flags, masterId, 0x0, 0);
+ 0x0, access_size, flags, requestorId, 0x0, 0);
req->setPaddr(paddr);
} else { // if (injReqType == 2)
// generate packet for virtual network 2
requestType = MemCmd::WriteReq;
- req = std::make_shared<Request>(paddr, access_size, flags, masterId);
+ req = std::make_shared<Request>(paddr, access_size, flags,
+ requestorId);
}
req->setContext(id);
const Cycles responseLimit;
- MasterID masterId;
+ RequestorID requestorId;
void completeRequest(PacketPtr pkt);
percentReads(p->percent_reads),
percentFunctional(p->percent_functional),
percentUncacheable(p->percent_uncacheable),
- masterId(p->system->getMasterId(this)),
+ requestorId(p->system->getRequestorId(this)),
blockSize(p->system->cacheLineSize()),
blockAddrMask(blockSize - 1),
progressInterval(p->progress_interval),
bool do_functional = (random_mt.random(0, 100) < percentFunctional) &&
!uncacheable;
- RequestPtr req = std::make_shared<Request>(paddr, 1, flags, masterId);
+ RequestPtr req = std::make_shared<Request>(paddr, 1, flags, requestorId);
req->setContext(id);
outstandingAddrs.insert(paddr);
const unsigned percentUncacheable;
/** Request id for all generated traffic */
- MasterID masterId;
+ RequestorID requestorId;
unsigned int id;
// Prefetches are assumed to be 0 sized
RequestPtr req = std::make_shared<Request>(
- m_address, 0, flags, m_tester_ptr->masterId());
+ m_address, 0, flags, m_tester_ptr->requestorId());
req->setPC(m_pc);
req->setContext(index);
Request::Flags flags;
RequestPtr req = std::make_shared<Request>(
- m_address, CHECK_SIZE, flags, m_tester_ptr->masterId());
+ m_address, CHECK_SIZE, flags, m_tester_ptr->requestorId());
req->setPC(m_pc);
Packet::Command cmd;
// Stores are assumed to be 1 byte-sized
RequestPtr req = std::make_shared<Request>(
- writeAddr, 1, flags, m_tester_ptr->masterId());
+ writeAddr, 1, flags, m_tester_ptr->requestorId());
req->setPC(m_pc);
req->setContext(index);
// Checks are sized depending on the number of bytes written
RequestPtr req = std::make_shared<Request>(
- m_address, CHECK_SIZE, flags, m_tester_ptr->masterId());
+ m_address, CHECK_SIZE, flags, m_tester_ptr->requestorId());
req->setPC(m_pc);
req->setContext(index);
: ClockedObject(p),
checkStartEvent([this]{ wakeup(); }, "RubyTester tick",
false, Event::CPU_Tick_Pri),
- _masterId(p->system->getMasterId(this)),
+ _requestorId(p->system->getRequestorId(this)),
m_checkTable_ptr(nullptr),
m_num_cpus(p->num_cpus),
m_checks_to_complete(p->checks_to_complete),
void print(std::ostream& out) const;
bool getCheckFlush() { return m_check_flush; }
- MasterID masterId() { return _masterId; }
+ RequestorID requestorId() { return _requestorId; }
protected:
EventFunctionWrapper checkStartEvent;
- MasterID _masterId;
+ RequestorID _requestorId;
private:
void hitCallback(NodeID proc, SubBlock* data);
type = 'RubyTester'
cxx_header = "cpu/testers/rubytest/RubyTester.hh"
num_cpus = Param.Int("number of cpus / RubyPorts")
- cpuInstDataPort = VectorMasterPort("cpu combo ports to inst & data caches")
- cpuInstPort = VectorMasterPort("cpu ports to only inst caches")
- cpuDataPort = VectorMasterPort("cpu ports to only data caches")
+ cpuInstDataPort = VectorRequestPort("cpu combo ports to inst & "
+ "data caches")
+ cpuInstPort = VectorRequestPort("cpu ports to only inst caches")
+ cpuDataPort = VectorRequestPort("cpu ports to only data caches")
checks_to_complete = Param.Int(100, "checks to complete")
deadlock_threshold = Param.Int(50000, "how often to check for deadlock")
wakeup_frequency = Param.Int(10, "number of cycles between wakeups")
# generated (Random, Linear, Trace etc)
# Closed set of stream-generation modes for the traffic generator.
class StreamGenType(ScopedEnum): vals = [ 'none', 'fixed', 'random' ]
# The traffic generator is a requestor module that generates stimuli for
# the memory system, based on a collection of simple behaviours that
# are either probabilistic or based on traces. It can be used stand
# alone for creating test cases for interconnect and memory
cxx_header = "cpu/testers/traffic_gen/traffic_gen.hh"
# Port used for sending requests and receiving responses
- port = RequestPort("Master port")
+ port = RequestPort("This port sends requests and receives responses")
# System used to determine the mode of the memory system
system = Param.System(Parent.any, "System this generator is part of")
def connectCachedPorts(self, bus):
if hasattr(self, '_cached_ports') and (len(self._cached_ports) > 0):
for p in self._cached_ports:
- exec('self.%s = bus.slave' % p)
+ exec('self.%s = bus.cpu_side_ports' % p)
else:
- self.port = bus.slave
+ self.port = bus.cpu_side_ports
def connectAllPorts(self, cached_bus, uncached_bus = None):
self.connectCachedPorts(cached_bus)
retryPktTick(0), blockedWaitingResp(false),
updateEvent([this]{ update(); }, name()),
stats(this),
- masterID(system->getMasterId(this)),
+ requestorId(system->getRequestorId(this)),
streamGenerator(StreamGen::create(p))
{
}
std::shared_ptr<BaseGen>
BaseTrafficGen::createIdle(Tick duration)
{
- return std::shared_ptr<BaseGen>(new IdleGen(*this, masterID, duration));
+ return std::shared_ptr<BaseGen>(new IdleGen(*this, requestorId,
+ duration));
}
std::shared_ptr<BaseGen>
BaseTrafficGen::createExit(Tick duration)
{
- return std::shared_ptr<BaseGen>(new ExitGen(*this, masterID, duration));
+ return std::shared_ptr<BaseGen>(new ExitGen(*this, requestorId,
+ duration));
}
std::shared_ptr<BaseGen>
Tick min_period, Tick max_period,
uint8_t read_percent, Addr data_limit)
{
- return std::shared_ptr<BaseGen>(new LinearGen(*this, masterID,
+ return std::shared_ptr<BaseGen>(new LinearGen(*this, requestorId,
duration, start_addr,
end_addr, blocksize,
system->cacheLineSize(),
Tick min_period, Tick max_period,
uint8_t read_percent, Addr data_limit)
{
- return std::shared_ptr<BaseGen>(new RandomGen(*this, masterID,
+ return std::shared_ptr<BaseGen>(new RandomGen(*this, requestorId,
duration, start_addr,
end_addr, blocksize,
system->cacheLineSize(),
Enums::AddrMap addr_mapping,
unsigned int nbr_of_ranks)
{
- return std::shared_ptr<BaseGen>(new DramGen(*this, masterID,
+ return std::shared_ptr<BaseGen>(new DramGen(*this, requestorId,
duration, start_addr,
end_addr, blocksize,
system->cacheLineSize(),
unsigned int nbr_of_ranks,
unsigned int max_seq_count_per_rank)
{
- return std::shared_ptr<BaseGen>(new DramRotGen(*this, masterID,
+ return std::shared_ptr<BaseGen>(new DramRotGen(*this, requestorId,
duration, start_addr,
end_addr, blocksize,
system->cacheLineSize(),
unsigned int nbr_of_ranks_nvm,
uint8_t nvm_percent)
{
- return std::shared_ptr<BaseGen>(new HybridGen(*this, masterID,
+ return std::shared_ptr<BaseGen>(new HybridGen(*this, requestorId,
duration, start_addr_dram,
end_addr_dram, blocksize_dram,
start_addr_nvm,
Enums::AddrMap addr_mapping,
unsigned int nbr_of_ranks)
{
- return std::shared_ptr<BaseGen>(new NvmGen(*this, masterID,
+ return std::shared_ptr<BaseGen>(new NvmGen(*this, requestorId,
duration, start_addr,
end_addr, blocksize,
system->cacheLineSize(),
{
#if HAVE_PROTOBUF
return std::shared_ptr<BaseGen>(
- new TraceGen(*this, masterID, duration, trace_file, addr_offset));
+ new TraceGen(*this, requestorId, duration, trace_file, addr_offset));
#else
panic("Can't instantiate trace generation without Protobuf support!\n");
#endif
struct BaseTrafficGenParams;
/**
- * The traffic generator is a master module that generates stimuli for
+ * The traffic generator is a module that generates stimuli for
* the memory system, based on a collection of simple generator
* behaviours that are either probabilistic or based on traces. It can
* be used stand alone for creating test cases for interconnect and
const int maxOutstandingReqs;
- /** Master port specialisation for the traffic generator */
+ /** Request port specialisation for the traffic generator */
class TrafficGenPort : public RequestPort
{
public:
*/
void update();
- /** The instance of master port used by the traffic generator. */
+ /** The instance of request port used by the traffic generator. */
TrafficGenPort port;
/** Packet waiting to be sent. */
virtual std::shared_ptr<BaseGen> nextGenerator() = 0;
/**
- * MasterID used in generated requests.
+ * RequestorID used in generated requests.
*/
- const MasterID masterID;
+ const RequestorID requestorId;
/** Currently active generator */
std::shared_ptr<BaseGen> activeGenerator;
#include "debug/TrafficGen.hh"
#include "sim/system.hh"
-BaseGen::BaseGen(SimObject &obj, MasterID master_id, Tick _duration)
- : _name(obj.name()), masterID(master_id),
+BaseGen::BaseGen(SimObject &obj, RequestorID requestor_id, Tick _duration)
+ : _name(obj.name()), requestorId(requestor_id),
duration(_duration)
{
}
Request::FlagsType flags)
{
// Create new request
- RequestPtr req = std::make_shared<Request>(addr, size, flags, masterID);
+ RequestPtr req = std::make_shared<Request>(addr, size, flags,
+ requestorId);
// Dummy PC to have PC-based prefetchers latch on; get entropy into higher
// bits
- req->setPC(((Addr)masterID) << 2);
+ req->setPC(((Addr)requestorId) << 2);
// Embed it in a packet
PacketPtr pkt = new Packet(req, cmd);
pkt->dataDynamic(pkt_data);
if (cmd.isWrite()) {
- std::fill_n(pkt_data, req->getSize(), (uint8_t)masterID);
+ std::fill_n(pkt_data, req->getSize(), (uint8_t)requestorId);
}
return pkt;
}
StochasticGen::StochasticGen(SimObject &obj,
- MasterID master_id, Tick _duration,
+ RequestorID requestor_id, Tick _duration,
Addr start_addr, Addr end_addr,
Addr _blocksize, Addr cacheline_size,
Tick min_period, Tick max_period,
uint8_t read_percent, Addr data_limit)
- : BaseGen(obj, master_id, _duration),
+ : BaseGen(obj, requestor_id, _duration),
startAddr(start_addr), endAddr(end_addr),
blocksize(_blocksize), cacheLineSize(cacheline_size),
minPeriod(min_period), maxPeriod(max_period),
/** Name to use for status and debug printing */
const std::string _name;
- /** The MasterID used for generating requests */
- const MasterID masterID;
+ /** The RequestorID used for generating requests */
+ const RequestorID requestorId;
/**
* Generate a new request and associated packet
* Create a base generator.
*
* @param obj simobject owning the generator
- * @param master_id MasterID set on each request
+ * @param requestor_id RequestorID set on each request
* @param _duration duration of this state before transitioning
*/
- BaseGen(SimObject &obj, MasterID master_id, Tick _duration);
+ BaseGen(SimObject &obj, RequestorID requestor_id, Tick _duration);
virtual ~BaseGen() { }
{
public:
StochasticGen(SimObject &obj,
- MasterID master_id, Tick _duration,
+ RequestorID requestor_id, Tick _duration,
Addr start_addr, Addr end_addr,
Addr _blocksize, Addr cacheline_size,
Tick min_period, Tick max_period,
#include "enums/AddrMap.hh"
DramGen::DramGen(SimObject &obj,
- MasterID master_id, Tick _duration,
+ RequestorID requestor_id, Tick _duration,
Addr start_addr, Addr end_addr,
Addr _blocksize, Addr cacheline_size,
Tick min_period, Tick max_period,
unsigned int nbr_of_banks_util,
Enums::AddrMap addr_mapping,
unsigned int nbr_of_ranks)
- : RandomGen(obj, master_id, _duration, start_addr, end_addr,
+ : RandomGen(obj, requestor_id, _duration, start_addr, end_addr,
_blocksize, cacheline_size, min_period, max_period,
read_percent, data_limit),
numSeqPkts(num_seq_pkts), countNumSeqPkts(0), addr(0),
* Create a DRAM address sequence generator.
*
* @param obj SimObject owning this sequence generator
- * @param master_id MasterID related to the memory requests
+ * @param requestor_id RequestorID related to the memory requests
* @param _duration duration of this state before transitioning
* @param start_addr Start address
* @param end_addr End address
* assumes single channel system
*/
DramGen(SimObject &obj,
- MasterID master_id, Tick _duration,
+ RequestorID requestor_id, Tick _duration,
Addr start_addr, Addr end_addr,
Addr _blocksize, Addr cacheline_size,
Tick min_period, Tick max_period,
* 3) Ranks per channel
*
* @param obj SimObject owning this sequence generator
- * @param master_id MasterID related to the memory requests
+ * @param requestor_id RequestorID related to the memory requests
* @param _duration duration of this state before transitioning
* @param start_addr Start address
* @param end_addr End address
* @param addr_mapping Address mapping to be used,
* assumes single channel system
*/
- DramRotGen(SimObject &obj, MasterID master_id, Tick _duration,
+ DramRotGen(SimObject &obj, RequestorID requestor_id, Tick _duration,
Addr start_addr, Addr end_addr,
Addr _blocksize, Addr cacheline_size,
Tick min_period, Tick max_period,
Enums::AddrMap addr_mapping,
unsigned int nbr_of_ranks,
unsigned int max_seq_count_per_rank)
- : DramGen(obj, master_id, _duration, start_addr, end_addr,
+ : DramGen(obj, requestor_id, _duration, start_addr, end_addr,
_blocksize, cacheline_size, min_period, max_period,
read_percent, data_limit,
num_seq_pkts, page_size, nbr_of_banks_DRAM,
public:
- ExitGen(SimObject &obj, MasterID master_id, Tick _duration)
- : BaseGen(obj, master_id, _duration)
+ ExitGen(SimObject &obj, RequestorID requestor_id, Tick _duration)
+ : BaseGen(obj, requestor_id, _duration)
{ }
void enter();
using namespace std;
HybridGen::HybridGen(SimObject &obj,
- MasterID master_id, Tick _duration,
+ RequestorID requestor_id, Tick _duration,
Addr start_addr_dram, Addr end_addr_dram,
Addr blocksize_dram,
Addr start_addr_nvm, Addr end_addr_nvm,
unsigned int nbr_of_ranks_dram,
unsigned int nbr_of_ranks_nvm,
uint8_t nvm_percent)
- : BaseGen(obj, master_id, _duration),
+ : BaseGen(obj, requestor_id, _duration),
startAddrDram(start_addr_dram),
endAddrDram(end_addr_dram),
blocksizeDram(blocksize_dram),
* Create a hybrid DRAM + NVM address sequence generator.
*
* @param obj SimObject owning this sequence generator
- * @param master_id MasterID related to the memory requests
+ * @param requestor_id RequestorID related to the memory requests
* @param _duration duration of this state before transitioning
* @param start_addr_dram Start address for DRAM range
* @param end_addr_dram End address for DRAM range
* @param nvm_percent Percentage of traffic going to NVM
*/
HybridGen(SimObject &obj,
- MasterID master_id, Tick _duration,
+ RequestorID requestor_id, Tick _duration,
Addr start_addr_dram, Addr end_addr_dram,
Addr blocksize_dram,
Addr start_addr_nvm, Addr end_addr_nvm,
public:
- IdleGen(SimObject &obj, MasterID master_id, Tick _duration)
- : BaseGen(obj, master_id, _duration)
+ IdleGen(SimObject &obj, RequestorID requestor_id, Tick _duration)
+ : BaseGen(obj, requestor_id, _duration)
{ }
void enter();
* time.
*
* @param obj SimObject owning this sequence generator
- * @param master_id MasterID related to the memory requests
+ * @param requestor_id RequestorID related to the memory requests
* @param _duration duration of this state before transitioning
* @param start_addr Start address
* @param end_addr End address
* @param data_limit Upper limit on how much data to read/write
*/
LinearGen(SimObject &obj,
- MasterID master_id, Tick _duration,
+ RequestorID requestor_id, Tick _duration,
Addr start_addr, Addr end_addr,
Addr _blocksize, Addr cacheline_size,
Tick min_period, Tick max_period,
uint8_t read_percent, Addr data_limit)
- : StochasticGen(obj, master_id, _duration, start_addr, end_addr,
+ : StochasticGen(obj, requestor_id, _duration, start_addr, end_addr,
_blocksize, cacheline_size, min_period, max_period,
read_percent, data_limit),
nextAddr(0),
#include "enums/AddrMap.hh"
NvmGen::NvmGen(SimObject &obj,
- MasterID master_id, Tick _duration,
+ RequestorID requestor_id, Tick _duration,
Addr start_addr, Addr end_addr,
Addr _blocksize, Addr cacheline_size,
Tick min_period, Tick max_period,
unsigned int nbr_of_banks_util,
Enums::AddrMap addr_mapping,
unsigned int nbr_of_ranks)
- : RandomGen(obj, master_id, _duration, start_addr, end_addr,
+ : RandomGen(obj, requestor_id, _duration, start_addr, end_addr,
_blocksize, cacheline_size, min_period, max_period,
read_percent, data_limit),
numSeqPkts(num_seq_pkts), countNumSeqPkts(0), addr(0),
* Create a NVM address sequence generator.
*
* @param obj SimObject owning this sequence generator
- * @param master_id MasterID related to the memory requests
+ * @param requestor_id RequestorID related to the memory requests
* @param _duration duration of this state before transitioning
* @param start_addr Start address
* @param end_addr End address
* assumes single channel system
*/
NvmGen(SimObject &obj,
- MasterID master_id, Tick _duration,
+ RequestorID requestor_id, Tick _duration,
Addr start_addr, Addr end_addr,
Addr _blocksize, Addr cacheline_size,
Tick min_period, Tick max_period,
*
* @param gen Traffic generator owning this sequence generator
* @param _duration duration of this state before transitioning
+ * @param requestor_id RequestorID related to the memory requests
* @param start_addr Start address
* @param end_addr End address
* @param _blocksize Size used for transactions injected
+ * @param cacheline_size cache line size in the system
* @param min_period Lower limit of random inter-transaction time
* @param max_period Upper limit of random inter-transaction time
* @param read_percent Percent of transactions that are reads
* @param data_limit Upper limit on how much data to read/write
*/
RandomGen(SimObject &obj,
- MasterID master_id, Tick _duration,
+ RequestorID requestor_id, Tick _duration,
Addr start_addr, Addr end_addr,
Addr _blocksize, Addr cacheline_size,
Tick min_period, Tick max_period,
uint8_t read_percent, Addr data_limit)
- : StochasticGen(obj, master_id, _duration, start_addr, end_addr,
+ : StochasticGen(obj, requestor_id, _duration, start_addr, end_addr,
_blocksize, cacheline_size, min_period, max_period,
read_percent, data_limit),
dataManipulated(0)
* Create a trace generator.
*
* @param obj SimObject owning this sequence generator
- * @param master_id MasterID related to the memory requests
+ * @param requestor_id RequestorID related to the memory requests
* @param _duration duration of this state before transitioning
* @param trace_file File to read the transactions from
* @param addr_offset Positive offset to add to trace address
*/
- TraceGen(SimObject &obj, MasterID master_id, Tick _duration,
+ TraceGen(SimObject &obj, RequestorID requestor_id, Tick _duration,
const std::string& trace_file, Addr addr_offset)
- : BaseGen(obj, master_id, _duration),
+ : BaseGen(obj, requestor_id, _duration),
trace(trace_file),
tickOffset(0),
addrOffset(addr_offset),
struct TrafficGenParams;
/**
- * The traffic generator is a master module that generates stimuli for
+ * The traffic generator is a module that generates stimuli for
* the memory system, based on a collection of simple behaviours that
* are either probabilistic or based on traces. It can be used stand
* alone for creating test cases for interconnect and memory
* components that are not yet modelled in detail, e.g. a video engine
* or baseband subsystem in an SoC.
*
- * The traffic generator has a single master port that is used to send
+ * The traffic generator has a single request port that is used to send
* requests, independent of the specific behaviour. The behaviour of
* the traffic generator is specified in a configuration file, and this
* file describes a state transition graph where each state is a
: BaseCPU(params),
icachePort(this),
dcachePort(this),
- instMasterID(params->system->getMasterId(this, "inst")),
- dataMasterID(params->system->getMasterId(this, "data")),
+ instRequestorID(params->system->getRequestorId(this, "inst")),
+ dataRequestorID(params->system->getRequestorId(this, "data")),
instTraceFile(params->instTraceFile),
dataTraceFile(params->dataTraceFile),
- icacheGen(*this, "iside", icachePort, instMasterID, instTraceFile),
- dcacheGen(*this, "dside", dcachePort, dataMasterID, dataTraceFile,
+ icacheGen(*this, "iside", icachePort, instRequestorID, instTraceFile),
+ dcacheGen(*this, "dside", dcachePort, dataRequestorID, dataTraceFile,
params),
icacheNextEvent([this]{ schedIcacheNext(); }, name()),
dcacheNextEvent([this]{ schedDcacheNext(); }, name()),
// Create a request and the packet containing request
auto req = std::make_shared<Request>(
- node_ptr->physAddr, node_ptr->size, node_ptr->flags, masterID);
+ node_ptr->physAddr, node_ptr->size, node_ptr->flags, requestorId);
req->setReqInstSeqNum(node_ptr->seqNum);
// If this is not done it triggers assert in L1 cache for invalid contextId
// of the request.
if (node_ptr->virtAddr != 0) {
req->setVirt(node_ptr->virtAddr, node_ptr->size,
- node_ptr->flags, masterID, node_ptr->pc);
+ node_ptr->flags, requestorId, node_ptr->pc);
req->setPaddr(node_ptr->physAddr);
req->setReqInstSeqNum(node_ptr->seqNum);
}
}
pkt->dataDynamic(pkt_data);
- // Call MasterPort method to send a timing request for this packet
+ // Call RequestPort method to send a timing request for this packet
bool success = port.sendTimingReq(pkt);
++elasticStats.numSendAttempted;
{
// Create new request
- auto req = std::make_shared<Request>(addr, size, flags, masterID);
+ auto req = std::make_shared<Request>(addr, size, flags, requestorId);
req->setPC(pc);
// If this is not done it triggers assert in L1 cache for invalid contextId
memset(pkt_data, 0xA, req->getSize());
}
- // Call MasterPort method to send a timing request for this packet
+ // Call RequestPort method to send a timing request for this packet
bool success = port.sendTimingReq(pkt);
if (!success) {
// If it fails, save the packet to retry when a retry is signalled by
* same trace is used for playback on different memory sub-systems.
*
* The TraceCPU inherits from BaseCPU so some virtual methods need to be
- * defined. It has two port subclasses inherited from MasterPort for
+ * defined. It has two port subclasses inherited from RequestPort for
* instruction and data ports. It issues the memory requests deducing the
* timing from the trace and without performing real execution of micro-ops. As
* soon as the last dependency for an instruction is complete, its
/** Port to connect to L1 data cache. */
DcachePort dcachePort;
- /** Master id for instruction read requests. */
- const MasterID instMasterID;
+ /** Requestor id for instruction read requests. */
+ const RequestorID instRequestorID;
- /** Master id for data read and write requests. */
- const MasterID dataMasterID;
+ /** Requestor id for data read and write requests. */
+ const RequestorID dataRequestorID;
/** File names for input instruction and data traces. */
std::string instTraceFile, dataTraceFile;
public:
/* Constructor */
FixedRetryGen(TraceCPU& _owner, const std::string& _name,
- RequestPort& _port, MasterID master_id,
+ RequestPort& _port, RequestorID requestor_id,
const std::string& trace_file)
: owner(_owner),
port(_port),
- masterID(master_id),
+ requestorId(requestor_id),
trace(trace_file),
genName(owner.name() + ".fixedretry." + _name),
retryPkt(nullptr),
/** Reference of the port to be used to issue memory requests. */
RequestPort& port;
- /** MasterID used for the requests being sent. */
- const MasterID masterID;
+ /** RequestorID used for the requests being sent. */
+ const RequestorID requestorId;
/** Input stream used for reading the input trace file. */
InputStream trace;
public:
/* Constructor */
ElasticDataGen(TraceCPU& _owner, const std::string& _name,
- RequestPort& _port, MasterID master_id,
+ RequestPort& _port, RequestorID requestor_id,
const std::string& trace_file, TraceCPUParams *params)
: owner(_owner),
port(_port),
- masterID(master_id),
+ requestorId(requestor_id),
trace(trace_file, 1.0 / params->freqMultiplier),
genName(owner.name() + ".elastic." + _name),
retryPkt(nullptr),
/** Reference of the port to be used to issue memory requests. */
RequestPort& port;
- /** MasterID used for the requests being sent. */
- const MasterID masterID;
+ /** RequestorID used for the requests being sent. */
+ const RequestorID requestorId;
/** Input stream used for reading the input trace file. */
InputStream trace;
type = 'AmbaDmaDevice'
abstract = True
cxx_header = "dev/arm/amba_device.hh"
- pio_addr = Param.Addr("Address for AMBA slave interface")
- pio_latency = Param.Latency("10ns", "Time between action and write/read result by AMBA DMA Device")
+ pio_addr = Param.Addr("Address for AMBA responder interface")
+ pio_latency = Param.Latency("10ns", "Time between action and write/read "
+ "result by AMBA DMA Device")
interrupt = Param.ArmInterruptPin("Interrupt that connects to GIC")
amba_id = Param.UInt32("ID of AMBA device for kernel detection")
def _attach_memory(self, mem, bus, mem_ports=None):
if hasattr(mem, "port"):
if mem_ports is None:
- mem.port = bus.master
+ mem.port = bus.mem_side_ports
else:
mem_ports.append(mem.port)
def _attach_device(self, device, bus, dma_ports=None):
if hasattr(device, "pio"):
- device.pio = bus.master
+ device.pio = bus.mem_side_ports
if hasattr(device, "dma"):
if dma_ports is None:
- device.dma = bus.slave
+ device.dma = bus.cpu_side_ports
else:
dma_ports.append(device.dma)
"""
Instantiate a single SMMU and attach a group of client devices to it.
The devices' dma port is wired to the SMMU and the SMMU's dma port
- (master) is attached to the bus. In order to make it work, the list
- of clients shouldn't contain any device part of the _off_chip_devices
- or _on_chip_devices.
+ is attached to the bus. In order to make it work, the list of clients
+ shouldn't contain any device part of the _off_chip_devices or
+ _on_chip_devices.
This method should be called only once.
Parameters:
devices (list): List of devices which will be using the SMMU
- bus (Bus): The bus downstream of the SMMU. Its slave port will
- receive memory requests from the SMMU, and its master
+ bus (Bus): The bus downstream of the SMMU. Its response port will
+ receive memory requests from the SMMU, and its request
port will forward accesses to the memory mapped devices
"""
if hasattr(self, 'smmu'):
self.smmu = SMMUv3(reg_map=AddrRange(0x2b400000, size=0x00020000))
- self.smmu.master = bus.slave
- self.smmu.control = bus.master
+ self.smmu.request = bus.cpu_side_ports
+ self.smmu.control = bus.mem_side_ports
dma_ports = []
for dev in devices:
Source('smmu_v3_ports.cc');
Source('smmu_v3_proc.cc');
Source('smmu_v3_ptops.cc');
- Source('smmu_v3_slaveifc.cc');
+ Source('smmu_v3_deviceifc.cc');
Source('smmu_v3_transl.cc');
Source('timer_sp804.cc')
Source('watchdog_sp805.cc')
from m5.SimObject import *
from m5.objects.ClockedObject import ClockedObject
-class SMMUv3SlaveInterface(ClockedObject):
- type = 'SMMUv3SlaveInterface'
- cxx_header = 'dev/arm/smmu_v3_slaveifc.hh'
-
- slave = ResponsePort('Device port')
- ats_master = RequestPort('ATS master port')
- ats_slave = ResponsePort('ATS slave port')
+class SMMUv3DeviceInterface(ClockedObject):
+ type = 'SMMUv3DeviceInterface'
+ cxx_header = 'dev/arm/smmu_v3_deviceifc.hh'
+
+ device_port = ResponsePort('Device port')
+ slave = DeprecatedParam(device_port,
+ '`slave` is now called `device_port`')
+ ats_mem_side_port = RequestPort('ATS mem side port, '
+ 'sends requests and receives responses')
+ ats_master = DeprecatedParam(ats_mem_side_port,
+ '`ats_master` is now called `ats_mem_side_port`')
+ ats_dev_side_port = ResponsePort('ATS dev_side_port, '
+ 'sends responses and receives requests')
+ ats_slave = DeprecatedParam(ats_dev_side_port,
+ '`ats_slave` is now called `ats_dev_side_port`')
port_width = Param.Unsigned(16, 'Port width in bytes (= 1 beat)')
wrbuf_slots = Param.Unsigned(16, 'Write buffer size (in beats)')
type = 'SMMUv3'
cxx_header = 'dev/arm/smmu_v3.hh'
- master = RequestPort('Master port')
- master_walker = RequestPort(
- 'Master port for SMMU initiated HWTW requests (optional)')
+ request = RequestPort('Request port')
+ walker = RequestPort(
+ 'Request port for SMMU initiated HWTW requests (optional)')
control = ResponsePort(
'Control port for accessing memory-mapped registers')
sample_period = Param.Clock('10us', 'Stats sample period')
reg_map = Param.AddrRange('Address range for control registers')
system = Param.System(Parent.any, "System this device is part of")
- slave_interfaces = VectorParam.SMMUv3SlaveInterface([], "Slave interfaces")
+ device_interfaces = VectorParam.SMMUv3DeviceInterface([],
+ "Responder interfaces")
- # SLAVE INTERFACE<->SMMU link parameters
+ # RESPONDER INTERFACE<->SMMU link parameters
ifc_smmu_lat = Param.Cycles(8, 'IFC to SMMU communication latency')
smmu_ifc_lat = Param.Cycles(8, 'SMMU to IFC communication latency')
xlate_slots = Param.Unsigned(64, 'SMMU translation slots')
ptw_slots = Param.Unsigned(16, 'SMMU page table walk slots')
- master_port_width = Param.Unsigned(16,
- 'Master port width in bytes (= 1 beat)')
+ request_port_width = Param.Unsigned(16,
+ 'Request port width in bytes (= 1 beat)')
tlb_entries = Param.Unsigned(2048, 'TLB size (entries)')
tlb_assoc = Param.Unsigned(4, 'TLB associativity (0=full)')
def connect(self, device):
"""
- Helper method used to connect the SMMU. The master could
+ Helper method used to connect the SMMU. The requestor could
be either a dma port (if the SMMU is attached directly to a
- dma device), or to a master port (this is the case where the SMMU
+ dma device), or to a request port (this is the case where the SMMU
is attached to a bridge).
"""
- slave_interface = SMMUv3SlaveInterface()
+ device_interface = SMMUv3DeviceInterface()
- if hasattr(device, "master"):
- slave_interface.slave = device.master
+ if hasattr(device, "request_port"):
+ device_interface.device_port = device.request_port
elif hasattr(device, "dma"):
- slave_interface.slave = device.dma
+ device_interface.device_port = device.dma
else:
print("Unable to attach SMMUv3\n")
sys.exit(1)
- self.slave_interfaces.append(slave_interface)
+ self.device_interfaces.append(device_interface)
# Storing a reference to the smmu to be used when generating
# the binding in the device DTB.
class UFSHostDevice(DmaDevice):
type = 'UFSHostDevice'
cxx_header = "dev/arm/ufs_device.hh"
- pio_addr = Param.Addr("Address for SCSI configuration slave interface")
+ pio_addr = Param.Addr("Address for SCSI configuration responder interface")
pio_latency = Param.Latency("10ns", "Time between action and write/read \
result by AMBA DMA Device")
gic = Param.BaseGic(Parent.any, "Gic to use for interrupting")
namespace AMBA
{
-typedef MasterID OrderID;
+typedef RequestorID OrderID;
static OrderID
orderId(PacketPtr pkt)
{
- return pkt->req->masterId();
+ return pkt->req->requestorId();
}
} // namespace AMBA
a.type = ItsActionType::SEND_REQ;
RequestPtr req = std::make_shared<Request>(
- addr, size, 0, its.masterId);
+ addr, size, 0, its.requestorId);
req->taskId(ContextSwitchTaskId::DMA);
a.type = ItsActionType::SEND_REQ;
RequestPtr req = std::make_shared<Request>(
- addr, size, 0, its.masterId);
+ addr, size, 0, its.requestorId);
req->taskId(ContextSwitchTaskId::DMA);
gitsCbaser(0), gitsCreadr(0),
gitsCwriter(0), gitsIidr(0),
tableBases(NUM_BASER_REGS, 0),
- masterId(params->system->getMasterId(this)),
+ requestorId(params->system->getRequestorId(this)),
gic(nullptr),
commandEvent([this] { checkCommandQueue(); }, name()),
pendingCommands(false),
private:
std::queue<ItsAction> packetsToRetry;
- uint32_t masterId;
+ uint32_t requestorId;
Gicv3 *gic;
EventFunctionWrapper commandEvent;
SMMUv3::SMMUv3(SMMUv3Params *params) :
ClockedObject(params),
system(*params->system),
- masterId(params->system->getMasterId(this)),
- masterPort(name() + ".master", *this),
- masterTableWalkPort(name() + ".master_walker", *this),
+ requestorId(params->system->getRequestorId(this)),
+ requestPort(name() + ".request", *this),
+ tableWalkPort(name() + ".walker", *this),
controlPort(name() + ".control", *this, params->reg_map),
tlb(params->tlb_entries, params->tlb_assoc, params->tlb_policy),
configCache(params->cfg_entries, params->cfg_assoc, params->cfg_policy),
walkCacheNonfinalEnable(params->wc_nonfinal_enable),
walkCacheS1Levels(params->wc_s1_levels),
walkCacheS2Levels(params->wc_s2_levels),
- masterPortWidth(params->master_port_width),
+ requestPortWidth(params->request_port_width),
tlbSem(params->tlb_slots),
ifcSmmuSem(1),
smmuIfcSem(1),
configSem(params->cfg_slots),
ipaSem(params->ipa_slots),
walkSem(params->walk_slots),
- masterPortSem(1),
+ requestPortSem(1),
transSem(params->xlate_slots),
ptwSem(params->ptw_slots),
cycleSem(1),
configLat(params->cfg_lat),
ipaLat(params->ipa_lat),
walkLat(params->walk_lat),
- slaveInterfaces(params->slave_interfaces),
+ deviceInterfaces(params->device_interfaces),
commandExecutor(name() + ".cmd_exec", *this),
regsMap(params->reg_map),
processCommandsEvent(this)
// store an unallowed values or if the are configuration conflicts.
warn("SMMUv3 IDx register values unchecked\n");
- for (auto ifc : slaveInterfaces)
+ for (auto ifc : deviceInterfaces)
ifc->setSMMU(this);
}
bool
-SMMUv3::masterRecvTimingResp(PacketPtr pkt)
+SMMUv3::recvTimingResp(PacketPtr pkt)
{
- DPRINTF(SMMUv3, "[t] master resp addr=%#x size=%#x\n",
+ DPRINTF(SMMUv3, "[t] requestor resp addr=%#x size=%#x\n",
pkt->getAddr(), pkt->getSize());
// @todo: We need to pay for this and not just zero it out
}
void
-SMMUv3::masterRecvReqRetry()
+SMMUv3::recvReqRetry()
{
assert(!packetsToRetry.empty());
assert(a.type==ACTION_SEND_REQ || a.type==ACTION_SEND_REQ_FINAL);
- DPRINTF(SMMUv3, "[t] master retr addr=%#x size=%#x\n",
+ DPRINTF(SMMUv3, "[t] requestor retr addr=%#x size=%#x\n",
a.pkt->getAddr(), a.pkt->getSize());
- if (!masterPort.sendTimingReq(a.pkt))
+ if (!requestPort.sendTimingReq(a.pkt))
break;
packetsToRetry.pop();
/*
* ACTION_SEND_REQ_FINAL means that we have just forwarded the packet
- * on the master interface; this means that we no longer hold on to
+ * on the requestor interface; this means that we no longer hold on to
* that transaction and therefore can accept a new one.
- * If the slave port was stalled then unstall it (send retry).
+ * If the response port was stalled then unstall it (send retry).
*/
if (a.type == ACTION_SEND_REQ_FINAL)
- scheduleSlaveRetries();
+ scheduleDeviceRetries();
}
}
bool
-SMMUv3::masterTableWalkRecvTimingResp(PacketPtr pkt)
+SMMUv3::tableWalkRecvTimingResp(PacketPtr pkt)
{
- DPRINTF(SMMUv3, "[t] master HWTW resp addr=%#x size=%#x\n",
+ DPRINTF(SMMUv3, "[t] requestor HWTW resp addr=%#x size=%#x\n",
pkt->getAddr(), pkt->getSize());
// @todo: We need to pay for this and not just zero it out
}
void
-SMMUv3::masterTableWalkRecvReqRetry()
+SMMUv3::tableWalkRecvReqRetry()
{
assert(tableWalkPortEnable);
assert(!packetsTableWalkToRetry.empty());
assert(a.type==ACTION_SEND_REQ);
- DPRINTF(SMMUv3, "[t] master HWTW retr addr=%#x size=%#x\n",
+ DPRINTF(SMMUv3, "[t] requestor HWTW retr addr=%#x size=%#x\n",
a.pkt->getAddr(), a.pkt->getSize());
- if (!masterTableWalkPort.sendTimingReq(a.pkt))
+ if (!tableWalkPort.sendTimingReq(a.pkt))
break;
packetsTableWalkToRetry.pop();
}
void
-SMMUv3::scheduleSlaveRetries()
+SMMUv3::scheduleDeviceRetries()
{
- for (auto ifc : slaveInterfaces) {
+ for (auto ifc : deviceInterfaces) {
ifc->scheduleDeviceRetry();
}
}
switch (action.type) {
case ACTION_SEND_REQ:
- // Send an MMU initiated request on the table walk port if it is
- // enabled. Otherwise, fall through and handle same as the final
- // ACTION_SEND_REQ_FINAL request.
+ // Send an MMU initiated request on the table walk port if
+ // it is enabled. Otherwise, fall through and handle same
+ // as the final ACTION_SEND_REQ_FINAL request.
if (tableWalkPortEnable) {
- delay += masterTableWalkPort.sendAtomic(action.pkt);
+ delay += tableWalkPort.sendAtomic(action.pkt);
pkt = action.pkt;
break;
}
M5_FALLTHROUGH;
case ACTION_SEND_REQ_FINAL:
- delay += masterPort.sendAtomic(action.pkt);
+ delay += requestPort.sendAtomic(action.pkt);
pkt = action.pkt;
break;
if (tableWalkPortEnable) {
action.pkt->pushSenderState(proc);
- DPRINTF(SMMUv3, "[t] master HWTW req addr=%#x size=%#x\n",
+ DPRINTF(SMMUv3, "[t] requestor HWTW req addr=%#x size=%#x\n",
action.pkt->getAddr(), action.pkt->getSize());
if (packetsTableWalkToRetry.empty()
- && masterTableWalkPort.sendTimingReq(action.pkt)) {
- scheduleSlaveRetries();
+ && tableWalkPort.sendTimingReq(action.pkt)) {
+ scheduleDeviceRetries();
} else {
- DPRINTF(SMMUv3, "[t] master HWTW req needs retry,"
+ DPRINTF(SMMUv3, "[t] requestor HWTW req needs retry,"
" qlen=%d\n", packetsTableWalkToRetry.size());
packetsTableWalkToRetry.push(action);
}
case ACTION_SEND_REQ_FINAL:
action.pkt->pushSenderState(proc);
- DPRINTF(SMMUv3, "[t] master req addr=%#x size=%#x\n",
+ DPRINTF(SMMUv3, "[t] requestor req addr=%#x size=%#x\n",
action.pkt->getAddr(), action.pkt->getSize());
- if (packetsToRetry.empty() && masterPort.sendTimingReq(action.pkt)) {
- scheduleSlaveRetries();
+ if (packetsToRetry.empty() &&
+ requestPort.sendTimingReq(action.pkt)) {
+ scheduleDeviceRetries();
} else {
- DPRINTF(SMMUv3, "[t] master req needs retry, qlen=%d\n",
+ DPRINTF(SMMUv3, "[t] requestor req needs retry, qlen=%d\n",
packetsToRetry.size());
packetsToRetry.push(action);
}
// @todo: We need to pay for this and not just zero it out
action.pkt->headerDelay = action.pkt->payloadDelay = 0;
- DPRINTF(SMMUv3, "[t] slave resp addr=%#x size=%#x\n",
+ DPRINTF(SMMUv3, "[t] responder resp addr=%#x size=%#x\n",
action.pkt->getAddr(),
action.pkt->getSize());
// @todo: We need to pay for this and not just zero it out
action.pkt->headerDelay = action.pkt->payloadDelay = 0;
- DPRINTF(SMMUv3, "[t] ATS slave resp addr=%#x size=%#x\n",
+ DPRINTF(SMMUv3, "[t] ATS responder resp addr=%#x size=%#x\n",
action.pkt->getAddr(), action.pkt->getSize());
assert(action.ifc);
DPRINTF(SMMUv3, "CMD_CFGI_STE sid=%#x\n", cmd.dw0.sid);
configCache.invalidateSID(cmd.dw0.sid);
- for (auto slave_interface : slaveInterfaces) {
- slave_interface->microTLB->invalidateSID(cmd.dw0.sid);
- slave_interface->mainTLB->invalidateSID(cmd.dw0.sid);
+ for (auto dev_interface : deviceInterfaces) {
+ dev_interface->microTLB->invalidateSID(cmd.dw0.sid);
+ dev_interface->mainTLB->invalidateSID(cmd.dw0.sid);
}
break;
}
DPRINTF(SMMUv3, "CMD_CFGI_ALL\n");
configCache.invalidateAll();
- for (auto slave_interface : slaveInterfaces) {
- slave_interface->microTLB->invalidateAll();
- slave_interface->mainTLB->invalidateAll();
+ for (auto dev_interface : deviceInterfaces) {
+ dev_interface->microTLB->invalidateAll();
+ dev_interface->mainTLB->invalidateAll();
}
} else {
DPRINTF(SMMUv3, "CMD_CFGI_STE_RANGE\n");
for (auto sid = start_sid; sid <= end_sid; sid++) {
configCache.invalidateSID(sid);
- for (auto slave_interface : slaveInterfaces) {
- slave_interface->microTLB->invalidateSID(sid);
- slave_interface->mainTLB->invalidateSID(sid);
+ for (auto dev_interface : deviceInterfaces) {
+ dev_interface->microTLB->invalidateSID(sid);
+ dev_interface->mainTLB->invalidateSID(sid);
}
}
}
cmd.dw0.sid, cmd.dw0.ssid);
configCache.invalidateSSID(cmd.dw0.sid, cmd.dw0.ssid);
- for (auto slave_interface : slaveInterfaces) {
- slave_interface->microTLB->invalidateSSID(
+ for (auto dev_interface : deviceInterfaces) {
+ dev_interface->microTLB->invalidateSSID(
cmd.dw0.sid, cmd.dw0.ssid);
- slave_interface->mainTLB->invalidateSSID(
+ dev_interface->mainTLB->invalidateSSID(
cmd.dw0.sid, cmd.dw0.ssid);
}
break;
DPRINTF(SMMUv3, "CMD_CFGI_CD_ALL sid=%#x\n", cmd.dw0.sid);
configCache.invalidateSID(cmd.dw0.sid);
- for (auto slave_interface : slaveInterfaces) {
- slave_interface->microTLB->invalidateSID(cmd.dw0.sid);
- slave_interface->mainTLB->invalidateSID(cmd.dw0.sid);
+ for (auto dev_interface : deviceInterfaces) {
+ dev_interface->microTLB->invalidateSID(cmd.dw0.sid);
+ dev_interface->mainTLB->invalidateSID(cmd.dw0.sid);
}
break;
}
case CMD_TLBI_NH_ALL: {
DPRINTF(SMMUv3, "CMD_TLBI_NH_ALL vmid=%#x\n", cmd.dw0.vmid);
- for (auto slave_interface : slaveInterfaces) {
- slave_interface->microTLB->invalidateVMID(cmd.dw0.vmid);
- slave_interface->mainTLB->invalidateVMID(cmd.dw0.vmid);
+ for (auto dev_interface : deviceInterfaces) {
+ dev_interface->microTLB->invalidateVMID(cmd.dw0.vmid);
+ dev_interface->mainTLB->invalidateVMID(cmd.dw0.vmid);
}
tlb.invalidateVMID(cmd.dw0.vmid);
walkCache.invalidateVMID(cmd.dw0.vmid);
case CMD_TLBI_NH_ASID: {
DPRINTF(SMMUv3, "CMD_TLBI_NH_ASID asid=%#x vmid=%#x\n",
cmd.dw0.asid, cmd.dw0.vmid);
- for (auto slave_interface : slaveInterfaces) {
- slave_interface->microTLB->invalidateASID(
+ for (auto dev_interface : deviceInterfaces) {
+ dev_interface->microTLB->invalidateASID(
cmd.dw0.asid, cmd.dw0.vmid);
- slave_interface->mainTLB->invalidateASID(
+ dev_interface->mainTLB->invalidateASID(
cmd.dw0.asid, cmd.dw0.vmid);
}
tlb.invalidateASID(cmd.dw0.asid, cmd.dw0.vmid);
const Addr addr = cmd.addr();
DPRINTF(SMMUv3, "CMD_TLBI_NH_VAA va=%#08x vmid=%#x\n",
addr, cmd.dw0.vmid);
- for (auto slave_interface : slaveInterfaces) {
- slave_interface->microTLB->invalidateVAA(
+ for (auto dev_interface : deviceInterfaces) {
+ dev_interface->microTLB->invalidateVAA(
addr, cmd.dw0.vmid);
- slave_interface->mainTLB->invalidateVAA(
+ dev_interface->mainTLB->invalidateVAA(
addr, cmd.dw0.vmid);
}
tlb.invalidateVAA(addr, cmd.dw0.vmid);
const Addr addr = cmd.addr();
DPRINTF(SMMUv3, "CMD_TLBI_NH_VA va=%#08x asid=%#x vmid=%#x\n",
addr, cmd.dw0.asid, cmd.dw0.vmid);
- for (auto slave_interface : slaveInterfaces) {
- slave_interface->microTLB->invalidateVA(
+ for (auto dev_interface : deviceInterfaces) {
+ dev_interface->microTLB->invalidateVA(
addr, cmd.dw0.asid, cmd.dw0.vmid);
- slave_interface->mainTLB->invalidateVA(
+ dev_interface->mainTLB->invalidateVA(
addr, cmd.dw0.asid, cmd.dw0.vmid);
}
tlb.invalidateVA(addr, cmd.dw0.asid, cmd.dw0.vmid);
case CMD_TLBI_S12_VMALL: {
DPRINTF(SMMUv3, "CMD_TLBI_S12_VMALL vmid=%#x\n", cmd.dw0.vmid);
- for (auto slave_interface : slaveInterfaces) {
- slave_interface->microTLB->invalidateVMID(cmd.dw0.vmid);
- slave_interface->mainTLB->invalidateVMID(cmd.dw0.vmid);
+ for (auto dev_interface : deviceInterfaces) {
+ dev_interface->microTLB->invalidateVMID(cmd.dw0.vmid);
+ dev_interface->mainTLB->invalidateVMID(cmd.dw0.vmid);
}
tlb.invalidateVMID(cmd.dw0.vmid);
ipaCache.invalidateVMID(cmd.dw0.vmid);
case CMD_TLBI_NSNH_ALL: {
DPRINTF(SMMUv3, "CMD_TLBI_NSNH_ALL\n");
- for (auto slave_interface : slaveInterfaces) {
- slave_interface->microTLB->invalidateAll();
- slave_interface->mainTLB->invalidateAll();
+ for (auto dev_interface : deviceInterfaces) {
+ dev_interface->microTLB->invalidateAll();
+ dev_interface->mainTLB->invalidateAll();
}
tlb.invalidateAll();
ipaCache.invalidateAll();
SMMUv3::init()
{
// make sure both sides are connected and have the same block size
- if (!masterPort.isConnected())
- fatal("Master port is not connected.\n");
+ if (!requestPort.isConnected())
+ fatal("Request port is not connected.\n");
- // If the second master port is connected for the table walks, enable
+ // If the second request port is connected for the table walks, enable
// the mode to send table walks through this port instead
- if (masterTableWalkPort.isConnected())
+ if (tableWalkPort.isConnected())
tableWalkPortEnable = true;
- // notify the master side of our address ranges
- for (auto ifc : slaveInterfaces) {
+ // notify the request side of our address ranges
+ for (auto ifc : deviceInterfaces) {
ifc->sendRange();
}
using namespace Stats;
- for (size_t i = 0; i < slaveInterfaces.size(); i++) {
- slaveInterfaces[i]->microTLB->regStats(
+ for (size_t i = 0; i < deviceInterfaces.size(); i++) {
+ deviceInterfaces[i]->microTLB->regStats(
csprintf("%s.utlb%d", name(), i));
- slaveInterfaces[i]->mainTLB->regStats(
+ deviceInterfaces[i]->mainTLB->regStats(
csprintf("%s.maintlb%d", name(), i));
}
Port&
SMMUv3::getPort(const std::string &name, PortID id)
{
- if (name == "master") {
- return masterPort;
- } else if (name == "master_walker") {
- return masterTableWalkPort;
+ if (name == "request") {
+ return requestPort;
+ } else if (name == "walker") {
+ return tableWalkPort;
} else if (name == "control") {
return controlPort;
} else {
#include "dev/arm/smmu_v3_caches.hh"
#include "dev/arm/smmu_v3_cmdexec.hh"
#include "dev/arm/smmu_v3_defs.hh"
+#include "dev/arm/smmu_v3_deviceifc.hh"
#include "dev/arm/smmu_v3_events.hh"
#include "dev/arm/smmu_v3_ports.hh"
#include "dev/arm/smmu_v3_proc.hh"
#include "dev/arm/smmu_v3_ptops.hh"
-#include "dev/arm/smmu_v3_slaveifc.hh"
#include "mem/packet.hh"
#include "params/SMMUv3.hh"
#include "sim/clocked_object.hh"
friend class SMMUProcess;
friend class SMMUTranslationProcess;
friend class SMMUCommandExecProcess;
- friend class SMMUv3SlaveInterface;
+ friend class SMMUv3DeviceInterface;
const System &system;
- const MasterID masterId;
+ const RequestorID requestorId;
- SMMUMasterPort masterPort;
- SMMUMasterTableWalkPort masterTableWalkPort;
+ SMMURequestPort requestPort;
+ SMMUTableWalkPort tableWalkPort;
SMMUControlPort controlPort;
ARMArchTLB tlb;
const bool walkCacheNonfinalEnable;
const unsigned walkCacheS1Levels;
const unsigned walkCacheS2Levels;
- const unsigned masterPortWidth; // in bytes
+ const unsigned requestPortWidth; // in bytes
SMMUSemaphore tlbSem;
SMMUSemaphore ifcSmmuSem;
SMMUSemaphore configSem;
SMMUSemaphore ipaSem;
SMMUSemaphore walkSem;
- SMMUSemaphore masterPortSem;
+ SMMUSemaphore requestPortSem;
SMMUSemaphore transSem; // max N transactions in SMMU
SMMUSemaphore ptwSem; // max N concurrent PTWs
Stats::Distribution translationTimeDist;
Stats::Distribution ptwTimeDist;
- std::vector<SMMUv3SlaveInterface *> slaveInterfaces;
+ std::vector<SMMUv3DeviceInterface *> deviceInterfaces;
SMMUCommandExecProcess commandExecutor;
std::queue<SMMUAction> packetsTableWalkToRetry;
- void scheduleSlaveRetries();
+ void scheduleDeviceRetries();
SMMUAction runProcess(SMMUProcess *proc, PacketPtr pkt);
SMMUAction runProcessAtomic(SMMUProcess *proc, PacketPtr pkt);
virtual void init() override;
virtual void regStats() override;
- Tick slaveRecvAtomic(PacketPtr pkt, PortID id);
- bool slaveRecvTimingReq(PacketPtr pkt, PortID id);
- bool masterRecvTimingResp(PacketPtr pkt);
- void masterRecvReqRetry();
+ Tick recvAtomic(PacketPtr pkt, PortID id);
+ bool recvTimingReq(PacketPtr pkt, PortID id);
+ bool recvTimingResp(PacketPtr pkt);
+ void recvReqRetry();
- bool masterTableWalkRecvTimingResp(PacketPtr pkt);
- void masterTableWalkRecvReqRetry();
+ bool tableWalkRecvTimingResp(PacketPtr pkt);
+ void tableWalkRecvReqRetry();
Tick readControl(PacketPtr pkt);
Tick writeControl(PacketPtr pkt);
--- /dev/null
+/*
+ * Copyright (c) 2019 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "dev/arm/smmu_v3_deviceifc.hh"
+
+#include "base/trace.hh"
+#include "debug/SMMUv3.hh"
+#include "dev/arm/smmu_v3.hh"
+#include "dev/arm/smmu_v3_transl.hh"
+
+// Per-device interface to the SMMU: owns the micro (L1) and main (L2)
+// TLBs and the three ports (device, ATS device, ATS memory) used by one
+// upstream device. smmu starts out null and is wired up later through
+// setSMMU().
+SMMUv3DeviceInterface::SMMUv3DeviceInterface(
+ const SMMUv3DeviceInterfaceParams *p) :
+ ClockedObject(p),
+ smmu(nullptr),
+ microTLB(new SMMUTLB(p->utlb_entries,
+ p->utlb_assoc,
+ p->utlb_policy)),
+ mainTLB(new SMMUTLB(p->tlb_entries,
+ p->tlb_assoc,
+ p->tlb_policy)),
+ microTLBEnable(p->utlb_enable),
+ mainTLBEnable(p->tlb_enable),
+ // binary semaphore: serializes use of the device port
+ devicePortSem(1),
+ microTLBSem(p->utlb_slots),
+ mainTLBSem(p->tlb_slots),
+ microTLBLat(p->utlb_lat),
+ mainTLBLat(p->tlb_lat),
+ devicePort(new SMMUDevicePort(csprintf("%s.device_port",
+ name()), *this)),
+ atsDevicePort(name() + ".atsDevicePort", *this),
+ atsMemPort(name() + ".atsMemPort", *this),
+ portWidth(p->port_width),
+ wrBufSlotsRemaining(p->wrbuf_slots),
+ xlateSlotsRemaining(p->xlate_slots),
+ pendingMemAccesses(0),
+ prefetchEnable(p->prefetch_enable),
+ prefetchReserveLastWay(
+ p->prefetch_reserve_last_way),
+ deviceNeedsRetry(false),
+ atsDeviceNeedsRetry(false),
+ sendDeviceRetryEvent(*this),
+ atsSendDeviceRetryEvent(this)
+{}
+
+// Advertise this interface's address range to the connected device via
+// a range-change notification; it is a fatal configuration error for
+// the device port to be left unconnected.
+void
+SMMUv3DeviceInterface::sendRange()
+{
+ if (devicePort->isConnected()) {
+ inform("Device port is connected to %s\n", devicePort->getPeer());
+
+ devicePort->sendRangeChange();
+ } else {
+ fatal("Device port is not connected.\n");
+ }
+}
+
+// Resolve a port by its Python-visible name; unknown names are
+// delegated to ClockedObject::getPort, which reports the error.
+Port&
+SMMUv3DeviceInterface::getPort(const std::string &name, PortID id)
+{
+ if (name == "ats_mem_side_port") {
+ return atsMemPort;
+ } else if (name == "device_port") {
+ return *devicePort;
+ } else if (name == "ats_dev_side_port") {
+ return atsDevicePort;
+ } else {
+ return ClockedObject::getPort(name, id);
+ }
+}
+
+// Queue a translation response on the device port for the next cycle.
+void
+SMMUv3DeviceInterface::schedTimingResp(PacketPtr pkt)
+{
+ devicePort->schedTimingResp(pkt, nextCycle());
+}
+
+// Queue an ATS response on the ATS device port for the next cycle and,
+// if the device was previously refused, schedule the deferred retry.
+void
+SMMUv3DeviceInterface::schedAtsTimingResp(PacketPtr pkt)
+{
+ atsDevicePort.schedTimingResp(pkt, nextCycle());
+
+ if (atsDeviceNeedsRetry) {
+ atsDeviceNeedsRetry = false;
+ schedule(atsSendDeviceRetryEvent, nextCycle());
+ }
+}
+
+// Atomic-mode translation: run the whole translation process
+// synchronously on the stack and return the accumulated delay.
+Tick
+SMMUv3DeviceInterface::recvAtomic(PacketPtr pkt)
+{
+ DPRINTF(SMMUv3, "[a] req from %s addr=%#x size=%#x\n",
+ devicePort->getPeer(), pkt->getAddr(), pkt->getSize());
+
+ std::string proc_name = csprintf("%s.port", name());
+ SMMUTranslationProcess proc(proc_name, *smmu, *this);
+ proc.beginTransaction(SMMUTranslRequest::fromPacket(pkt));
+
+ SMMUAction a = smmu->runProcessAtomic(&proc, pkt);
+ // atomic mode must complete with a response in one call
+ assert(a.type == ACTION_SEND_RESP);
+
+ return a.delay;
+}
+
+// Timing-mode translation request from the device. Returns false (and
+// remembers to send a retry later) when no translation slot is free or,
+// for writes, when the write buffer cannot hold the whole burst.
+bool
+SMMUv3DeviceInterface::recvTimingReq(PacketPtr pkt)
+{
+ DPRINTF(SMMUv3, "[t] req from %s addr=%#x size=%#x\n",
+ devicePort->getPeer(), pkt->getAddr(), pkt->getSize());
+
+ // @todo: We need to pay for this and not just zero it out
+ pkt->headerDelay = pkt->payloadDelay = 0;
+
+ // number of port-width beats needed for this packet (ceiling div)
+ unsigned nbeats =
+ (pkt->getSize() + (portWidth-1)) / portWidth;
+
+ if (xlateSlotsRemaining==0 ||
+ (pkt->isWrite() && wrBufSlotsRemaining < nbeats))
+ {
+ deviceNeedsRetry = true;
+ return false;
+ }
+
+ // reserve write-buffer beats; released elsewhere when the write
+ // completes
+ if (pkt->isWrite())
+ wrBufSlotsRemaining -= nbeats;
+
+ std::string proc_name = csprintf("%s.port", name());
+ SMMUTranslationProcess *proc =
+ new SMMUTranslationProcess(proc_name, *smmu, *this);
+ proc->beginTransaction(SMMUTranslRequest::fromPacket(pkt));
+
+ smmu->runProcessTiming(proc, pkt);
+
+ return true;
+}
+
+// Atomic-mode ATS translation request; same flow as recvAtomic but the
+// request is flagged as ATS and must complete with an ATS response.
+Tick
+SMMUv3DeviceInterface::atsRecvAtomic(PacketPtr pkt)
+{
+ DPRINTF(SMMUv3, "[a] ATS responder req addr=%#x size=%#x\n",
+ pkt->getAddr(), pkt->getSize());
+
+ std::string proc_name = csprintf("%s.atsport", name());
+ const bool ats_request = true;
+ SMMUTranslationProcess proc(
+ proc_name, *smmu, *this);
+ proc.beginTransaction(SMMUTranslRequest::fromPacket(pkt, ats_request));
+
+ SMMUAction a = smmu->runProcessAtomic(&proc, pkt);
+ assert(a.type == ACTION_SEND_RESP_ATS);
+
+ return a.delay;
+}
+
+// Timing-mode ATS translation request. Backpressured only by the
+// translation-slot count (ATS requests carry no write-buffer cost).
+bool
+SMMUv3DeviceInterface::atsRecvTimingReq(PacketPtr pkt)
+{
+ DPRINTF(SMMUv3, "[t] ATS responder req addr=%#x size=%#x\n",
+ pkt->getAddr(), pkt->getSize());
+
+ // @todo: We need to pay for this and not just zero it out
+ pkt->headerDelay = pkt->payloadDelay = 0;
+
+ if (xlateSlotsRemaining == 0) {
+ deviceNeedsRetry = true;
+ return false;
+ }
+
+ std::string proc_name = csprintf("%s.atsport", name());
+ const bool ats_request = true;
+ SMMUTranslationProcess *proc =
+ new SMMUTranslationProcess(proc_name, *smmu, *this);
+ proc->beginTransaction(SMMUTranslRequest::fromPacket(pkt, ats_request));
+
+ smmu->runProcessTiming(proc, pkt);
+
+ return true;
+}
+
+// Memory-side response to an earlier ATS request: recover the
+// originating process from the packet's sender state and resume it.
+bool
+SMMUv3DeviceInterface::atsRecvTimingResp(PacketPtr pkt)
+{
+ DPRINTF(SMMUv3, "[t] ATS requestor resp addr=%#x size=%#x\n",
+ pkt->getAddr(), pkt->getSize());
+
+ // @todo: We need to pay for this and not just zero it out
+ pkt->headerDelay = pkt->payloadDelay = 0;
+
+ SMMUProcess *proc =
+ safe_cast<SMMUProcess *>(pkt->popSenderState());
+
+ smmu->runProcessTiming(proc, pkt);
+
+ return true;
+}
+
+// Tell the device it may re-send a previously rejected request.
+void
+SMMUv3DeviceInterface::sendDeviceRetry()
+{
+ devicePort->sendRetryReq();
+}
+
+// Tell the device it may re-send a previously rejected ATS request.
+void
+SMMUv3DeviceInterface::atsSendDeviceRetry()
+{
+ DPRINTF(SMMUv3, "ATS retry\n");
+ atsDevicePort.sendRetryReq();
+}
+
+// If a retry is owed to the device and none is already scheduled,
+// schedule one for the next cycle; clearing the flag here prevents
+// double-scheduling.
+void
+SMMUv3DeviceInterface::scheduleDeviceRetry()
+{
+ if (deviceNeedsRetry && !sendDeviceRetryEvent.scheduled()) {
+ DPRINTF(SMMUv3, "sched responder retry\n");
+ deviceNeedsRetry = false;
+ schedule(sendDeviceRetryEvent, nextCycle());
+ }
+}
+
+// Keep draining while any translation slot is in use (i.e. fewer slots
+// remain than were configured); drained once all are free.
+DrainState
+SMMUv3DeviceInterface::drain()
+{
+ // Wait until all SMMU translations are completed
+ if (xlateSlotsRemaining < params()->xlate_slots) {
+ return DrainState::Draining;
+ }
+ return DrainState::Drained;
+}
+
+// Standard gem5 params factory: build the SimObject from its params.
+SMMUv3DeviceInterface*
+SMMUv3DeviceInterfaceParams::create()
+{
+ return new SMMUv3DeviceInterface(this);
+}
--- /dev/null
+/*
+ * Copyright (c) 2013, 2018-2019 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __DEV_ARM_SMMU_V3_DEVICEIFC_HH__
+#define __DEV_ARM_SMMU_V3_DEVICEIFC_HH__
+
+#include <list>
+
+#include "dev/arm/smmu_v3_caches.hh"
+#include "dev/arm/smmu_v3_defs.hh"
+#include "dev/arm/smmu_v3_events.hh"
+#include "dev/arm/smmu_v3_ports.hh"
+#include "dev/arm/smmu_v3_proc.hh"
+#include "params/SMMUv3DeviceInterface.hh"
+#include "sim/clocked_object.hh"
+
+class SMMUTranslationProcess;
+class SMMUv3;
+class SMMUDevicePort;
+
+// Interface between one upstream device and the SMMU: per-device TLBs,
+// flow-control state, and the device-facing / ATS ports.
+class SMMUv3DeviceInterface : public ClockedObject
+{
+ protected:
+ friend class SMMUTranslationProcess;
+
+ public:
+ // Owning SMMU; null until setSMMU() is called during wiring.
+ SMMUv3 *smmu;
+ // Per-device TLBs, owned by this object (deleted in the destructor).
+ SMMUTLB* microTLB;
+ SMMUTLB* mainTLB;
+
+ const bool microTLBEnable;
+ const bool mainTLBEnable;
+
+ // Semaphores throttling concurrent use of the port and the TLBs.
+ SMMUSemaphore devicePortSem;
+ SMMUSemaphore microTLBSem;
+ SMMUSemaphore mainTLBSem;
+
+ // Lookup latencies for the two TLB levels.
+ const Cycles microTLBLat;
+ const Cycles mainTLBLat;
+
+ SMMUDevicePort *devicePort;
+ SMMUATSDevicePort atsDevicePort;
+ SMMUATSMemoryPort atsMemPort;
+
+ // in bytes
+ const unsigned portWidth;
+
+ // Remaining flow-control credits and in-flight memory accesses.
+ unsigned wrBufSlotsRemaining;
+ unsigned xlateSlotsRemaining;
+ unsigned pendingMemAccesses;
+
+ const bool prefetchEnable;
+ const bool prefetchReserveLastWay;
+
+ // Requests duplicating one already in flight, and the signal raised
+ // when one of them is removed.
+ std::list<SMMUTranslationProcess *> duplicateReqs;
+ SMMUSignal duplicateReqRemoved;
+
+ // Per-transaction-ID ordering dependencies between reads and writes.
+ std::list<SMMUTranslationProcess *> dependentReads[SMMU_MAX_TRANS_ID];
+ std::list<SMMUTranslationProcess *> dependentWrites[SMMU_MAX_TRANS_ID];
+ SMMUSignal dependentReqRemoved;
+
+ // Receiving translation requests from the requestor device
+ Tick recvAtomic(PacketPtr pkt);
+ bool recvTimingReq(PacketPtr pkt);
+ void schedTimingResp(PacketPtr pkt);
+
+ // ATS (Address Translation Services) request/response handling.
+ Tick atsRecvAtomic(PacketPtr pkt);
+ bool atsRecvTimingReq(PacketPtr pkt);
+ bool atsRecvTimingResp(PacketPtr pkt);
+ void schedAtsTimingResp(PacketPtr pkt);
+
+ // Retry plumbing towards the device.
+ void scheduleDeviceRetry();
+ void sendDeviceRetry();
+ void atsSendDeviceRetry();
+
+ // Set when a request was rejected and the device is owed a retry.
+ bool deviceNeedsRetry;
+ bool atsDeviceNeedsRetry;
+
+ SMMUDeviceRetryEvent sendDeviceRetryEvent;
+ EventWrapper<
+ SMMUv3DeviceInterface,
+ &SMMUv3DeviceInterface::atsSendDeviceRetry> atsSendDeviceRetryEvent;
+
+ Port& getPort(const std::string &name, PortID id) override;
+
+ public:
+ SMMUv3DeviceInterface(const SMMUv3DeviceInterfaceParams *p);
+
+ ~SMMUv3DeviceInterface()
+ {
+ delete microTLB;
+ delete mainTLB;
+ }
+
+ const SMMUv3DeviceInterfaceParams *
+ params() const
+ {
+ return static_cast<const SMMUv3DeviceInterfaceParams *>(_params);
+ }
+
+ DrainState drain() override;
+
+ void setSMMU(SMMUv3 *_smmu) { smmu = _smmu; }
+ void sendRange();
+};
+
+#endif /* __DEV_ARM_SMMU_V3_DEVICEIFC_HH__ */
#include "dev/arm/smmu_v3_events.hh"
-#include "dev/arm/smmu_v3_slaveifc.hh"
+#include "dev/arm/smmu_v3_deviceifc.hh"
void
SMMUDeviceRetryEvent::process()
#include <base/types.hh>
#include <sim/eventq.hh>
-class SMMUv3SlaveInterface;
+class SMMUv3DeviceInterface;
class SMMUDeviceRetryEvent : public Event
{
private:
- SMMUv3SlaveInterface &smmuIfc;
+ SMMUv3DeviceInterface &smmuIfc;
public:
- SMMUDeviceRetryEvent(SMMUv3SlaveInterface &ifc)
+ SMMUDeviceRetryEvent(SMMUv3DeviceInterface &ifc)
: smmuIfc(ifc)
{}
const std::string name() const;
const char *description() const
- { return "SlaveRetryEvent"; }
+ { return "DeviceRetryEvent"; }
};
#endif /* __DEV_ARM_SMMU_V3_EVENTS_HH__ */
#include "base/logging.hh"
#include "dev/arm/smmu_v3.hh"
-#include "dev/arm/smmu_v3_slaveifc.hh"
+#include "dev/arm/smmu_v3_deviceifc.hh"
-SMMUMasterPort::SMMUMasterPort(const std::string &_name, SMMUv3 &_smmu) :
+SMMURequestPort::SMMURequestPort(const std::string &_name, SMMUv3 &_smmu) :
RequestPort(_name, &_smmu),
smmu(_smmu)
{}
bool
-SMMUMasterPort::recvTimingResp(PacketPtr pkt)
+SMMURequestPort::recvTimingResp(PacketPtr pkt)
{
- return smmu.masterRecvTimingResp(pkt);
+ return smmu.recvTimingResp(pkt);
}
void
-SMMUMasterPort::recvReqRetry()
+SMMURequestPort::recvReqRetry()
{
- return smmu.masterRecvReqRetry();
+ return smmu.recvReqRetry();
}
-SMMUMasterTableWalkPort::SMMUMasterTableWalkPort(const std::string &_name,
+SMMUTableWalkPort::SMMUTableWalkPort(const std::string &_name,
SMMUv3 &_smmu) :
RequestPort(_name, &_smmu),
smmu(_smmu)
{}
bool
-SMMUMasterTableWalkPort::recvTimingResp(PacketPtr pkt)
+SMMUTableWalkPort::recvTimingResp(PacketPtr pkt)
{
- return smmu.masterTableWalkRecvTimingResp(pkt);
+ return smmu.tableWalkRecvTimingResp(pkt);
}
void
-SMMUMasterTableWalkPort::recvReqRetry()
+SMMUTableWalkPort::recvReqRetry()
{
- return smmu.masterTableWalkRecvReqRetry();
+ return smmu.tableWalkRecvReqRetry();
}
-SMMUSlavePort::SMMUSlavePort(const std::string &_name,
- SMMUv3SlaveInterface &_ifc,
+SMMUDevicePort::SMMUDevicePort(const std::string &_name,
+ SMMUv3DeviceInterface &_ifc,
PortID _id)
:
- QueuedSlavePort(_name, &_ifc, respQueue, _id),
+ QueuedResponsePort(_name, &_ifc, respQueue, _id),
ifc(_ifc),
respQueue(_ifc, *this)
{}
void
-SMMUSlavePort::recvFunctional(PacketPtr pkt)
+SMMUDevicePort::recvFunctional(PacketPtr pkt)
{
if (!respQueue.trySatisfyFunctional(pkt))
recvAtomic(pkt);
}
Tick
-SMMUSlavePort::recvAtomic(PacketPtr pkt)
+SMMUDevicePort::recvAtomic(PacketPtr pkt)
{
return ifc.recvAtomic(pkt);
}
bool
-SMMUSlavePort::recvTimingReq(PacketPtr pkt)
+SMMUDevicePort::recvTimingReq(PacketPtr pkt)
{
return ifc.recvTimingReq(pkt);
}
return list;
}
-SMMUATSMasterPort::SMMUATSMasterPort(const std::string &_name,
- SMMUv3SlaveInterface &_ifc) :
- QueuedMasterPort(_name, &_ifc, reqQueue, snoopRespQueue),
+SMMUATSMemoryPort::SMMUATSMemoryPort(const std::string &_name,
+ SMMUv3DeviceInterface &_ifc) :
+ QueuedRequestPort(_name, &_ifc, reqQueue, snoopRespQueue),
ifc(_ifc),
reqQueue(_ifc, *this),
snoopRespQueue(_ifc, *this)
{}
bool
-SMMUATSMasterPort::recvTimingResp(PacketPtr pkt)
+SMMUATSMemoryPort::recvTimingResp(PacketPtr pkt)
{
- return ifc.atsMasterRecvTimingResp(pkt);
+ return ifc.atsRecvTimingResp(pkt);
}
-SMMUATSSlavePort::SMMUATSSlavePort(const std::string &_name,
- SMMUv3SlaveInterface &_ifc) :
- QueuedSlavePort(_name, &_ifc, respQueue),
+SMMUATSDevicePort::SMMUATSDevicePort(const std::string &_name,
+ SMMUv3DeviceInterface &_ifc) :
+ QueuedResponsePort(_name, &_ifc, respQueue),
ifc(_ifc),
respQueue(_ifc, *this)
{}
void
-SMMUATSSlavePort::recvFunctional(PacketPtr pkt)
+SMMUATSDevicePort::recvFunctional(PacketPtr pkt)
{
panic("Functional access on ATS port!");
}
Tick
-SMMUATSSlavePort::recvAtomic(PacketPtr pkt)
+SMMUATSDevicePort::recvAtomic(PacketPtr pkt)
{
- return ifc.atsSlaveRecvAtomic(pkt);
+ return ifc.atsRecvAtomic(pkt);
}
bool
-SMMUATSSlavePort::recvTimingReq(PacketPtr pkt)
+SMMUATSDevicePort::recvTimingReq(PacketPtr pkt)
{
- return ifc.atsSlaveRecvTimingReq(pkt);
+ return ifc.atsRecvTimingReq(pkt);
}
#include "mem/tport.hh"
class SMMUv3;
-class SMMUv3SlaveInterface;
+class SMMUv3DeviceInterface;
-class SMMUMasterPort : public RequestPort
+class SMMURequestPort : public RequestPort
{
protected:
SMMUv3 &smmu;
virtual void recvReqRetry();
public:
- SMMUMasterPort(const std::string &_name, SMMUv3 &_smmu);
- virtual ~SMMUMasterPort() {}
+ SMMURequestPort(const std::string &_name, SMMUv3 &_smmu);
+ virtual ~SMMURequestPort() {}
};
-// Separate master port to send MMU initiated requests on
-class SMMUMasterTableWalkPort : public RequestPort
+// Separate request port to send MMU initiated requests on
+class SMMUTableWalkPort : public RequestPort
{
protected:
SMMUv3 &smmu;
virtual void recvReqRetry();
public:
- SMMUMasterTableWalkPort(const std::string &_name, SMMUv3 &_smmu);
- virtual ~SMMUMasterTableWalkPort() {}
+ SMMUTableWalkPort(const std::string &_name, SMMUv3 &_smmu);
+ virtual ~SMMUTableWalkPort() {}
};
-class SMMUSlavePort : public QueuedSlavePort
+class SMMUDevicePort : public QueuedResponsePort
{
protected:
- SMMUv3SlaveInterface &ifc;
+ SMMUv3DeviceInterface &ifc;
RespPacketQueue respQueue;
virtual void recvFunctional(PacketPtr pkt);
virtual bool recvTimingReq(PacketPtr pkt);
public:
- SMMUSlavePort(const std::string &_name,
- SMMUv3SlaveInterface &_ifc,
+ SMMUDevicePort(const std::string &_name,
+ SMMUv3DeviceInterface &_ifc,
PortID _id = InvalidPortID);
- virtual ~SMMUSlavePort() {}
+ virtual ~SMMUDevicePort() {}
virtual AddrRangeList getAddrRanges() const
{ return AddrRangeList { AddrRange(0, UINT64_MAX) }; }
virtual ~SMMUControlPort() {}
};
-class SMMUATSMasterPort : public QueuedMasterPort
+class SMMUATSMemoryPort : public QueuedRequestPort
{
protected:
- SMMUv3SlaveInterface &ifc;
+ SMMUv3DeviceInterface &ifc;
ReqPacketQueue reqQueue;
SnoopRespPacketQueue snoopRespQueue;
virtual bool recvTimingResp(PacketPtr pkt);
public:
- SMMUATSMasterPort(const std::string &_name, SMMUv3SlaveInterface &_ifc);
- virtual ~SMMUATSMasterPort() {}
+ SMMUATSMemoryPort(const std::string &_name, SMMUv3DeviceInterface &_ifc);
+ virtual ~SMMUATSMemoryPort() {}
};
-class SMMUATSSlavePort : public QueuedSlavePort
+class SMMUATSDevicePort : public QueuedResponsePort
{
protected:
- SMMUv3SlaveInterface &ifc;
+ SMMUv3DeviceInterface &ifc;
RespPacketQueue respQueue;
virtual void recvFunctional(PacketPtr pkt);
{ return AddrRangeList(); }
public:
- SMMUATSSlavePort(const std::string &_name, SMMUv3SlaveInterface &_ifc);
- virtual ~SMMUATSSlavePort() {}
+ SMMUATSDevicePort(const std::string &_name, SMMUv3DeviceInterface &_ifc);
+ virtual ~SMMUATSDevicePort() {}
};
#endif /* __DEV_ARM_SMMU_V3_PORTS_HH__ */
void
SMMUProcess::doRead(Yield &yield, Addr addr, void *ptr, size_t size)
{
- doSemaphoreDown(yield, smmu.masterPortSem);
+ doSemaphoreDown(yield, smmu.requestPortSem);
doDelay(yield, Cycles(1)); // request - assume 1 cycle
- doSemaphoreUp(smmu.masterPortSem);
+ doSemaphoreUp(smmu.requestPortSem);
SMMUAction a;
a.type = ACTION_SEND_REQ;
RequestPtr req = std::make_shared<Request>(
- addr, size, 0, smmu.masterId);
+ addr, size, 0, smmu.requestorId);
req->taskId(ContextSwitchTaskId::DMA);
void
SMMUProcess::doWrite(Yield &yield, Addr addr, const void *ptr, size_t size)
{
- unsigned nbeats = (size + (smmu.masterPortWidth-1)) / smmu.masterPortWidth;
+ unsigned nbeats = (size + (smmu.requestPortWidth-1))
+ / smmu.requestPortWidth;
- doSemaphoreDown(yield, smmu.masterPortSem);
+ doSemaphoreDown(yield, smmu.requestPortSem);
doDelay(yield, Cycles(nbeats));
- doSemaphoreUp(smmu.masterPortSem);
+ doSemaphoreUp(smmu.requestPortSem);
SMMUAction a;
a.type = ACTION_SEND_REQ;
RequestPtr req = std::make_shared<Request>(
- addr, size, 0, smmu.masterId);
+ addr, size, 0, smmu.requestorId);
req->taskId(ContextSwitchTaskId::DMA);
#include "base/types.hh"
#include "mem/packet.hh"
-class SMMUv3SlaveInterface;
+class SMMUv3DeviceInterface;
/*
* The meaning of these becomes apparent when you
{
SMMUActionType type;
PacketPtr pkt;
- SMMUv3SlaveInterface *ifc;
+ SMMUv3DeviceInterface *ifc;
Tick delay;
};
+++ /dev/null
-/*
- * Copyright (c) 2019 ARM Limited
- * All rights reserved
- *
- * The license below extends only to copyright in the software and shall
- * not be construed as granting a license to any other intellectual
- * property including but not limited to intellectual property relating
- * to a hardware implementation of the functionality of the software
- * licensed hereunder. You may use the software subject to the license
- * terms below provided that you ensure that this notice is replicated
- * unmodified and in its entirety in all distributions of the software,
- * modified or unmodified, in source code or in binary form.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "dev/arm/smmu_v3_slaveifc.hh"
-
-#include "base/trace.hh"
-#include "debug/SMMUv3.hh"
-#include "dev/arm/smmu_v3.hh"
-#include "dev/arm/smmu_v3_transl.hh"
-
-SMMUv3SlaveInterface::SMMUv3SlaveInterface(
- const SMMUv3SlaveInterfaceParams *p) :
- ClockedObject(p),
- smmu(nullptr),
- microTLB(new SMMUTLB(p->utlb_entries,
- p->utlb_assoc,
- p->utlb_policy)),
- mainTLB(new SMMUTLB(p->tlb_entries,
- p->tlb_assoc,
- p->tlb_policy)),
- microTLBEnable(p->utlb_enable),
- mainTLBEnable(p->tlb_enable),
- slavePortSem(1),
- microTLBSem(p->utlb_slots),
- mainTLBSem(p->tlb_slots),
- microTLBLat(p->utlb_lat),
- mainTLBLat(p->tlb_lat),
- slavePort(new SMMUSlavePort(csprintf("%s.slave", name()), *this)),
- atsSlavePort(name() + ".atsSlave", *this),
- atsMasterPort(name() + ".atsMaster", *this),
- portWidth(p->port_width),
- wrBufSlotsRemaining(p->wrbuf_slots),
- xlateSlotsRemaining(p->xlate_slots),
- pendingMemAccesses(0),
- prefetchEnable(p->prefetch_enable),
- prefetchReserveLastWay(
- p->prefetch_reserve_last_way),
- deviceNeedsRetry(false),
- atsDeviceNeedsRetry(false),
- sendDeviceRetryEvent(*this),
- atsSendDeviceRetryEvent(this)
-{}
-
-void
-SMMUv3SlaveInterface::sendRange()
-{
- if (slavePort->isConnected()) {
- inform("Slave port is connected to %s\n", slavePort->getPeer());
-
- slavePort->sendRangeChange();
- } else {
- fatal("Slave port is not connected.\n");
- }
-}
-
-Port&
-SMMUv3SlaveInterface::getPort(const std::string &name, PortID id)
-{
- if (name == "ats_master") {
- return atsMasterPort;
- } else if (name == "slave") {
- return *slavePort;
- } else if (name == "ats_slave") {
- return atsSlavePort;
- } else {
- return ClockedObject::getPort(name, id);
- }
-}
-
-void
-SMMUv3SlaveInterface::schedTimingResp(PacketPtr pkt)
-{
- slavePort->schedTimingResp(pkt, nextCycle());
-}
-
-void
-SMMUv3SlaveInterface::schedAtsTimingResp(PacketPtr pkt)
-{
- atsSlavePort.schedTimingResp(pkt, nextCycle());
-
- if (atsDeviceNeedsRetry) {
- atsDeviceNeedsRetry = false;
- schedule(atsSendDeviceRetryEvent, nextCycle());
- }
-}
-
-Tick
-SMMUv3SlaveInterface::recvAtomic(PacketPtr pkt)
-{
- DPRINTF(SMMUv3, "[a] req from %s addr=%#x size=%#x\n",
- slavePort->getPeer(), pkt->getAddr(), pkt->getSize());
-
- std::string proc_name = csprintf("%s.port", name());
- SMMUTranslationProcess proc(proc_name, *smmu, *this);
- proc.beginTransaction(SMMUTranslRequest::fromPacket(pkt));
-
- SMMUAction a = smmu->runProcessAtomic(&proc, pkt);
- assert(a.type == ACTION_SEND_RESP);
-
- return a.delay;
-}
-
-bool
-SMMUv3SlaveInterface::recvTimingReq(PacketPtr pkt)
-{
- DPRINTF(SMMUv3, "[t] req from %s addr=%#x size=%#x\n",
- slavePort->getPeer(), pkt->getAddr(), pkt->getSize());
-
- // @todo: We need to pay for this and not just zero it out
- pkt->headerDelay = pkt->payloadDelay = 0;
-
- unsigned nbeats =
- (pkt->getSize() + (portWidth-1)) / portWidth;
-
- if (xlateSlotsRemaining==0 ||
- (pkt->isWrite() && wrBufSlotsRemaining < nbeats))
- {
- deviceNeedsRetry = true;
- return false;
- }
-
- if (pkt->isWrite())
- wrBufSlotsRemaining -= nbeats;
-
- std::string proc_name = csprintf("%s.port", name());
- SMMUTranslationProcess *proc =
- new SMMUTranslationProcess(proc_name, *smmu, *this);
- proc->beginTransaction(SMMUTranslRequest::fromPacket(pkt));
-
- smmu->runProcessTiming(proc, pkt);
-
- return true;
-}
-
-Tick
-SMMUv3SlaveInterface::atsSlaveRecvAtomic(PacketPtr pkt)
-{
- DPRINTF(SMMUv3, "[a] ATS slave req addr=%#x size=%#x\n",
- pkt->getAddr(), pkt->getSize());
-
- std::string proc_name = csprintf("%s.atsport", name());
- const bool ats_request = true;
- SMMUTranslationProcess proc(
- proc_name, *smmu, *this);
- proc.beginTransaction(SMMUTranslRequest::fromPacket(pkt, ats_request));
-
- SMMUAction a = smmu->runProcessAtomic(&proc, pkt);
- assert(a.type == ACTION_SEND_RESP_ATS);
-
- return a.delay;
-}
-
-bool
-SMMUv3SlaveInterface::atsSlaveRecvTimingReq(PacketPtr pkt)
-{
- DPRINTF(SMMUv3, "[t] ATS slave req addr=%#x size=%#x\n",
- pkt->getAddr(), pkt->getSize());
-
- // @todo: We need to pay for this and not just zero it out
- pkt->headerDelay = pkt->payloadDelay = 0;
-
- if (xlateSlotsRemaining == 0) {
- deviceNeedsRetry = true;
- return false;
- }
-
- std::string proc_name = csprintf("%s.atsport", name());
- const bool ats_request = true;
- SMMUTranslationProcess *proc =
- new SMMUTranslationProcess(proc_name, *smmu, *this);
- proc->beginTransaction(SMMUTranslRequest::fromPacket(pkt, ats_request));
-
- smmu->runProcessTiming(proc, pkt);
-
- return true;
-}
-
-bool
-SMMUv3SlaveInterface::atsMasterRecvTimingResp(PacketPtr pkt)
-{
- DPRINTF(SMMUv3, "[t] ATS master resp addr=%#x size=%#x\n",
- pkt->getAddr(), pkt->getSize());
-
- // @todo: We need to pay for this and not just zero it out
- pkt->headerDelay = pkt->payloadDelay = 0;
-
- SMMUProcess *proc =
- safe_cast<SMMUProcess *>(pkt->popSenderState());
-
- smmu->runProcessTiming(proc, pkt);
-
- return true;
-}
-
-void
-SMMUv3SlaveInterface::sendDeviceRetry()
-{
- slavePort->sendRetryReq();
-}
-
-void
-SMMUv3SlaveInterface::atsSendDeviceRetry()
-{
- DPRINTF(SMMUv3, "ATS retry\n");
- atsSlavePort.sendRetryReq();
-}
-
-void
-SMMUv3SlaveInterface::scheduleDeviceRetry()
-{
- if (deviceNeedsRetry && !sendDeviceRetryEvent.scheduled()) {
- DPRINTF(SMMUv3, "sched slave retry\n");
- deviceNeedsRetry = false;
- schedule(sendDeviceRetryEvent, nextCycle());
- }
-}
-
-DrainState
-SMMUv3SlaveInterface::drain()
-{
- // Wait until all SMMU translations are completed
- if (xlateSlotsRemaining < params()->xlate_slots) {
- return DrainState::Draining;
- }
- return DrainState::Drained;
-}
-
-SMMUv3SlaveInterface*
-SMMUv3SlaveInterfaceParams::create()
-{
- return new SMMUv3SlaveInterface(this);
-}
+++ /dev/null
-/*
- * Copyright (c) 2013, 2018-2019 ARM Limited
- * All rights reserved
- *
- * The license below extends only to copyright in the software and shall
- * not be construed as granting a license to any other intellectual
- * property including but not limited to intellectual property relating
- * to a hardware implementation of the functionality of the software
- * licensed hereunder. You may use the software subject to the license
- * terms below provided that you ensure that this notice is replicated
- * unmodified and in its entirety in all distributions of the software,
- * modified or unmodified, in source code or in binary form.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __DEV_ARM_SMMU_V3_SLAVEIFC_HH__
-#define __DEV_ARM_SMMU_V3_SLAVEIFC_HH__
-
-#include <list>
-
-#include "dev/arm/smmu_v3_caches.hh"
-#include "dev/arm/smmu_v3_defs.hh"
-#include "dev/arm/smmu_v3_events.hh"
-#include "dev/arm/smmu_v3_ports.hh"
-#include "dev/arm/smmu_v3_proc.hh"
-#include "params/SMMUv3SlaveInterface.hh"
-#include "sim/clocked_object.hh"
-
-class SMMUTranslationProcess;
-class SMMUv3;
-class SMMUSlavePort;
-
-class SMMUv3SlaveInterface : public ClockedObject
-{
- protected:
- friend class SMMUTranslationProcess;
-
- public:
- SMMUv3 *smmu;
- SMMUTLB* microTLB;
- SMMUTLB* mainTLB;
-
- const bool microTLBEnable;
- const bool mainTLBEnable;
-
- SMMUSemaphore slavePortSem;
- SMMUSemaphore microTLBSem;
- SMMUSemaphore mainTLBSem;
-
- const Cycles microTLBLat;
- const Cycles mainTLBLat;
-
- SMMUSlavePort *slavePort;
- SMMUATSSlavePort atsSlavePort;
- SMMUATSMasterPort atsMasterPort;
-
- // in bytes
- const unsigned portWidth;
-
- unsigned wrBufSlotsRemaining;
- unsigned xlateSlotsRemaining;
- unsigned pendingMemAccesses;
-
- const bool prefetchEnable;
- const bool prefetchReserveLastWay;
-
- std::list<SMMUTranslationProcess *> duplicateReqs;
- SMMUSignal duplicateReqRemoved;
-
- std::list<SMMUTranslationProcess *> dependentReads[SMMU_MAX_TRANS_ID];
- std::list<SMMUTranslationProcess *> dependentWrites[SMMU_MAX_TRANS_ID];
- SMMUSignal dependentReqRemoved;
-
- // Receiving translation requests from the master device
- Tick recvAtomic(PacketPtr pkt);
- bool recvTimingReq(PacketPtr pkt);
- void schedTimingResp(PacketPtr pkt);
-
- Tick atsSlaveRecvAtomic(PacketPtr pkt);
- bool atsSlaveRecvTimingReq(PacketPtr pkt);
- bool atsMasterRecvTimingResp(PacketPtr pkt);
- void schedAtsTimingResp(PacketPtr pkt);
-
- void scheduleDeviceRetry();
- void sendDeviceRetry();
- void atsSendDeviceRetry();
-
- bool deviceNeedsRetry;
- bool atsDeviceNeedsRetry;
-
- SMMUDeviceRetryEvent sendDeviceRetryEvent;
- EventWrapper<
- SMMUv3SlaveInterface,
- &SMMUv3SlaveInterface::atsSendDeviceRetry> atsSendDeviceRetryEvent;
-
- Port& getPort(const std::string &name, PortID id) override;
-
- public:
- SMMUv3SlaveInterface(const SMMUv3SlaveInterfaceParams *p);
-
- ~SMMUv3SlaveInterface()
- {
- delete microTLB;
- delete mainTLB;
- }
-
- const SMMUv3SlaveInterfaceParams *
- params() const
- {
- return static_cast<const SMMUv3SlaveInterfaceParams *>(_params);
- }
-
- DrainState drain() override;
-
- void setSMMU(SMMUv3 *_smmu) { smmu = _smmu; }
- void sendRange();
-};
-
-#endif /* __DEV_ARM_SMMU_V3_SLAVEIFC_HH__ */
}
SMMUTranslationProcess::SMMUTranslationProcess(const std::string &name,
- SMMUv3 &_smmu, SMMUv3SlaveInterface &_ifc)
+ SMMUv3 &_smmu, SMMUv3DeviceInterface &_ifc)
:
SMMUProcess(name, _smmu),
ifc(_ifc)
{
- // Decrease number of pending translation slots on the slave interface
+ // Decrease number of pending translation slots on the device interface
assert(ifc.xlateSlotsRemaining > 0);
ifc.xlateSlotsRemaining--;
SMMUTranslationProcess::~SMMUTranslationProcess()
{
- // Increase number of pending translation slots on the slave interface
+ // Increase number of pending translation slots on the device interface
assert(ifc.pendingMemAccesses > 0);
ifc.pendingMemAccesses--;
// If no more SMMU memory accesses are pending,
- // signal SMMU Slave Interface as drained
+ // signal SMMU Device Interface as drained
if (ifc.pendingMemAccesses == 0) {
ifc.signalDrainDone();
}
request.addr, request.size);
- unsigned numSlaveBeats = request.isWrite ?
+ unsigned numResponderBeats = request.isWrite ?
(request.size + (ifc.portWidth - 1)) / ifc.portWidth : 1;
- doSemaphoreDown(yield, ifc.slavePortSem);
- doDelay(yield, Cycles(numSlaveBeats));
- doSemaphoreUp(ifc.slavePortSem);
+ doSemaphoreDown(yield, ifc.devicePortSem);
+ doDelay(yield, Cycles(numResponderBeats));
+ doSemaphoreUp(ifc.devicePortSem);
recvTick = curTick();
bool haveConfig = true;
if (!configCacheLookup(yield, context)) {
- if(findConfig(yield, context, tr)) {
+ if (findConfig(yield, context, tr)) {
configCacheUpdate(yield, context);
} else {
haveConfig = false;
smmuTLBUpdate(yield, tr);
}
- // Simulate pipelined SMMU->SLAVE INTERFACE link
+ // Simulate pipelined SMMU->RESPONSE INTERFACE link
doSemaphoreDown(yield, smmu.smmuIfcSem);
doDelay(yield, Cycles(1)); // serialize transactions
doSemaphoreUp(smmu.smmuIfcSem);
if (!e) {
DPRINTF(SMMUv3,
- "SLAVE Interface TLB miss vaddr=%#x sid=%#x ssid=%#x\n",
+ "RESPONSE Interface TLB miss vaddr=%#x sid=%#x ssid=%#x\n",
request.addr, request.sid, request.ssid);
return false;
}
DPRINTF(SMMUv3,
- "SLAVE Interface TLB hit vaddr=%#x amask=%#x sid=%#x ssid=%#x "
+ "RESPONSE Interface TLB hit vaddr=%#x amask=%#x sid=%#x ssid=%#x "
"paddr=%#x\n", request.addr, e->vaMask, request.sid,
request.ssid, e->pa);
doSemaphoreDown(yield, ifc.mainTLBSem);
DPRINTF(SMMUv3,
- "SLAVE Interface upd vaddr=%#x amask=%#x paddr=%#x sid=%#x "
+ "RESPONSE Interface upd vaddr=%#x amask=%#x paddr=%#x sid=%#x "
"ssid=%#x\n", e.va, e.vaMask, e.pa, e.sid, e.ssid);
ifc.mainTLB->store(e, alloc);
{
assert(tr.fault == FAULT_NONE);
- unsigned numMasterBeats = request.isWrite ?
- (request.size + (smmu.masterPortWidth-1))
- / smmu.masterPortWidth :
+ unsigned numRequestorBeats = request.isWrite ?
+ (request.size + (smmu.requestPortWidth-1))
+ / smmu.requestPortWidth :
1;
- doSemaphoreDown(yield, smmu.masterPortSem);
- doDelay(yield, Cycles(numMasterBeats));
- doSemaphoreUp(smmu.masterPortSem);
+ doSemaphoreDown(yield, smmu.requestPortSem);
+ doDelay(yield, Cycles(numRequestorBeats));
+ doSemaphoreUp(smmu.requestPortSem);
smmu.translationTimeDist.sample(curTick() - recvTick);
ifc.wrBufSlotsRemaining +=
(request.size + (ifc.portWidth-1)) / ifc.portWidth;
- smmu.scheduleSlaveRetries();
+ smmu.scheduleDeviceRetries();
SMMUAction a;
#define __DEV_ARM_SMMU_V3_TRANSL_HH__
#include "base/compiler.hh"
+#include "dev/arm/smmu_v3_deviceifc.hh"
#include "dev/arm/smmu_v3_proc.hh"
#include "dev/arm/smmu_v3_ptops.hh"
-#include "dev/arm/smmu_v3_slaveifc.hh"
#include "mem/packet.hh"
struct SMMUTranslRequest
bool writable;
};
- SMMUv3SlaveInterface &ifc;
+ SMMUv3DeviceInterface &ifc;
SMMUTranslRequest request;
TranslContext context;
public:
SMMUTranslationProcess(const std::string &name, SMMUv3 &_smmu,
- SMMUv3SlaveInterface &_ifc);
+ SMMUv3DeviceInterface &_ifc);
virtual ~SMMUTranslationProcess();
DPRINTF(VGIC, "VGIC HVCtrl read register %#x\n", daddr);
- /* Munge the address: 0-0xfff is the usual space banked by requester CPU.
+ /* Munge the address: 0-0xfff is the usual space banked by requestor CPU.
* Anything > that is 0x200-sized slices of 'per CPU' regs.
*/
if (daddr & ~0x1ff) {
DPRINTF(VGIC, "VGIC HVCtrl write register %#x <= %#x\n",
daddr, pkt->getLE<uint32_t>());
- /* Munge the address: 0-0xfff is the usual space banked by requester CPU.
+ /* Munge the address: 0-0xfff is the usual space banked by requestor CPU.
* Anything > that is 0x200-sized slices of 'per CPU' regs.
*/
if (daddr & ~0x1ff) {
DmaPort::DmaPort(ClockedObject *dev, System *s,
uint32_t sid, uint32_t ssid)
: RequestPort(dev->name() + ".dma", dev),
- device(dev), sys(s), masterId(s->getMasterId(dev)),
+ device(dev), sys(s), requestorId(s->getRequestorId(dev)),
sendEvent([this]{ sendDma(); }, dev->name()),
pendingCount(0), inRetry(false),
defaultSid(sid),
!gen.done(); gen.next()) {
req = std::make_shared<Request>(
- gen.addr(), gen.size(), flag, masterId);
+ gen.addr(), gen.size(), flag, requestorId);
req->setStreamId(sid);
req->setSubStreamId(ssid);
System *const sys;
/** Id for all requests */
- const MasterID masterId;
+ const RequestorID requestorId;
protected:
/** Use a deque as we never do any insertion or removal in the middle */
# earlier, since the bus object itself is typically defined at the
# System level.
def attachIO(self, bus):
- self.cchip.pio = bus.master
- self.io.pio = bus.master
- self.uart.pio = bus.master
+ self.cchip.pio = bus.mem_side_ports
+ self.io.pio = bus.mem_side_ports
+ self.uart.pio = bus.mem_side_ports
DistIface::SyncEvent *DistIface::syncEvent = nullptr;
unsigned DistIface::distIfaceNum = 0;
unsigned DistIface::recvThreadsNum = 0;
-DistIface *DistIface::master = nullptr;
+DistIface *DistIface::primary = nullptr;
bool DistIface::isSwitch = false;
void
needExit = ReqType::pending;
if (needStopSync != ReqType::none)
needStopSync = ReqType::pending;
- DistIface::master->sendCmd(header);
+ DistIface::primary->sendCmd(header);
// now wait until all receiver threads complete the synchronisation
auto lf = [this]{ return waitNum == 0; };
cv.wait(sync_lock, lf);
} else {
header.needStopSync = ReqType::none;
}
- DistIface::master->sendCmd(header);
+ DistIface::primary->sendCmd(header);
return true;
}
start();
} else {
// Wake up thread contexts on non-switch nodes.
- for (auto *tc: master->sys->threads) {
+ for (auto *tc: primary->sys->threads) {
if (tc->status() == ThreadContext::Suspended)
tc->activate();
else
"send_tick:%llu send_delay:%llu link_delay:%llu recv_tick:%llu\n",
send_tick, send_delay, linkDelay, recv_tick);
// Every packet must be sent and arrive in the same quantum
- assert(send_tick > master->syncEvent->when() -
- master->syncEvent->repeat);
+ assert(send_tick > primary->syncEvent->when() -
+ primary->syncEvent->repeat);
// No packet may be scheduled for receive in the arrival quantum
- assert(send_tick + send_delay + linkDelay > master->syncEvent->when());
+ assert(send_tick + send_delay + linkDelay > primary->syncEvent->when());
// Now we are about to schedule a recvDone event for the new data packet.
// We use the same recvDone object for all incoming data packets. Packet
rank(dist_rank), size(dist_size)
{
DPRINTF(DistEthernet, "DistIface() ctor rank:%d\n",dist_rank);
- isMaster = false;
- if (master == nullptr) {
+ isPrimary = false;
+ if (primary == nullptr) {
assert(sync == nullptr);
assert(syncEvent == nullptr);
isSwitch = is_switch;
else
sync = new SyncNode();
syncEvent = new SyncEvent();
- master = this;
- isMaster = true;
+ primary = this;
+ isPrimary = true;
}
distIfaceId = distIfaceNum;
distIfaceNum++;
assert(sync);
delete sync;
}
- if (this == master)
- master = nullptr;
+ if (this == primary)
+ primary = nullptr;
}
void
{
DPRINTF(DistEthernet,"DistIFace::drain() called\n");
// This can be called multiple times in the same drain cycle.
- if (this == master)
+ if (this == primary)
syncEvent->draining(true);
return DrainState::Drained;
}
void
DistIface::drainResume() {
DPRINTF(DistEthernet,"DistIFace::drainResume() called\n");
- if (this == master)
+ if (this == primary)
syncEvent->draining(false);
recvScheduler.resumeRecvTicks();
}
SERIALIZE_SCALAR(dist_iface_id_orig);
recvScheduler.serializeSection(cp, "recvScheduler");
- if (this == master) {
+ if (this == primary) {
sync->serializeSection(cp, "Sync");
}
}
dist_iface_id_orig);
recvScheduler.unserializeSection(cp, "recvScheduler");
- if (this == master) {
+ if (this == primary) {
sync->unserializeSection(cp, "Sync");
}
}
// Initialize the seed for random generator to avoid the same sequence
// in all gem5 peer processes
- assert(master != nullptr);
- if (this == master)
+ assert(primary != nullptr);
+ if (this == primary)
random_mt.init(5489 * (rank+1) + 257);
}
{
DPRINTF(DistEthernet, "DistIface::startup() started\n");
// Schedule synchronization unless we are not a switch in pseudo_op mode.
- if (this == master && (!syncStartOnPseudoOp || isSwitch))
+ if (this == primary && (!syncStartOnPseudoOp || isSwitch))
syncEvent->start();
DPRINTF(DistEthernet, "DistIface::startup() done\n");
}
bool ret = true;
DPRINTF(DistEthernet, "DistIface::readyToCkpt() called, delay:%lu "
"period:%lu\n", delay, period);
- if (master) {
+ if (primary) {
if (delay == 0) {
inform("m5 checkpoint called with zero delay => triggering collaborative "
"checkpoint\n");
DistIface::toggleSync(ThreadContext *tc)
{
// Unforunate that we have to populate the system pointer member this way.
- master->sys = tc->getSystemPtr();
+ primary->sys = tc->getSystemPtr();
// The invariant for both syncing and "unsyncing" is that all threads will
// stop executing intructions until the desired sync state has been reached
// for all nodes. This is the easiest way to prevent deadlock (in the case
// of "unsyncing") and causality errors (in the case of syncing).
- if (master->syncEvent->scheduled()) {
+ if (primary->syncEvent->scheduled()) {
inform("Request toggling syncronization off\n");
- master->sync->requestStopSync(ReqType::collective);
+ primary->sync->requestStopSync(ReqType::collective);
// At this point, we have no clue when everyone will reach the sync
// stop point. Suspend execution of all local thread contexts.
// Dist-gem5 will reactivate all thread contexts when everyone has
// reached the sync stop point.
#if THE_ISA != NULL_ISA
- for (auto *tc: master->sys->threads) {
+ for (auto *tc: primary->sys->threads) {
if (tc->status() == ThreadContext::Active)
tc->quiesce();
}
#endif
} else {
inform("Request toggling syncronization on\n");
- master->syncEvent->start();
+ primary->syncEvent->start();
// We need to suspend all CPUs until the sync point is reached by all
// nodes to prevent causality errors. We can also schedule CPU
// activation here, since we know exactly when the next sync will
// occur.
#if THE_ISA != NULL_ISA
- for (auto *tc: master->sys->threads) {
+ for (auto *tc: primary->sys->threads) {
if (tc->status() == ThreadContext::Active)
- tc->quiesceTick(master->syncEvent->when() + 1);
+ tc->quiesceTick(primary->syncEvent->when() + 1);
}
#endif
}
bool ret = true;
DPRINTF(DistEthernet, "DistIface::readyToExit() called, delay:%lu\n",
delay);
- if (master) {
+ if (primary) {
// To successfully coordinate an exit, all nodes must be synchronising
- if (!master->syncEvent->scheduled())
- master->syncEvent->start();
+ if (!primary->syncEvent->scheduled())
+ primary->syncEvent->start();
if (delay == 0) {
inform("m5 exit called with zero delay => triggering collaborative "
DistIface::rankParam()
{
uint64_t val;
- if (master) {
- val = master->rank;
+ if (primary) {
+ val = primary->rank;
} else {
warn("Dist-rank parameter is queried in single gem5 simulation.");
val = 0;
DistIface::sizeParam()
{
uint64_t val;
- if (master) {
- val = master->size;
+ if (primary) {
+ val = primary->size;
} else {
warn("Dist-size parameter is queried in single gem5 simulation.");
val = 1;
*/
unsigned distIfaceId;
- bool isMaster;
+ bool isPrimary;
private:
/**
*/
static SyncEvent *syncEvent;
/**
- * The very first DistIface object created becomes the master. We need
- * a master to co-ordinate the global synchronisation.
+ * The very first DistIface object created becomes the primary interface.
+ * We need a primary interface to co-ordinate the global synchronisation.
*/
- static DistIface *master;
+ static DistIface *primary;
/**
* System pointer used to wakeup sleeping threads when stopping sync.
*/
*/
static uint64_t sizeParam();
/**
- * Trigger the master to start/stop synchronization.
+ * Trigger the primary to start/stop synchronization.
*/
static void toggleSync(ThreadContext *tc);
};
is_switch, num_nodes), serverName(server_name),
serverPort(server_port), isSwitch(is_switch), listening(false)
{
- if (is_switch && isMaster) {
+ if (is_switch && isPrimary) {
while (!listen(serverPort)) {
DPRINTF(DistEthernet, "TCPIface(listen): Can't bind port %d\n",
serverPort);
{
DPRINTF(DistEthernetCmd, "TCPIface::sendCmd() type: %d\n",
static_cast<int>(header.msgType));
- // Global commands (i.e. sync request) are always sent by the master
+ // Global commands (i.e. sync request) are always sent by the primary
// DistIface. The transfer method is simply implemented as point-to-point
// messages for now
for (auto s: sockRegistry)
class CopyEngine(PciDevice):
type = 'CopyEngine'
cxx_header = "dev/pci/copy_engine.hh"
- dma = VectorMasterPort("Copy engine DMA port")
+ dma = VectorRequestPort("Copy engine DMA port")
VendorID = 0x8086
DeviceID = 0x1a38
Revision = 0xA2 # CM2 stepping (newest listed)
pollQueue.schedule(dataEvent);
stringstream stream;
- ccprintf(stream, "==== m5 slave terminal: Terminal %d ====", number);
+ ccprintf(stream, "==== m5 terminal: Terminal %d ====", number);
// we need an actual carriage return followed by a newline for the
// terminal
iob = Iob()
# Attach I/O devices that are on chip
def attachOnChipIO(self, bus):
- self.iob.pio = bus.master
- self.htod.pio = bus.master
+ self.iob.pio = bus.mem_side_ports
+ self.htod.pio = bus.mem_side_ports
# Attach I/O devices to specified bus object. Can't do this
def attachIO(self, bus):
self.hvuart.device = self.hterm
self.puart0.device = self.pterm
- self.fake_clk.pio = bus.master
- self.fake_membnks.pio = bus.master
- self.fake_l2_1.pio = bus.master
- self.fake_l2_2.pio = bus.master
- self.fake_l2_3.pio = bus.master
- self.fake_l2_4.pio = bus.master
- self.fake_l2esr_1.pio = bus.master
- self.fake_l2esr_2.pio = bus.master
- self.fake_l2esr_3.pio = bus.master
- self.fake_l2esr_4.pio = bus.master
- self.fake_ssi.pio = bus.master
- self.fake_jbi.pio = bus.master
- self.puart0.pio = bus.master
- self.hvuart.pio = bus.master
+ self.fake_clk.pio = bus.mem_side_ports
+ self.fake_membnks.pio = bus.mem_side_ports
+ self.fake_l2_1.pio = bus.mem_side_ports
+ self.fake_l2_2.pio = bus.mem_side_ports
+ self.fake_l2_3.pio = bus.mem_side_ports
+ self.fake_l2_4.pio = bus.mem_side_ports
+ self.fake_l2esr_1.pio = bus.mem_side_ports
+ self.fake_l2esr_2.pio = bus.mem_side_ports
+ self.fake_l2esr_3.pio = bus.mem_side_ports
+ self.fake_l2esr_4.pio = bus.mem_side_ports
+ self.fake_ssi.pio = bus.mem_side_ports
+ self.fake_jbi.pio = bus.mem_side_ports
+ self.puart0.pio = bus.mem_side_ports
+ self.hvuart.pio = bus.mem_side_ports
from m5.params import *
from m5.objects.PciDevice import PciDevice
-class IdeID(Enum): vals = ['master', 'slave']
+class IdeID(Enum): vals = ['device0', 'device1']
class IdeDisk(SimObject):
type = 'IdeDisk'
cxx_header = "dev/storage/ide_disk.hh"
delay = Param.Latency('1us', "Fixed disk delay in microseconds")
- driveID = Param.IdeID('master', "Drive ID")
+ driveID = Param.IdeID('device0', "Drive ID")
image = Param.DiskImage("Disk image")
class IdeController(PciDevice):
string newName, Addr _cmdSize, Addr _ctrlSize) :
_name(newName),
cmdAddr(0), cmdSize(_cmdSize), ctrlAddr(0), ctrlSize(_ctrlSize),
- master(NULL), slave(NULL), selected(NULL)
+ device0(NULL), device1(NULL), selected(NULL)
{
bmiRegs.reset();
bmiRegs.status.dmaCap0 = 1;
continue;
switch (i) {
case 0:
- primary.master = params()->disks[0];
+ primary.device0 = params()->disks[0];
break;
case 1:
- primary.slave = params()->disks[1];
+ primary.device1 = params()->disks[1];
break;
case 2:
- secondary.master = params()->disks[2];
+ secondary.device0 = params()->disks[2];
break;
case 3:
- secondary.slave = params()->disks[3];
+ secondary.device1 = params()->disks[3];
break;
default:
panic("IDE controllers support a maximum "
IdeController::setDmaComplete(IdeDisk *disk)
{
Channel *channel;
- if (disk == primary.master || disk == primary.slave) {
+ if (disk == primary.device0 || disk == primary.device1) {
channel = &primary;
- } else if (disk == secondary.master || disk == secondary.slave) {
+ } else if (disk == secondary.device0 || disk == secondary.device1) {
channel = &secondary;
} else {
panic("Unable to find disk based on pointer %#x\n", disk);
uint32_t bmidtp;
} bmiRegs;
- /** IDE disks connected to this controller */
- IdeDisk *master, *slave;
+ /** IDE disks connected to this controller
+ * For more details about device0 and device1 see:
+ * https://en.wikipedia.org/wiki/Parallel_ATA
+ * #Multiple_devices_on_a_cable
+ *
+ */
+ IdeDisk *device0, *device1;
/** Currently selected disk */
IdeDisk *selected;
bool selectBit;
void
- select(bool selSlave)
+ select(bool select_device_1)
{
- selectBit = selSlave;
- selected = selectBit ? slave : master;
+ selectBit = select_device_1;
+ selected = selectBit ? device1 : device0;
}
void accessCommand(Addr offset, int size, uint8_t *data, bool read);
uint32_t curPrdAddr;
/** PRD entry */
PrdTableEntry curPrd;
- /** Device ID (master=0/slave=1) */
+ /** Device ID (device0=0/device1=1) */
int devID;
/** Interrupt pending */
bool intrPending;
cxx_class = 'X86ISA::I82094AA'
cxx_header = "dev/x86/i82094aa.hh"
apic_id = Param.Int(1, 'APIC id for this IO APIC')
- int_master = RequestPort("Port for sending interrupt messages")
+ int_requestor = RequestPort("Port for sending interrupt messages")
int_latency = Param.Latency('1ns', \
"Latency for an interrupt to propagate through this device.")
external_int_pic = Param.I8259(NULL, "External PIC, if any")
def attachIO(self, bus, dma_ports = []):
self.south_bridge.attachIO(bus, dma_ports)
- self.i_dont_exist1.pio = bus.master
- self.i_dont_exist2.pio = bus.master
- self.behind_pci.pio = bus.master
- self.com_1.pio = bus.master
- self.fake_com_2.pio = bus.master
- self.fake_com_3.pio = bus.master
- self.fake_com_4.pio = bus.master
- self.fake_floppy.pio = bus.master
+ self.i_dont_exist1.pio = bus.mem_side_ports
+ self.i_dont_exist2.pio = bus.mem_side_ports
+ self.behind_pci.pio = bus.mem_side_ports
+ self.com_1.pio = bus.mem_side_ports
+ self.fake_com_2.pio = bus.mem_side_ports
+ self.fake_com_3.pio = bus.mem_side_ports
+ self.fake_com_4.pio = bus.mem_side_ports
+ self.fake_floppy.pio = bus.mem_side_ports
self.pci_host.pio = bus.default
self.speaker.i8254 = self.pit
self.io_apic.external_int_pic = self.pic1
# Connect to the bus
- self.cmos.pio = bus.master
- self.dma1.pio = bus.master
- self.ide.pio = bus.master
+ self.cmos.pio = bus.mem_side_ports
+ self.dma1.pio = bus.mem_side_ports
+ self.ide.pio = bus.mem_side_ports
if dma_ports.count(self.ide.dma) == 0:
- self.ide.dma = bus.slave
- self.keyboard.pio = bus.master
- self.pic1.pio = bus.master
- self.pic2.pio = bus.master
- self.pit.pio = bus.master
- self.speaker.pio = bus.master
- self.io_apic.pio = bus.master
- self.io_apic.int_master = bus.slave
+ self.ide.dma = bus.cpu_side_ports
+ self.keyboard.pio = bus.mem_side_ports
+ self.pic1.pio = bus.mem_side_ports
+ self.pic2.pio = bus.mem_side_ports
+ self.pit.pio = bus.mem_side_ports
+ self.speaker.pio = bus.mem_side_ports
+ self.io_apic.pio = bus.mem_side_ports
+ self.io_apic.int_requestor = bus.cpu_side_ports
X86ISA::I82094AA::I82094AA(Params *p)
: BasicPioDevice(p, 20), extIntPic(p->external_int_pic),
lowestPriorityOffset(0),
- intMasterPort(name() + ".int_master", this, this, p->int_latency)
+ intRequestPort(name() + ".int_request", this, this, p->int_latency)
{
// This assumes there's only one I/O APIC in the system and since the apic
// id is stored in a 8-bit field with 0xff meaning broadcast, the id must
// the piodevice init() function.
BasicPioDevice::init();
- // If the master port isn't connected, we can't send interrupts anywhere.
- panic_if(!intMasterPort.isConnected(),
+ // If the request port isn't connected, we can't send interrupts anywhere.
+ panic_if(!intRequestPort.isConnected(),
"Int port not connected to anything!");
}
Port &
X86ISA::I82094AA::getPort(const std::string &if_name, PortID idx)
{
- if (if_name == "int_master")
- return intMasterPort;
+ if (if_name == "int_requestor")
+ return intRequestPort;
if (if_name == "inputs")
return *inputs.at(idx);
else
}
for (auto id: apics) {
PacketPtr pkt = buildIntTriggerPacket(id, message);
- intMasterPort.sendMessage(pkt, sys->isTimingMode());
+ intRequestPort.sendMessage(pkt, sys->isTimingMode());
}
}
}
std::vector<IntSinkPin<I82094AA> *> inputs;
- IntMasterPort<I82094AA> intMasterPort;
+ IntRequestPort<I82094AA> intRequestPort;
public:
typedef I82094AAParams Params;
case 0x2:
DPRINTF(I8259, "Received initialization command word 3.\n");
if (mode == Enums::I8259Master) {
- DPRINTF(I8259, "Slaves attached to IRQs:%s%s%s%s%s%s%s%s\n",
+ DPRINTF(I8259, "Responders attached to "
+ "IRQs:%s%s%s%s%s%s%s%s\n",
bits(val, 0) ? " 0" : "",
bits(val, 1) ? " 1" : "",
bits(val, 2) ? " 2" : "",
bits(val, 7) ? " 7" : "");
cascadeBits = val;
} else {
- DPRINTF(I8259, "Slave ID is %d.\n", val & mask(3));
+ DPRINTF(I8259, "Responder ID is %d.\n", val & mask(3));
cascadeBits = val & mask(3);
}
if (expectICW4)
X86ISA::I8259::getVector()
{
/*
- * This code only handles one slave. Since that's how the PC platform
+ * This code only handles one responder. Since that's how the PC platform
* always uses the 8259 PIC, there shouldn't be any need for more. If
- * there -is- a need for more for some reason, "slave" can become a
- * vector of slaves.
+ * there -is- a need for more for some reason, "responder" can become a
+ * vector of responders.
*/
int line = findMsbSet(IRR);
IRR &= ~(1 << line);
ISR |= 1 << line;
}
if (slave && bits(cascadeBits, line)) {
- DPRINTF(I8259, "Interrupt was from slave who will "
+ DPRINTF(I8259, "Interrupt was from responder who will "
"provide the vector.\n");
return slave->getVector();
}
std::vector<IntSourcePin<I8259> *> output;
std::vector<IntSinkPin<I8259> *> inputs;
Enums::X86I8259CascadeMode mode;
- I8259 * slave;
+ I8259 *slave;
// Interrupt Request Register
uint8_t IRR;
uint8_t vectorOffset;
bool cascadeMode;
- // A bit vector of lines with slaves attached, or the slave id, depending
- // on if this is a master or slave PIC.
+ // A bit vector of lines with responders attached, or the
+ // responder id, depending
+ // on if this is a requestor or responder PIC.
uint8_t cascadeBits;
bool edgeTriggered;
{
template <class Device>
-class IntSlavePort : public SimpleTimingPort
+class IntResponsePort : public SimpleTimingPort
{
Device * device;
public:
- IntSlavePort(const std::string& _name, SimObject* _parent,
+ IntResponsePort(const std::string& _name, SimObject* _parent,
Device* dev) :
SimpleTimingPort(_name, _parent), device(dev)
{
buildIntPacket(Addr addr, T payload)
{
RequestPtr req = std::make_shared<Request>(
- addr, sizeof(T), Request::UNCACHEABLE, Request::intMasterId);
+ addr, sizeof(T), Request::UNCACHEABLE, Request::intRequestorId);
PacketPtr pkt = new Packet(req, MemCmd::WriteReq);
pkt->allocate();
pkt->setRaw<T>(payload);
}
template <class Device>
-class IntMasterPort : public QueuedMasterPort
+class IntRequestPort : public QueuedRequestPort
{
private:
ReqPacketQueue reqQueue;
static void defaultOnCompletion(PacketPtr pkt) { delete pkt; }
public:
- IntMasterPort(const std::string& _name, SimObject* _parent,
+ IntRequestPort(const std::string& _name, SimObject* _parent,
Device* dev, Tick _latency) :
- QueuedMasterPort(_name, _parent, reqQueue, snoopRespQueue),
+ QueuedRequestPort(_name, _parent, reqQueue, snoopRespQueue),
reqQueue(*_parent, *this), snoopRespQueue(*_parent, *this),
device(dev), latency(_latency)
{
coalescer_to_vrf_bus_width = Param.Int(64, "Coalescer->VRF data bus "\
"width in bytes")
- memory_port = VectorMasterPort("Port to the memory system")
- translation_port = VectorMasterPort('Port to the TLB hierarchy')
+ memory_port = VectorRequestPort("Port to the memory system")
+ translation_port = VectorRequestPort('Port to the TLB hierarchy')
sqc_port = RequestPort("Port to the SQC (I-cache")
sqc_tlb_port = RequestPort("Port to the TLB for the SQC (I-cache)")
scalar_port = RequestPort("Port to the scalar data cache")
missLatency1 = Param.Int(5, "Latency #1 of a TLB miss")
missLatency2 = Param.Int(100, "Latency #2 of a TLB miss")
maxOutstandingReqs = Param.Int(64, "# of maximum outstanding requests")
- slave = VectorSlavePort("Port on side closer to CPU/CU")
- master = VectorMasterPort("Port on side closer to memory")
+ cpu_side_ports = VectorResponsePort("Ports on side closer to CPU/CU")
+ slave = DeprecatedParam(cpu_side_ports,
+ '`slave` is now called `cpu_side_ports`')
+ mem_side_ports = VectorRequestPort("Ports on side closer to memory")
+ master = DeprecatedParam(mem_side_ports,
+ '`master` is now called `mem_side_ports`')
allocationPolicy = Param.Bool(True, "Allocate on an access")
accessDistance = Param.Bool(False, "print accessDistance stats")
cxx_header = 'gpu-compute/tlb_coalescer.hh'
probesPerCycle = Param.Int(2, "Number of TLB probes per cycle")
coalescingWindow = Param.Int(1, "Permit coalescing across that many ticks")
- slave = VectorSlavePort("Port on side closer to CPU/CU")
- master = VectorMasterPort("Port on side closer to memory")
+ cpu_side_ports = VectorResponsePort("Port on side closer to CPU/CU")
+ slave = DeprecatedParam(cpu_side_ports,
+ '`slave` is now called `cpu_side_ports`')
+ mem_side_ports = VectorRequestPort("Port on side closer to memory")
+ master = DeprecatedParam(mem_side_ports,
+ '`master` is now called `mem_side_ports`')
disableCoalescing = Param.Bool(False,"Dispable Coalescing")
countPages(p->countPages),
req_tick_latency(p->mem_req_latency * p->clk_domain->clockPeriod()),
resp_tick_latency(p->mem_resp_latency * p->clk_domain->clockPeriod()),
- _masterId(p->system->getMasterId(this, "ComputeUnit")),
+ _requestorId(p->system->getRequestorId(this, "ComputeUnit")),
lds(*p->localDataStore), gmTokenPort(name() + ".gmTokenPort", this),
ldsPort(csprintf("%s-port", name()), this),
scalarDataPort(csprintf("%s-port", name()), this),
tlbPort.emplace_back(csprintf("%s-port%d", name(), i), this, i);
}
- // Setup tokens for slave ports. The number of tokens in memSlaveTokens
+ // Setup tokens for response ports. The number of tokens in memPortTokens
// is the total token count for the entire vector port (i.e., this CU).
memPortTokens = new TokenManager(p->max_cu_tokens);
if (!req) {
req = std::make_shared<Request>(
- 0, 0, 0, masterId(), 0, gpuDynInst->wfDynId);
+ 0, 0, 0, requestorId(), 0, gpuDynInst->wfDynId);
}
// all mem sync requests have Paddr == 0
RequestPtr prefetch_req = std::make_shared<Request>(
vaddr + stride * pf * TheISA::PageBytes,
sizeof(uint8_t), 0,
- computeUnit->masterId(),
+ computeUnit->requestorId(),
0, 0, nullptr);
PacketPtr prefetch_pkt = new Packet(prefetch_req, requestCmd);
}
// First we must convert the response cmd back to a request cmd so that
- // the request can be sent through the cu's master port
+ // the request can be sent through the cu's request port
PacketPtr new_pkt = new Packet(pkt->req, requestCmd);
new_pkt->dataStatic(pkt->getPtr<uint8_t>());
delete pkt->senderState;
if (success) {
// pkt is reused in fetch(), don't delete it here. However, we must
// reset the command to be a request so that it can be sent through
- // the cu's master port
+ // the cu's request port
assert(pkt->cmd == MemCmd::ReadResp);
pkt->cmd = MemCmd::ReadReq;
void processFetchReturn(PacketPtr pkt);
void updatePageDivergenceDist(Addr addr);
- MasterID masterId() { return _masterId; }
+ RequestorID requestorId() { return _requestorId; }
bool isDone() const;
bool isVectorAluIdle(uint32_t simdId) const;
protected:
- MasterID _masterId;
+ RequestorID _requestorId;
LdsState &lds;
void exitCallback();
- class GMTokenPort : public TokenMasterPort
+ class GMTokenPort : public TokenRequestPort
{
public:
GMTokenPort(const std::string& name, SimObject *owner,
PortID id = InvalidPortID)
- : TokenMasterPort(name, owner, id)
+ : TokenRequestPort(name, owner, id)
{ }
~GMTokenPort() { }
// set up virtual request
RequestPtr req = std::make_shared<Request>(
vaddr, computeUnit.cacheLineSize(), Request::INST_FETCH,
- computeUnit.masterId(), 0, 0, nullptr);
+ computeUnit.requestorId(), 0, 0, nullptr);
PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
missLatency1 = p->missLatency1;
missLatency2 = p->missLatency2;
- // create the slave ports based on the number of connected ports
- for (size_t i = 0; i < p->port_slave_connection_count; ++i) {
+ // create the response ports based on the number of connected ports
+ for (size_t i = 0; i < p->port_cpu_side_ports_connection_count; ++i) {
cpuSidePort.push_back(new CpuSidePort(csprintf("%s-port%d",
name(), i), this, i));
}
- // create the master ports based on the number of connected ports
- for (size_t i = 0; i < p->port_master_connection_count; ++i) {
+ // create the request ports based on the number of connected ports
+ for (size_t i = 0; i < p->port_mem_side_ports_connection_count; ++i) {
memSidePort.push_back(new MemSidePort(csprintf("%s-port%d",
name(), i), this, i));
}
Port &
GpuTLB::getPort(const std::string &if_name, PortID idx)
{
- if (if_name == "slave") {
+ if (if_name == "cpu_side_ports") {
if (idx >= static_cast<PortID>(cpuSidePort.size())) {
panic("TLBCoalescer::getPort: unknown index %d\n", idx);
}
return *cpuSidePort[idx];
- } else if (if_name == "master") {
+ } else if (if_name == "mem_side_ports") {
if (idx >= static_cast<PortID>(memSidePort.size())) {
panic("TLBCoalescer::getPort: unknown index %d\n", idx);
}
Addr paddr = local_entry->paddr | (vaddr & (page_size - 1));
DPRINTF(GPUTLB, "Translated %#x -> %#x.\n", vaddr, paddr);
- // Since this packet will be sent through the cpu side slave port,
+ // Since this packet will be sent through the cpu side port,
// it must be converted to a response pkt if it is not one already
if (pkt->isRequest()) {
pkt->makeTimingResponse();
AddrRangeList
GpuTLB::CpuSidePort::getAddrRanges() const
{
- // currently not checked by the master
+ // currently not checked by the requestor
AddrRangeList ranges;
return ranges;
// create a request to hold INV info; the request's fields will
// be updated in cu before use
auto req = std::make_shared<Request>(0, 0, 0,
- cuList[i_cu]->masterId(),
+ cuList[i_cu]->requestorId(),
0, -1);
_dispatcher.updateInvCounter(kernId, +1);
RequestPtr req = std::make_shared<Request>(
gen.addr(), gen.size(), 0,
- cuList[0]->masterId(), 0, 0, nullptr);
+ cuList[0]->requestorId(), 0, 0, nullptr);
doFunctionalAccess(req, cmd, data_buf, suppress_func_errors, cu_id);
data_buf += gen.size();
"Cleanup issuedTranslationsTable hashmap",
false, Event::Maximum_Pri)
{
- // create the slave ports based on the number of connected ports
- for (size_t i = 0; i < p->port_slave_connection_count; ++i) {
+ // create the response ports based on the number of connected ports
+ for (size_t i = 0; i < p->port_cpu_side_ports_connection_count; ++i) {
cpuSidePort.push_back(new CpuSidePort(csprintf("%s-port%d", name(), i),
this, i));
}
- // create the master ports based on the number of connected ports
- for (size_t i = 0; i < p->port_master_connection_count; ++i) {
+ // create the request ports based on the number of connected ports
+ for (size_t i = 0; i < p->port_mem_side_ports_connection_count; ++i) {
memSidePort.push_back(new MemSidePort(csprintf("%s-port%d", name(), i),
this, i));
}
Port &
TLBCoalescer::getPort(const std::string &if_name, PortID idx)
{
- if (if_name == "slave") {
+ if (if_name == "cpu_side_ports") {
if (idx >= static_cast<PortID>(cpuSidePort.size())) {
panic("TLBCoalescer::getPort: unknown index %d\n", idx);
}
return *cpuSidePort[idx];
- } else if (if_name == "master") {
+ } else if (if_name == "mem_side_ports") {
if (idx >= static_cast<PortID>(memSidePort.size())) {
panic("TLBCoalescer::getPort: unknown index %d\n", idx);
}
AddrRangeList
TLBCoalescer::CpuSidePort::getAddrRanges() const
{
- // currently not checked by the master
+ // currently not checked by the requestor
AddrRangeList ranges;
return ranges;
}
};
- // Coalescer slave ports on the cpu Side
+ // Coalescer response ports on the cpu Side
std::vector<CpuSidePort*> cpuSidePort;
- // Coalescer master ports on the memory side
+ // Coalescer request ports on the memory side
std::vector<MemSidePort*> memSidePort;
Port &getPort(const std::string &if_name,
# Vector port example. Both the instruction and data ports connect to this
# port which is automatically split out into two ports.
- cpu_side = VectorSlavePort("CPU side port, receives requests")
+ cpu_side = VectorResponsePort("CPU side port, receives requests")
mem_side = RequestPort("Memory side port, sends requests")
latency = Param.Cycles(1, "Cycles taken on a hit or to resolve a miss")
// automatically created depending on the name of the vector port and
// holds the number of connections to this port name
for (int i = 0; i < params->port_cpu_side_connection_count; ++i) {
- cpuPorts.emplace_back(name() + csprintf(".cpu_side[%d]", i), i, this);
+ cpuPorts.emplace_back(name() + csprintf(".cpu_side[%d]", i),
+ i, this);
}
}
/**
* Get a list of the non-overlapping address ranges the owner is
- * responsible for. All slave ports must override this function
+ * responsible for. All response ports must override this function
* and return a populated list with at least one item.
*
* @return a list of ranges responded to
protected:
/**
- * Receive an atomic request packet from the master port.
+ * Receive an atomic request packet from the request port.
* No need to implement in this simple cache.
*/
Tick recvAtomic(PacketPtr pkt) override
{ panic("recvAtomic unimpl."); }
/**
- * Receive a functional request packet from the master port.
+ * Receive a functional request packet from the request port.
* Performs a "debug" access updating/reading the data in place.
*
* @param packet the requestor sent.
void recvFunctional(PacketPtr pkt) override;
/**
- * Receive a timing request from the master port.
+ * Receive a timing request from the request port.
*
* @param the packet that the requestor sent
* @return whether this object can consume to packet. If false, we
bool recvTimingReq(PacketPtr pkt) override;
/**
- * Called by the master port if sendTimingResp was called on this
- * slave port (causing recvTimingResp to be called on the master
- * port) and was unsuccesful.
+ * Called by the request port if sendTimingResp was called on this
+ * response port (causing recvTimingResp to be called on the request
+ * port) and was unsuccessful.
*/
void recvRespRetry() override;
};
protected:
/**
- * Receive a timing response from the slave port.
+ * Receive a timing response from the response port.
*/
bool recvTimingResp(PacketPtr pkt) override;
/**
- * Called by the slave port if sendTimingReq was called on this
- * master port (causing recvTimingReq to be called on the slave
+ * Called by the response port if sendTimingReq was called on this
+ * request port (causing recvTimingReq to be called on the response
* port) and was unsuccesful.
*/
void recvReqRetry() override;
/**
- * Called to receive an address range change from the peer slave
+ * Called to receive an address range change from the peer response
* port. The default implementation ignores the change and does
* nothing. Override this function in a derived class if the owner
* needs to be aware of the address ranges, e.g. in an
from m5.SimObject import SimObject
# An address mapper changes the packet addresses in going from the
-# slave port side of the mapper to the master port side. When the
-# slave port is queried for the address ranges, it also performs the
+# response port side of the mapper to the request port side. When the
+# response port is queried for the address ranges, it also performs the
# necessary range updates. Note that snoop requests that travel from
-# the master port (i.e. the memory side) to the slave port are
+# the request port (i.e. the memory side) to the response port are
# currently not modified.
class AddrMapper(SimObject):
type = 'AddrMapper'
abstract = True
# one port in each direction
- master = RequestPort("Master port")
- slave = ResponsePort("Slave port")
-
+ mem_side_port = RequestPort("This port sends requests and "
+ "receives responses")
+ master = DeprecatedParam(mem_side_port,
+ '`master` is now called `mem_side_port`')
+ cpu_side_port = ResponsePort("This port receives requests and "
+ "sends responses")
+ slave = DeprecatedParam(cpu_side_port,
+ '`slave` is now called `cpu_side_port`')
# Range address mapper that maps a set of original ranges to a set of
# remapped ranges, where a specific range is of the same size
class Bridge(ClockedObject):
type = 'Bridge'
cxx_header = "mem/bridge.hh"
- slave = ResponsePort('Slave port')
- master = RequestPort('Master port')
+
+ mem_side_port = RequestPort("This port sends requests and "
+ "receives responses")
+ master = DeprecatedParam(mem_side_port,
+ '`master` is now called `mem_side_port`')
+ cpu_side_port = ResponsePort("This port receives requests and "
+ "sends responses")
+ slave = DeprecatedParam(cpu_side_port,
+ '`slave` is now called `cpu_side_port`')
+
req_size = Param.Unsigned(16, "The number of requests to buffer")
resp_size = Param.Unsigned(16, "The number of responses to buffer")
delay = Param.Latency('0ns', "The latency of this bridge")
system = Param.System(Parent.any, "System that the monitor belongs to.")
# one port in each direction
- master = RequestPort("Master port")
- slave = ResponsePort("Slave port")
+ mem_side_port = RequestPort("This port sends requests and "
+ "receives responses")
+ master = DeprecatedParam(mem_side_port,
+ '`master` is now called `mem_side_port`')
+ cpu_side_port = ResponsePort("This port receives requests and "
+ "sends responses")
+ slave = DeprecatedParam(cpu_side_port,
+ '`slave` is now called `cpu_side_port`')
# control the sample period window length of this monitor
sample_period = Param.Clock("1ms", "Sample period for histograms")
cxx_header = "mem/dramsim2.hh"
# A single port for now
- port = SlavePort("Slave port")
+ port = ResponsePort("This port sends responses and receives requests")
deviceConfigFile = Param.String("ini/DDR3_micron_32M_8B_x8_sg15.ini",
"Device configuration file")
cxx_header = "mem/mem_checker_monitor.hh"
# one port in each direction
- master = RequestPort("Master port")
- slave = ResponsePort("Slave port")
- cpu_side = ResponsePort("Alias for slave")
- mem_side = RequestPort("Alias for master")
+ mem_side_port = RequestPort("This port sends requests and receives "
+ "responses")
+ master = DeprecatedParam(mem_side_port,"`master` is now called "
+ "`mem_side_port`")
+ cpu_side_port = ResponsePort("This port receives requests and sends "
+ "responses")
+ slave = DeprecatedParam(cpu_side_port,"`slave` is now called "
+ "`cpu_side_port`")
warn_only = Param.Bool(False, "Warn about violations only")
memchecker = Param.MemChecker("Instance shared with other monitors")
# single-ported on the system interface side, instantiate with a
# bus in front of the controller for multiple ports
- port = SlavePort("Slave port")
+ port = ResponsePort("This port responds to memory requests")
# Interface to volatile, DRAM media
dram = Param.DRAMInterface(NULL, "DRAM interface")
cxx_header = 'mem/mem_delay.hh'
abstract = True
- master = RequestPort("Master port")
- slave = ResponsePort("Slave port")
+ mem_side_port = RequestPort("This port sends requests and "
+ "receives responses")
+ master = DeprecatedParam(mem_side_port,
+ '`master` is now called `mem_side_port`')
+ cpu_side_port = ResponsePort("This port receives requests and "
+ "sends responses")
+ slave = DeprecatedParam(cpu_side_port,
+ '`slave` is now called `cpu_side_port`')
class SimpleMemDelay(MemDelay):
type = 'SimpleMemDelay'
class SerialLink(ClockedObject):
type = 'SerialLink'
cxx_header = "mem/serial_link.hh"
- slave = ResponsePort('Slave port')
- master = RequestPort('Master port')
+ mem_side_port = RequestPort("This port sends requests and "
+ "receives responses")
+ master = DeprecatedParam(mem_side_port,
+ '`master` is now called `mem_side_port`')
+ cpu_side_port = ResponsePort("This port receives requests and "
+ "sends responses")
+ slave = DeprecatedParam(cpu_side_port,
+ '`slave` is now called `cpu_side_port`')
req_size = Param.Unsigned(16, "The number of requests to buffer")
resp_size = Param.Unsigned(16, "The number of responses to buffer")
delay = Param.Latency('0ns', "The latency of this serial_link")
class SimpleMemory(AbstractMemory):
type = 'SimpleMemory'
cxx_header = "mem/simple_mem.hh"
- port = SlavePort("Slave ports")
+ port = ResponsePort("This port sends responses and receives requests")
latency = Param.Latency('30ns', "Request to response latency")
latency_var = Param.Latency('0ns', "Request to response latency variance")
# The memory bandwidth limit default is set to 12.8GB/s which is
abstract = True
cxx_header = "mem/xbar.hh"
- slave = VectorSlavePort("Vector port for connecting masters")
- master = VectorMasterPort("Vector port for connecting slaves")
+ cpu_side_ports = VectorResponsePort("Vector port for connecting "
+ "mem side ports")
+ slave = DeprecatedParam(cpu_side_ports,
+ '`slave` is now called `cpu_side_ports`')
+ mem_side_ports = VectorRequestPort("Vector port for connecting "
+ "cpu side ports")
+ master = DeprecatedParam(mem_side_ports,
+ '`master` is now called `mem_side_ports`')
# Latencies governing the time taken for the variuos paths a
# packet has through the crossbar. Note that the crossbar itself
forward_latency = Param.Cycles("Forward latency")
response_latency = Param.Cycles("Response latency")
- # The XBar uses one Layer per master. Each Layer forwards a packet
+ # The XBar uses one Layer per requestor. Each Layer forwards a packet
# to its destination and is occupied for header_latency + size /
# width cycles
header_latency = Param.Cycles(1, "Header latency")
width = Param.Unsigned("Datapath width per port (bytes)")
# The default port can be left unconnected, or be used to connect
- # a default slave port
- default = RequestPort("Port for connecting an optional default slave")
+ # a default response port
+ default = RequestPort("Port for connecting an optional default responder")
# The default port can be used unconditionally, or based on
# address range, in which case it may overlap with other
# Sanity check on max capacity to track, adjust if needed.
max_capacity = Param.MemorySize('8MB', "Maximum capacity of snoop filter")
-# We use a coherent crossbar to connect multiple masters to the L2
+# We use a coherent crossbar to connect multiple requestors to the L2
# caches. Normally this crossbar would be part of the cache itself.
class L2XBar(CoherentXBar):
# 256-bit crossbar by default
# One of the key coherent crossbar instances is the system
# interconnect, tying together the CPU clusters, GPUs, and any I/O
-# coherent masters, and DRAM controllers.
+# coherent requestors, and DRAM controllers.
class SystemXBar(CoherentXBar):
# 128-bit crossbar by default
width = 16
System *sys = mem.system();
assert(sys);
- const auto max_masters = sys->maxMasters();
+ const auto max_requestors = sys->maxRequestors();
bytesRead
- .init(max_masters)
+ .init(max_requestors)
.flags(total | nozero | nonan)
;
- for (int i = 0; i < max_masters; i++) {
- bytesRead.subname(i, sys->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ bytesRead.subname(i, sys->getRequestorName(i));
}
bytesInstRead
- .init(max_masters)
+ .init(max_requestors)
.flags(total | nozero | nonan)
;
- for (int i = 0; i < max_masters; i++) {
- bytesInstRead.subname(i, sys->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ bytesInstRead.subname(i, sys->getRequestorName(i));
}
bytesWritten
- .init(max_masters)
+ .init(max_requestors)
.flags(total | nozero | nonan)
;
- for (int i = 0; i < max_masters; i++) {
- bytesWritten.subname(i, sys->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ bytesWritten.subname(i, sys->getRequestorName(i));
}
numReads
- .init(max_masters)
+ .init(max_requestors)
.flags(total | nozero | nonan)
;
- for (int i = 0; i < max_masters; i++) {
- numReads.subname(i, sys->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ numReads.subname(i, sys->getRequestorName(i));
}
numWrites
- .init(max_masters)
+ .init(max_requestors)
.flags(total | nozero | nonan)
;
- for (int i = 0; i < max_masters; i++) {
- numWrites.subname(i, sys->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ numWrites.subname(i, sys->getRequestorName(i));
}
numOther
- .init(max_masters)
+ .init(max_requestors)
.flags(total | nozero | nonan)
;
- for (int i = 0; i < max_masters; i++) {
- numOther.subname(i, sys->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ numOther.subname(i, sys->getRequestorName(i));
}
bwRead
.prereq(bytesRead)
.flags(total | nozero | nonan)
;
- for (int i = 0; i < max_masters; i++) {
- bwRead.subname(i, sys->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ bwRead.subname(i, sys->getRequestorName(i));
}
bwInstRead
.prereq(bytesInstRead)
.flags(total | nozero | nonan)
;
- for (int i = 0; i < max_masters; i++) {
- bwInstRead.subname(i, sys->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ bwInstRead.subname(i, sys->getRequestorName(i));
}
bwWrite
.prereq(bytesWritten)
.flags(total | nozero | nonan)
;
- for (int i = 0; i < max_masters; i++) {
- bwWrite.subname(i, sys->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ bwWrite.subname(i, sys->getRequestorName(i));
}
bwTotal
.prereq(bwTotal)
.flags(total | nozero | nonan)
;
- for (int i = 0; i < max_masters; i++) {
- bwTotal.subname(i, sys->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ bwTotal.subname(i, sys->getRequestorName(i));
}
bwRead = bytesRead / simSeconds;
i->contextId, paddr);
ContextID owner_cid = i->contextId;
assert(owner_cid != InvalidContextID);
- ContextID requester_cid = req->hasContextId() ?
+ ContextID requestor_cid = req->hasContextId() ?
req->contextId() :
InvalidContextID;
- if (owner_cid != requester_cid) {
+ if (owner_cid != requestor_cid) {
ThreadContext* ctx = system()->threads[owner_cid];
TheISA::globalClearExclusive(ctx);
}
if (size == 1 || size == 2 || size == 4 || size == 8) {
ByteOrder byte_order = sys->getGuestByteOrder();
DPRINTF(MemoryAccess,"%s from %s of size %i on address %#x data "
- "%#x %c\n", label, sys->getMasterName(pkt->req->masterId()),
+ "%#x %c\n", label,
+ sys->getRequestorName(pkt->req->requestorId()),
size, pkt->getAddr(), pkt->getUintX(byte_order),
pkt->req->isUncacheable() ? 'U' : 'C');
return;
}
#endif
DPRINTF(MemoryAccess, "%s from %s of size %i on address %#x %c\n",
- label, sys->getMasterName(pkt->req->masterId()),
+ label, sys->getRequestorName(pkt->req->requestorId()),
size, pkt->getAddr(), pkt->req->isUncacheable() ? 'U' : 'C');
DDUMP(MemoryAccess, pkt->getConstPtr<uint8_t>(), pkt->getSize());
}
assert(!pkt->req->isInstFetch());
TRACE_PACKET("Read/Write");
- stats.numOther[pkt->req->masterId()]++;
+ stats.numOther[pkt->req->requestorId()]++;
}
} else if (pkt->isRead()) {
assert(!pkt->isWrite());
pkt->setData(host_addr);
}
TRACE_PACKET(pkt->req->isInstFetch() ? "IFetch" : "Read");
- stats.numReads[pkt->req->masterId()]++;
- stats.bytesRead[pkt->req->masterId()] += pkt->getSize();
+ stats.numReads[pkt->req->requestorId()]++;
+ stats.bytesRead[pkt->req->requestorId()] += pkt->getSize();
if (pkt->req->isInstFetch())
- stats.bytesInstRead[pkt->req->masterId()] += pkt->getSize();
+ stats.bytesInstRead[pkt->req->requestorId()] += pkt->getSize();
} else if (pkt->isInvalidate() || pkt->isClean()) {
assert(!pkt->isWrite());
// in a fastmem system invalidating and/or cleaning packets
}
assert(!pkt->req->isInstFetch());
TRACE_PACKET("Write");
- stats.numWrites[pkt->req->masterId()]++;
- stats.bytesWritten[pkt->req->masterId()] += pkt->getSize();
+ stats.numWrites[pkt->req->requestorId()]++;
+ stats.bytesWritten[pkt->req->requestorId()] += pkt->getSize();
}
} else {
panic("Unexpected packet %s", pkt->print());
}
/** Pointer to the System object.
- * This is used for getting the number of masters in the system which is
+ * This is used for getting the number of requestors in the system which is
* needed when registering stats
*/
System *_system;
AddrMapper::AddrMapper(const AddrMapperParams* p)
: SimObject(p),
- masterPort(name() + "-master", *this),
- slavePort(name() + "-slave", *this)
+ memSidePort(name() + "-mem_side_port", *this),
+ cpuSidePort(name() + "-cpu_side_port", *this)
{
}
void
AddrMapper::init()
{
- if (!slavePort.isConnected() || !masterPort.isConnected())
+ if (!cpuSidePort.isConnected() || !memSidePort.isConnected())
fatal("Address mapper is not connected on both sides.\n");
}
Port &
AddrMapper::getPort(const std::string &if_name, PortID idx)
{
- if (if_name == "master") {
- return masterPort;
- } else if (if_name == "slave") {
- return slavePort;
+ if (if_name == "mem_side_port") {
+ return memSidePort;
+ } else if (if_name == "cpu_side_port") {
+ return cpuSidePort;
} else {
return SimObject::getPort(if_name, idx);
}
{
Addr orig_addr = pkt->getAddr();
pkt->setAddr(remapAddr(orig_addr));
- masterPort.sendFunctional(pkt);
+ memSidePort.sendFunctional(pkt);
pkt->setAddr(orig_addr);
}
{
Addr orig_addr = pkt->getAddr();
pkt->setAddr(remapAddr(orig_addr));
- slavePort.sendFunctionalSnoop(pkt);
+ cpuSidePort.sendFunctionalSnoop(pkt);
pkt->setAddr(orig_addr);
}
{
Addr orig_addr = pkt->getAddr();
pkt->setAddr(remapAddr(orig_addr));
- Tick ret_tick = masterPort.sendAtomic(pkt);
+ Tick ret_tick = memSidePort.sendAtomic(pkt);
pkt->setAddr(orig_addr);
return ret_tick;
}
{
Addr orig_addr = pkt->getAddr();
pkt->setAddr(remapAddr(orig_addr));
- Tick ret_tick = slavePort.sendAtomicSnoop(pkt);
+ Tick ret_tick = cpuSidePort.sendAtomicSnoop(pkt);
pkt->setAddr(orig_addr);
return ret_tick;
}
pkt->setAddr(remapAddr(orig_addr));
// Attempt to send the packet
- bool successful = masterPort.sendTimingReq(pkt);
+ bool successful = memSidePort.sendTimingReq(pkt);
// If not successful, restore the address and sender state
if (!successful) {
pkt->setAddr(receivedState->origAddr);
// Attempt to send the packet
- bool successful = slavePort.sendTimingResp(pkt);
+ bool successful = cpuSidePort.sendTimingResp(pkt);
// If packet successfully sent, delete the sender state, otherwise
// restore state
void
AddrMapper::recvTimingSnoopReq(PacketPtr pkt)
{
- slavePort.sendTimingSnoopReq(pkt);
+ cpuSidePort.sendTimingSnoopReq(pkt);
}
bool
AddrMapper::recvTimingSnoopResp(PacketPtr pkt)
{
- return masterPort.sendTimingSnoopResp(pkt);
+ return memSidePort.sendTimingSnoopResp(pkt);
}
bool
AddrMapper::isSnooping() const
{
- if (slavePort.isSnooping())
+ if (cpuSidePort.isSnooping())
fatal("AddrMapper doesn't support remapping of snooping requests\n");
return false;
}
void
AddrMapper::recvReqRetry()
{
- slavePort.sendRetryReq();
+ cpuSidePort.sendRetryReq();
}
void
AddrMapper::recvRespRetry()
{
- masterPort.sendRetryResp();
+ memSidePort.sendRetryResp();
}
void
AddrMapper::recvRangeChange()
{
- slavePort.sendRangeChange();
+ cpuSidePort.sendRangeChange();
}
RangeAddrMapper::RangeAddrMapper(const RangeAddrMapperParams* p) :
/**
* An address mapper changes the packet addresses in going from the
- * slave port side of the mapper to the master port side. When the
- * slave port is queried for the address ranges, it also performs the
+ * response port side of the mapper to the request port side. When the
+ * response port is queried for the address ranges, it also performs the
* necessary range updates. Note that snoop requests that travel from
- * the master port (i.e. the memory side) to the slave port are
+ * the request port (i.e. the memory side) to the response port are
* currently not modified.
*/
};
- class MapperMasterPort : public RequestPort
+ class MapperRequestPort : public RequestPort
{
public:
- MapperMasterPort(const std::string& _name, AddrMapper& _mapper)
+ MapperRequestPort(const std::string& _name, AddrMapper& _mapper)
: RequestPort(_name, &_mapper), mapper(_mapper)
{ }
};
- /** Instance of master port, facing the memory side */
- MapperMasterPort masterPort;
+ /** Instance of request port, facing the memory side */
+ MapperRequestPort memSidePort;
- class MapperSlavePort : public ResponsePort
+ class MapperResponsePort : public ResponsePort
{
public:
- MapperSlavePort(const std::string& _name, AddrMapper& _mapper)
+ MapperResponsePort(const std::string& _name, AddrMapper& _mapper)
: ResponsePort(_name, &_mapper), mapper(_mapper)
{ }
};
- /** Instance of slave port, i.e. on the CPU side */
- MapperSlavePort slavePort;
+ /** Instance of response port, i.e. on the CPU side */
+ MapperResponsePort cpuSidePort;
void recvFunctional(PacketPtr pkt);
/**
* @file
- * Implementation of a memory-mapped bridge that connects a master
- * and a slave through a request and response queue.
+ * Implementation of a memory-mapped bridge that connects a requestor
+ * and a responder through a request and response queue.
*/
#include "mem/bridge.hh"
#include "debug/Bridge.hh"
#include "params/Bridge.hh"
-Bridge::BridgeSlavePort::BridgeSlavePort(const std::string& _name,
+Bridge::BridgeResponsePort::BridgeResponsePort(const std::string& _name,
Bridge& _bridge,
- BridgeMasterPort& _masterPort,
+ BridgeRequestPort& _memSidePort,
Cycles _delay, int _resp_limit,
std::vector<AddrRange> _ranges)
- : ResponsePort(_name, &_bridge), bridge(_bridge), masterPort(_masterPort),
- delay(_delay), ranges(_ranges.begin(), _ranges.end()),
+ : ResponsePort(_name, &_bridge), bridge(_bridge),
+ memSidePort(_memSidePort), delay(_delay),
+ ranges(_ranges.begin(), _ranges.end()),
outstandingResponses(0), retryReq(false), respQueueLimit(_resp_limit),
sendEvent([this]{ trySendTiming(); }, _name)
{
}
-Bridge::BridgeMasterPort::BridgeMasterPort(const std::string& _name,
+Bridge::BridgeRequestPort::BridgeRequestPort(const std::string& _name,
Bridge& _bridge,
- BridgeSlavePort& _slavePort,
+ BridgeResponsePort& _cpuSidePort,
Cycles _delay, int _req_limit)
- : RequestPort(_name, &_bridge), bridge(_bridge), slavePort(_slavePort),
+ : RequestPort(_name, &_bridge), bridge(_bridge),
+ cpuSidePort(_cpuSidePort),
delay(_delay), reqQueueLimit(_req_limit),
sendEvent([this]{ trySendTiming(); }, _name)
{
Bridge::Bridge(Params *p)
: ClockedObject(p),
- slavePort(p->name + ".slave", *this, masterPort,
+ cpuSidePort(p->name + ".cpu_side_port", *this, memSidePort,
ticksToCycles(p->delay), p->resp_size, p->ranges),
- masterPort(p->name + ".master", *this, slavePort,
+ memSidePort(p->name + ".mem_side_port", *this, cpuSidePort,
ticksToCycles(p->delay), p->req_size)
{
}
Port &
Bridge::getPort(const std::string &if_name, PortID idx)
{
- if (if_name == "master")
- return masterPort;
- else if (if_name == "slave")
- return slavePort;
+ if (if_name == "mem_side_port")
+ return memSidePort;
+ else if (if_name == "cpu_side_port")
+ return cpuSidePort;
else
// pass it along to our super class
return ClockedObject::getPort(if_name, idx);
Bridge::init()
{
// make sure both sides are connected and have the same block size
- if (!slavePort.isConnected() || !masterPort.isConnected())
+ if (!cpuSidePort.isConnected() || !memSidePort.isConnected())
fatal("Both ports of a bridge must be connected.\n");
- // notify the master side of our address ranges
- slavePort.sendRangeChange();
+    // notify the requestor side of our address ranges
+ cpuSidePort.sendRangeChange();
}
bool
-Bridge::BridgeSlavePort::respQueueFull() const
+Bridge::BridgeResponsePort::respQueueFull() const
{
return outstandingResponses == respQueueLimit;
}
bool
-Bridge::BridgeMasterPort::reqQueueFull() const
+Bridge::BridgeRequestPort::reqQueueFull() const
{
return transmitList.size() == reqQueueLimit;
}
bool
-Bridge::BridgeMasterPort::recvTimingResp(PacketPtr pkt)
+Bridge::BridgeRequestPort::recvTimingResp(PacketPtr pkt)
{
- // all checks are done when the request is accepted on the slave
+ // all checks are done when the request is accepted on the response
// side, so we are guaranteed to have space for the response
DPRINTF(Bridge, "recvTimingResp: %s addr 0x%x\n",
pkt->cmdString(), pkt->getAddr());
Tick receive_delay = pkt->headerDelay + pkt->payloadDelay;
pkt->headerDelay = pkt->payloadDelay = 0;
- slavePort.schedTimingResp(pkt, bridge.clockEdge(delay) +
+ cpuSidePort.schedTimingResp(pkt, bridge.clockEdge(delay) +
receive_delay);
return true;
}
bool
-Bridge::BridgeSlavePort::recvTimingReq(PacketPtr pkt)
+Bridge::BridgeResponsePort::recvTimingReq(PacketPtr pkt)
{
DPRINTF(Bridge, "recvTimingReq: %s addr 0x%x\n",
pkt->cmdString(), pkt->getAddr());
transmitList.size(), outstandingResponses);
// if the request queue is full then there is no hope
- if (masterPort.reqQueueFull()) {
+ if (memSidePort.reqQueueFull()) {
DPRINTF(Bridge, "Request queue full\n");
retryReq = true;
} else {
Tick receive_delay = pkt->headerDelay + pkt->payloadDelay;
pkt->headerDelay = pkt->payloadDelay = 0;
- masterPort.schedTimingReq(pkt, bridge.clockEdge(delay) +
+ memSidePort.schedTimingReq(pkt, bridge.clockEdge(delay) +
receive_delay);
}
}
// remember that we are now stalling a packet and that we have to
- // tell the sending master to retry once space becomes available,
+ // tell the sending requestor to retry once space becomes available,
// we make no distinction whether the stalling is due to the
// request queue or response queue being full
return !retryReq;
}
void
-Bridge::BridgeSlavePort::retryStalledReq()
+Bridge::BridgeResponsePort::retryStalledReq()
{
if (retryReq) {
DPRINTF(Bridge, "Request waiting for retry, now retrying\n");
}
void
-Bridge::BridgeMasterPort::schedTimingReq(PacketPtr pkt, Tick when)
+Bridge::BridgeRequestPort::schedTimingReq(PacketPtr pkt, Tick when)
{
// If we're about to put this packet at the head of the queue, we
// need to schedule an event to do the transmit. Otherwise there
void
-Bridge::BridgeSlavePort::schedTimingResp(PacketPtr pkt, Tick when)
+Bridge::BridgeResponsePort::schedTimingResp(PacketPtr pkt, Tick when)
{
// If we're about to put this packet at the head of the queue, we
// need to schedule an event to do the transmit. Otherwise there
}
void
-Bridge::BridgeMasterPort::trySendTiming()
+Bridge::BridgeRequestPort::trySendTiming()
{
assert(!transmitList.empty());
// then send a retry at this point, also note that if the
// request we stalled was waiting for the response queue
// rather than the request queue we might stall it again
- slavePort.retryStalledReq();
+ cpuSidePort.retryStalledReq();
}
// if the send failed, then we try again once we receive a retry,
}
void
-Bridge::BridgeSlavePort::trySendTiming()
+Bridge::BridgeResponsePort::trySendTiming()
{
assert(!transmitList.empty());
// if there is space in the request queue and we were stalling
// a request, it will definitely be possible to accept it now
// since there is guaranteed space in the response queue
- if (!masterPort.reqQueueFull() && retryReq) {
+ if (!memSidePort.reqQueueFull() && retryReq) {
DPRINTF(Bridge, "Request waiting for retry, now retrying\n");
retryReq = false;
sendRetryReq();
}
void
-Bridge::BridgeMasterPort::recvReqRetry()
+Bridge::BridgeRequestPort::recvReqRetry()
{
trySendTiming();
}
void
-Bridge::BridgeSlavePort::recvRespRetry()
+Bridge::BridgeResponsePort::recvRespRetry()
{
trySendTiming();
}
Tick
-Bridge::BridgeSlavePort::recvAtomic(PacketPtr pkt)
+Bridge::BridgeResponsePort::recvAtomic(PacketPtr pkt)
{
panic_if(pkt->cacheResponding(), "Should not see packets where cache "
"is responding");
- return delay * bridge.clockPeriod() + masterPort.sendAtomic(pkt);
+ return delay * bridge.clockPeriod() + memSidePort.sendAtomic(pkt);
}
void
-Bridge::BridgeSlavePort::recvFunctional(PacketPtr pkt)
+Bridge::BridgeResponsePort::recvFunctional(PacketPtr pkt)
{
pkt->pushLabel(name());
}
}
- // also check the master port's request queue
- if (masterPort.trySatisfyFunctional(pkt)) {
+ // also check the request port's request queue
+ if (memSidePort.trySatisfyFunctional(pkt)) {
return;
}
pkt->popLabel();
// fall through if pkt still not satisfied
- masterPort.sendFunctional(pkt);
+ memSidePort.sendFunctional(pkt);
}
bool
-Bridge::BridgeMasterPort::trySatisfyFunctional(PacketPtr pkt)
+Bridge::BridgeRequestPort::trySatisfyFunctional(PacketPtr pkt)
{
bool found = false;
auto i = transmitList.begin();
}
AddrRangeList
-Bridge::BridgeSlavePort::getAddrRanges() const
+Bridge::BridgeResponsePort::getAddrRanges() const
{
return ranges;
}
/**
* @file
- * Declaration of a memory-mapped bridge that connects a master
- * and a slave through a request and response queue.
+ * Declaration of a memory-mapped bridge that connects a requestor
+ * and a responder through a request and response queue.
*/
#ifndef __MEM_BRIDGE_HH__
/**
* A bridge is used to interface two different crossbars (or in general a
- * memory-mapped master and slave), with buffering for requests and
+ * memory-mapped requestor and responder), with buffering for requests and
* responses. The bridge has a fixed delay for packets passing through
* it and responds to a fixed set of address ranges.
*
- * The bridge comprises a slave port and a master port, that buffer
+ * The bridge comprises a response port and a request port, that buffer
* outgoing responses and requests respectively. Buffer space is
* reserved when a request arrives, also reserving response space
* before forwarding the request. If there is no space present, then
{ }
};
- // Forward declaration to allow the slave port to have a pointer
- class BridgeMasterPort;
+ // Forward declaration to allow the response port to have a pointer
+ class BridgeRequestPort;
/**
* The port on the side that receives requests and sends
- * responses. The slave port has a set of address ranges that it
- * is responsible for. The slave port also has a buffer for the
+ * responses. The response port has a set of address ranges that it
+ * is responsible for. The response port also has a buffer for the
* responses not yet sent.
*/
- class BridgeSlavePort : public ResponsePort
+ class BridgeResponsePort : public ResponsePort
{
private:
Bridge& bridge;
/**
- * Master port on the other side of the bridge.
+ * Request port on the other side of the bridge.
*/
- BridgeMasterPort& masterPort;
+ BridgeRequestPort& memSidePort;
/** Minimum request delay though this bridge. */
const Cycles delay;
public:
/**
- * Constructor for the BridgeSlavePort.
+ * Constructor for the BridgeResponsePort.
*
* @param _name the port name including the owner
* @param _bridge the structural owner
- * @param _masterPort the master port on the other side of the bridge
+ * @param _memSidePort the request port on the other
+ * side of the bridge
* @param _delay the delay in cycles from receiving to sending
* @param _resp_limit the size of the response queue
* @param _ranges a number of address ranges to forward
*/
- BridgeSlavePort(const std::string& _name, Bridge& _bridge,
- BridgeMasterPort& _masterPort, Cycles _delay,
+ BridgeResponsePort(const std::string& _name, Bridge& _bridge,
+ BridgeRequestPort& _memSidePort, Cycles _delay,
int _resp_limit, std::vector<AddrRange> _ranges);
/**
/**
* Port on the side that forwards requests and receives
- * responses. The master port has a buffer for the requests not
+ * responses. The request port has a buffer for the requests not
* yet sent.
*/
- class BridgeMasterPort : public RequestPort
+ class BridgeRequestPort : public RequestPort
{
private:
Bridge& bridge;
/**
- * The slave port on the other side of the bridge.
+ * The response port on the other side of the bridge.
*/
- BridgeSlavePort& slavePort;
+ BridgeResponsePort& cpuSidePort;
/** Minimum delay though this bridge. */
const Cycles delay;
public:
/**
- * Constructor for the BridgeMasterPort.
+ * Constructor for the BridgeRequestPort.
*
* @param _name the port name including the owner
* @param _bridge the structural owner
- * @param _slavePort the slave port on the other side of the bridge
+ * @param _cpuSidePort the response port on the other side of
+ * the bridge
* @param _delay the delay in cycles from receiving to sending
* @param _req_limit the size of the request queue
*/
- BridgeMasterPort(const std::string& _name, Bridge& _bridge,
- BridgeSlavePort& _slavePort, Cycles _delay,
+ BridgeRequestPort(const std::string& _name, Bridge& _bridge,
+ BridgeResponsePort& _cpuSidePort, Cycles _delay,
int _req_limit);
/**
void recvReqRetry();
};
- /** Slave port of the bridge. */
- BridgeSlavePort slavePort;
+ /** Response port of the bridge. */
+ BridgeResponsePort cpuSidePort;
- /** Master port of the bridge. */
- BridgeMasterPort masterPort;
+ /** Request port of the bridge. */
+ BridgeRequestPort memSidePort;
public:
using namespace std;
-BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name,
+BaseCache::CacheResponsePort::CacheResponsePort(const std::string &_name,
BaseCache *_cache,
const std::string &_label)
- : QueuedSlavePort(_name, _cache, queue),
+ : QueuedResponsePort(_name, _cache, queue),
queue(*_cache, *this, true, _label),
blocked(false), mustSendRetry(false),
sendRetryEvent([this]{ processSendRetry(); }, _name)
BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size)
: ClockedObject(p),
- cpuSidePort (p->name + ".cpu_side", this, "CpuSidePort"),
- memSidePort(p->name + ".mem_side", this, "MemSidePort"),
+ cpuSidePort (p->name + ".cpu_side_port", this, "CpuSidePort"),
+ memSidePort(p->name + ".mem_side_port", this, "MemSidePort"),
mshrQueue("MSHRs", p->mshrs, 0, p->demand_mshr_reserve), // see below
writeBuffer("write buffer", p->write_buffers, p->mshrs), // see below
tags(p->tags),
// buffer before committing to an MSHR
// forward snoops is overridden in init() once we can query
- // whether the connected master is actually snooping or not
+ // whether the connected requestor is actually snooping or not
tempBlock = new TempCacheBlk(blkSize);
}
void
-BaseCache::CacheSlavePort::setBlocked()
+BaseCache::CacheResponsePort::setBlocked()
{
assert(!blocked);
DPRINTF(CachePort, "Port is blocking new requests\n");
}
void
-BaseCache::CacheSlavePort::clearBlocked()
+BaseCache::CacheResponsePort::clearBlocked()
{
assert(blocked);
DPRINTF(CachePort, "Port is accepting new requests\n");
}
void
-BaseCache::CacheSlavePort::processSendRetry()
+BaseCache::CacheResponsePort::processSendRetry()
{
DPRINTF(CachePort, "Port is sending retry\n");
DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__,
pkt->print());
- assert(pkt->req->masterId() < system->maxMasters());
- stats.cmdStats(pkt).mshr_hits[pkt->req->masterId()]++;
+ assert(pkt->req->requestorId() < system->maxRequestors());
+ stats.cmdStats(pkt).mshr_hits[pkt->req->requestorId()]++;
// We use forward_time here because it is the same
// considering new targets. We have multiple
}
} else {
// no MSHR
- assert(pkt->req->masterId() < system->maxMasters());
- stats.cmdStats(pkt).mshr_misses[pkt->req->masterId()]++;
+ assert(pkt->req->requestorId() < system->maxRequestors());
+ stats.cmdStats(pkt).mshr_misses[pkt->req->requestorId()]++;
if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean) {
// We use forward_time here because there is an
const QueueEntry::Target *initial_tgt = mshr->getTarget();
const Tick miss_latency = curTick() - initial_tgt->recvTime;
if (pkt->req->isUncacheable()) {
- assert(pkt->req->masterId() < system->maxMasters());
+ assert(pkt->req->requestorId() < system->maxRequestors());
stats.cmdStats(initial_tgt->pkt)
- .mshr_uncacheable_lat[pkt->req->masterId()] += miss_latency;
+ .mshr_uncacheable_lat[pkt->req->requestorId()] += miss_latency;
} else {
- assert(pkt->req->masterId() < system->maxMasters());
+ assert(pkt->req->requestorId() < system->maxRequestors());
stats.cmdStats(initial_tgt->pkt)
- .mshr_miss_latency[pkt->req->masterId()] += miss_latency;
+ .mshr_miss_latency[pkt->req->requestorId()] += miss_latency;
}
PacketList writebacks;
!writeBuffer.findMatch(pf_addr, pkt->isSecure())) {
// Update statistic on number of prefetches issued
// (hwpf_mshr_misses)
- assert(pkt->req->masterId() < system->maxMasters());
- stats.cmdStats(pkt).mshr_misses[pkt->req->masterId()]++;
+ assert(pkt->req->requestorId() < system->maxRequestors());
+ stats.cmdStats(pkt).mshr_misses[pkt->req->requestorId()]++;
// allocate an MSHR and return it, note
// that we send the packet straight away, so do not
// satisfying a string of Read and ReadEx requests from
// upper-level caches, a Read will mark the block as shared but we
// can satisfy a following ReadEx anyway since we can rely on the
- // Read requester(s) to have buffered the ReadEx snoop and to
+ // Read requestor(s) to have buffered the ReadEx snoop and to
// invalidate their blocks after receiving them.
// assert(!pkt->needsWritable() || blk->isWritable());
assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize);
"Writeback from read-only cache");
assert(blk && blk->isValid() && (blk->isDirty() || writebackClean));
- stats.writebacks[Request::wbMasterId]++;
+ stats.writebacks[Request::wbRequestorId]++;
RequestPtr req = std::make_shared<Request>(
- regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);
+ regenerateBlkAddr(blk), blkSize, 0, Request::wbRequestorId);
if (blk->isSecure())
req->setFlags(Request::SECURE);
BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
{
RequestPtr req = std::make_shared<Request>(
- regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);
+ regenerateBlkAddr(blk), blkSize, 0, Request::wbRequestorId);
if (blk->isSecure()) {
req->setFlags(Request::SECURE);
assert(blk.isValid());
RequestPtr request = std::make_shared<Request>(
- regenerateBlkAddr(&blk), blkSize, 0, Request::funcMasterId);
+ regenerateBlkAddr(&blk), blkSize, 0, Request::funcRequestorId);
request->taskId(blk.task_id);
if (blk.isSecure()) {
Stats::Group::regStats();
System *system = cache.system;
- const auto max_masters = system->maxMasters();
+ const auto max_requestors = system->maxRequestors();
hits
- .init(max_masters)
+ .init(max_requestors)
.flags(total | nozero | nonan)
;
- for (int i = 0; i < max_masters; i++) {
- hits.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ hits.subname(i, system->getRequestorName(i));
}
// Miss statistics
misses
- .init(max_masters)
+ .init(max_requestors)
.flags(total | nozero | nonan)
;
- for (int i = 0; i < max_masters; i++) {
- misses.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ misses.subname(i, system->getRequestorName(i));
}
// Miss latency statistics
missLatency
- .init(max_masters)
+ .init(max_requestors)
.flags(total | nozero | nonan)
;
- for (int i = 0; i < max_masters; i++) {
- missLatency.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ missLatency.subname(i, system->getRequestorName(i));
}
// access formulas
accesses.flags(total | nozero | nonan);
accesses = hits + misses;
- for (int i = 0; i < max_masters; i++) {
- accesses.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ accesses.subname(i, system->getRequestorName(i));
}
// miss rate formulas
missRate.flags(total | nozero | nonan);
missRate = misses / accesses;
- for (int i = 0; i < max_masters; i++) {
- missRate.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ missRate.subname(i, system->getRequestorName(i));
}
// miss latency formulas
avgMissLatency.flags(total | nozero | nonan);
avgMissLatency = missLatency / misses;
- for (int i = 0; i < max_masters; i++) {
- avgMissLatency.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ avgMissLatency.subname(i, system->getRequestorName(i));
}
// MSHR statistics
// MSHR hit statistics
mshr_hits
- .init(max_masters)
+ .init(max_requestors)
.flags(total | nozero | nonan)
;
- for (int i = 0; i < max_masters; i++) {
- mshr_hits.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ mshr_hits.subname(i, system->getRequestorName(i));
}
// MSHR miss statistics
mshr_misses
- .init(max_masters)
+ .init(max_requestors)
.flags(total | nozero | nonan)
;
- for (int i = 0; i < max_masters; i++) {
- mshr_misses.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ mshr_misses.subname(i, system->getRequestorName(i));
}
// MSHR miss latency statistics
mshr_miss_latency
- .init(max_masters)
+ .init(max_requestors)
.flags(total | nozero | nonan)
;
- for (int i = 0; i < max_masters; i++) {
- mshr_miss_latency.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ mshr_miss_latency.subname(i, system->getRequestorName(i));
}
// MSHR uncacheable statistics
mshr_uncacheable
- .init(max_masters)
+ .init(max_requestors)
.flags(total | nozero | nonan)
;
- for (int i = 0; i < max_masters; i++) {
- mshr_uncacheable.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ mshr_uncacheable.subname(i, system->getRequestorName(i));
}
// MSHR miss latency statistics
mshr_uncacheable_lat
- .init(max_masters)
+ .init(max_requestors)
.flags(total | nozero | nonan)
;
- for (int i = 0; i < max_masters; i++) {
- mshr_uncacheable_lat.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ mshr_uncacheable_lat.subname(i, system->getRequestorName(i));
}
// MSHR miss rate formulas
mshrMissRate.flags(total | nozero | nonan);
mshrMissRate = mshr_misses / accesses;
- for (int i = 0; i < max_masters; i++) {
- mshrMissRate.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ mshrMissRate.subname(i, system->getRequestorName(i));
}
// mshrMiss latency formulas
avgMshrMissLatency.flags(total | nozero | nonan);
avgMshrMissLatency = mshr_miss_latency / mshr_misses;
- for (int i = 0; i < max_masters; i++) {
- avgMshrMissLatency.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ avgMshrMissLatency.subname(i, system->getRequestorName(i));
}
// mshrUncacheable latency formulas
avgMshrUncacheableLatency.flags(total | nozero | nonan);
avgMshrUncacheableLatency = mshr_uncacheable_lat / mshr_uncacheable;
- for (int i = 0; i < max_masters; i++) {
- avgMshrUncacheableLatency.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ avgMshrUncacheableLatency.subname(i, system->getRequestorName(i));
}
}
Stats::Group::regStats();
System *system = cache.system;
- const auto max_masters = system->maxMasters();
+ const auto max_requestors = system->maxRequestors();
for (auto &cs : cmd)
cs->regStatsFromParent();
demandHits.flags(total | nozero | nonan);
demandHits = SUM_DEMAND(hits);
- for (int i = 0; i < max_masters; i++) {
- demandHits.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ demandHits.subname(i, system->getRequestorName(i));
}
overallHits.flags(total | nozero | nonan);
overallHits = demandHits + SUM_NON_DEMAND(hits);
- for (int i = 0; i < max_masters; i++) {
- overallHits.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ overallHits.subname(i, system->getRequestorName(i));
}
demandMisses.flags(total | nozero | nonan);
demandMisses = SUM_DEMAND(misses);
- for (int i = 0; i < max_masters; i++) {
- demandMisses.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ demandMisses.subname(i, system->getRequestorName(i));
}
overallMisses.flags(total | nozero | nonan);
overallMisses = demandMisses + SUM_NON_DEMAND(misses);
- for (int i = 0; i < max_masters; i++) {
- overallMisses.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ overallMisses.subname(i, system->getRequestorName(i));
}
demandMissLatency.flags(total | nozero | nonan);
demandMissLatency = SUM_DEMAND(missLatency);
- for (int i = 0; i < max_masters; i++) {
- demandMissLatency.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ demandMissLatency.subname(i, system->getRequestorName(i));
}
overallMissLatency.flags(total | nozero | nonan);
overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency);
- for (int i = 0; i < max_masters; i++) {
- overallMissLatency.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ overallMissLatency.subname(i, system->getRequestorName(i));
}
demandAccesses.flags(total | nozero | nonan);
demandAccesses = demandHits + demandMisses;
- for (int i = 0; i < max_masters; i++) {
- demandAccesses.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ demandAccesses.subname(i, system->getRequestorName(i));
}
overallAccesses.flags(total | nozero | nonan);
overallAccesses = overallHits + overallMisses;
- for (int i = 0; i < max_masters; i++) {
- overallAccesses.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ overallAccesses.subname(i, system->getRequestorName(i));
}
demandMissRate.flags(total | nozero | nonan);
demandMissRate = demandMisses / demandAccesses;
- for (int i = 0; i < max_masters; i++) {
- demandMissRate.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ demandMissRate.subname(i, system->getRequestorName(i));
}
overallMissRate.flags(total | nozero | nonan);
overallMissRate = overallMisses / overallAccesses;
- for (int i = 0; i < max_masters; i++) {
- overallMissRate.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ overallMissRate.subname(i, system->getRequestorName(i));
}
demandAvgMissLatency.flags(total | nozero | nonan);
demandAvgMissLatency = demandMissLatency / demandMisses;
- for (int i = 0; i < max_masters; i++) {
- demandAvgMissLatency.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ demandAvgMissLatency.subname(i, system->getRequestorName(i));
}
overallAvgMissLatency.flags(total | nozero | nonan);
overallAvgMissLatency = overallMissLatency / overallMisses;
- for (int i = 0; i < max_masters; i++) {
- overallAvgMissLatency.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ overallAvgMissLatency.subname(i, system->getRequestorName(i));
}
blocked_cycles.init(NUM_BLOCKED_CAUSES);
unusedPrefetches.flags(nozero);
writebacks
- .init(max_masters)
+ .init(max_requestors)
.flags(total | nozero | nonan)
;
- for (int i = 0; i < max_masters; i++) {
- writebacks.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ writebacks.subname(i, system->getRequestorName(i));
}
demandMshrHits.flags(total | nozero | nonan);
demandMshrHits = SUM_DEMAND(mshr_hits);
- for (int i = 0; i < max_masters; i++) {
- demandMshrHits.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ demandMshrHits.subname(i, system->getRequestorName(i));
}
overallMshrHits.flags(total | nozero | nonan);
overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits);
- for (int i = 0; i < max_masters; i++) {
- overallMshrHits.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ overallMshrHits.subname(i, system->getRequestorName(i));
}
demandMshrMisses.flags(total | nozero | nonan);
demandMshrMisses = SUM_DEMAND(mshr_misses);
- for (int i = 0; i < max_masters; i++) {
- demandMshrMisses.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ demandMshrMisses.subname(i, system->getRequestorName(i));
}
overallMshrMisses.flags(total | nozero | nonan);
overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses);
- for (int i = 0; i < max_masters; i++) {
- overallMshrMisses.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ overallMshrMisses.subname(i, system->getRequestorName(i));
}
demandMshrMissLatency.flags(total | nozero | nonan);
demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency);
- for (int i = 0; i < max_masters; i++) {
- demandMshrMissLatency.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ demandMshrMissLatency.subname(i, system->getRequestorName(i));
}
overallMshrMissLatency.flags(total | nozero | nonan);
overallMshrMissLatency =
demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency);
- for (int i = 0; i < max_masters; i++) {
- overallMshrMissLatency.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ overallMshrMissLatency.subname(i, system->getRequestorName(i));
}
overallMshrUncacheable.flags(total | nozero | nonan);
overallMshrUncacheable =
SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable);
- for (int i = 0; i < max_masters; i++) {
- overallMshrUncacheable.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ overallMshrUncacheable.subname(i, system->getRequestorName(i));
}
overallMshrUncacheableLatency =
SUM_DEMAND(mshr_uncacheable_lat) +
SUM_NON_DEMAND(mshr_uncacheable_lat);
- for (int i = 0; i < max_masters; i++) {
- overallMshrUncacheableLatency.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ overallMshrUncacheableLatency.subname(i, system->getRequestorName(i));
}
demandMshrMissRate.flags(total | nozero | nonan);
demandMshrMissRate = demandMshrMisses / demandAccesses;
- for (int i = 0; i < max_masters; i++) {
- demandMshrMissRate.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ demandMshrMissRate.subname(i, system->getRequestorName(i));
}
overallMshrMissRate.flags(total | nozero | nonan);
overallMshrMissRate = overallMshrMisses / overallAccesses;
- for (int i = 0; i < max_masters; i++) {
- overallMshrMissRate.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ overallMshrMissRate.subname(i, system->getRequestorName(i));
}
demandAvgMshrMissLatency.flags(total | nozero | nonan);
demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses;
- for (int i = 0; i < max_masters; i++) {
- demandAvgMshrMissLatency.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ demandAvgMshrMissLatency.subname(i, system->getRequestorName(i));
}
overallAvgMshrMissLatency.flags(total | nozero | nonan);
overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses;
- for (int i = 0; i < max_masters; i++) {
- overallAvgMshrMissLatency.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ overallAvgMshrMissLatency.subname(i, system->getRequestorName(i));
}
overallAvgMshrUncacheableLatency.flags(total | nozero | nonan);
overallAvgMshrUncacheableLatency =
overallMshrUncacheableLatency / overallMshrUncacheable;
- for (int i = 0; i < max_masters; i++) {
- overallAvgMshrUncacheableLatency.subname(i, system->getMasterName(i));
+ for (int i = 0; i < max_requestors; i++) {
+ overallAvgMshrUncacheableLatency.subname(i, system->getRequestorName(i));
}
dataExpansions.flags(nozero | nonan);
assert(pkt->isResponse());
- // Express snoop responses from master to slave, e.g., from L1 to L2
+ // Express snoop responses from requestor to responder, e.g., from L1 to L2
cache->recvTimingSnoopResp(pkt);
return true;
}
BaseCache::
CpuSidePort::CpuSidePort(const std::string &_name, BaseCache *_cache,
const std::string &_label)
- : CacheSlavePort(_name, _cache, _label), cache(_cache)
+ : CacheResponsePort(_name, _cache, _label), cache(_cache)
{
}
BaseCache::MemSidePort::MemSidePort(const std::string &_name,
BaseCache *_cache,
const std::string &_label)
- : CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue),
+ : CacheRequestPort(_name, _cache, _reqQueue, _snoopRespQueue),
_reqQueue(*_cache, *this, _snoopRespQueue, _label),
_snoopRespQueue(*_cache, *this, true, _label), cache(_cache)
{
protected:
/**
- * A cache master port is used for the memory-side port of the
+ * A cache request port is used for the memory-side port of the
* cache, and in addition to the basic timing port that only sends
* response packets through a transmit list, it also offers the
* ability to schedule and send request packets (requests &
* and the sendDeferredPacket of the timing port is modified to
* consider both the transmit list and the requests from the MSHR.
*/
- class CacheMasterPort : public QueuedMasterPort
+ class CacheRequestPort : public QueuedRequestPort
{
public:
protected:
- CacheMasterPort(const std::string &_name, BaseCache *_cache,
+ CacheRequestPort(const std::string &_name, BaseCache *_cache,
ReqPacketQueue &_reqQueue,
SnoopRespPacketQueue &_snoopRespQueue) :
- QueuedMasterPort(_name, _cache, _reqQueue, _snoopRespQueue)
+ QueuedRequestPort(_name, _cache, _reqQueue, _snoopRespQueue)
{ }
/**
/**
- * The memory-side port extends the base cache master port with
+ * The memory-side port extends the base cache request port with
* access functions for functional, atomic and timing snoops.
*/
- class MemSidePort : public CacheMasterPort
+ class MemSidePort : public CacheRequestPort
{
private:
};
/**
- * A cache slave port is used for the CPU-side port of the cache,
+ * A cache response port is used for the CPU-side port of the cache,
* and it is basically a simple timing port that uses a transmit
- * list for responses to the CPU (or connected master). In
+ * list for responses to the CPU (or connected requestor). In
* addition, it has the functionality to block the port for
* incoming requests. If blocked, the port will issue a retry once
* unblocked.
*/
- class CacheSlavePort : public QueuedSlavePort
+ class CacheResponsePort : public QueuedResponsePort
{
public:
protected:
- CacheSlavePort(const std::string &_name, BaseCache *_cache,
+ CacheResponsePort(const std::string &_name, BaseCache *_cache,
const std::string &_label);
/** A normal packet queue used to store responses. */
};
/**
- * The CPU-side port extends the base cache slave port with access
+ * The CPU-side port extends the base cache response port with access
* functions for functional, atomic and timing requests.
*/
- class CpuSidePort : public CacheSlavePort
+ class CpuSidePort : public CacheResponsePort
{
private:
/**
* Marks the access path of the cache as blocked for the given cause. This
- * also sets the blocked flag in the slave interface.
+ * also sets the blocked flag in the response interface.
* @param cause The reason for the cache blocking.
*/
void setBlocked(BlockedCause cause)
void incMissCount(PacketPtr pkt)
{
- assert(pkt->req->masterId() < system->maxMasters());
- stats.cmdStats(pkt).misses[pkt->req->masterId()]++;
+ assert(pkt->req->requestorId() < system->maxRequestors());
+ stats.cmdStats(pkt).misses[pkt->req->requestorId()]++;
pkt->req->incAccessDepth();
if (missCount) {
--missCount;
}
void incHitCount(PacketPtr pkt)
{
- assert(pkt->req->masterId() < system->maxMasters());
- stats.cmdStats(pkt).hits[pkt->req->masterId()]++;
+ assert(pkt->req->requestorId() < system->maxRequestors());
+ stats.cmdStats(pkt).hits[pkt->req->requestorId()]++;
}
/**
} else if (blk->isWritable() && !pending_downgrade &&
!pkt->hasSharers() &&
pkt->cmd != MemCmd::ReadCleanReq) {
- // we can give the requester a writable copy on a read
+ // we can give the requestor a writable copy on a read
// request if:
// - we have a writable copy at this level (& below)
// - we don't have a pending snoop from below
// should have flushed and have no valid block
assert(!blk || !blk->isValid());
- stats.cmdStats(pkt).mshr_uncacheable[pkt->req->masterId()]++;
+ stats.cmdStats(pkt).mshr_uncacheable[pkt->req->requestorId()]++;
if (pkt->isWrite()) {
allocateWriteBuffer(pkt, forward_time);
if (!mshr) {
// copy the request and create a new SoftPFReq packet
RequestPtr req = std::make_shared<Request>(pkt->req->getPaddr(),
- pkt->req->getSize(),
- pkt->req->getFlags(),
- pkt->req->masterId());
+ pkt->req->getSize(),
+ pkt->req->getFlags(),
+ pkt->req->requestorId());
pf = new Packet(req, pkt->cmd);
pf->allocate();
assert(pf->matchAddr(pkt));
assert(!tgt_pkt->req->isUncacheable());
- assert(tgt_pkt->req->masterId() < system->maxMasters());
+ assert(tgt_pkt->req->requestorId() < system->maxRequestors());
stats.cmdStats(tgt_pkt)
- .missLatency[tgt_pkt->req->masterId()] +=
+ .missLatency[tgt_pkt->req->requestorId()] +=
completion_time - target.recvTime;
} else if (pkt->cmd == MemCmd::UpgradeFailResp) {
// failed StoreCond upgrade
// Creating a zero sized write, a message to the snoop filter
RequestPtr req = std::make_shared<Request>(
- regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);
+ regenerateBlkAddr(blk), blkSize, 0, Request::wbRequestorId);
if (blk->isSecure())
req->setFlags(Request::SECURE);
if (forwardSnoops) {
// first propagate snoop upward to see if anyone above us wants to
// handle it. save & restore packet src since it will get
- // rewritten to be relative to cpu-side bus (if any)
+ // rewritten to be relative to CPU-side bus (if any)
if (is_timing) {
// copy the packet so that we can clear any flags before
// forwarding it upwards, we also allocate data (passing
// If this request is a prefetch or clean evict and an upper level
// signals block present, make sure to propagate the block
- // presence to the requester.
+ // presence to the requestor.
if (snoopPkt.isBlockCached()) {
pkt->setBlockCached();
}
cpuSidePort.sendAtomicSnoop(pkt);
if (!already_responded && pkt->cacheResponding()) {
// cache-to-cache response from some upper cache:
- // forward response to original requester
+ // forward response to original requestor
assert(pkt->isResponse());
}
}
void
CacheBlk::insert(const Addr tag, const bool is_secure,
- const int src_master_ID, const uint32_t task_ID)
+ const int src_requestor_ID, const uint32_t task_ID)
{
// Make sure that the block has been properly invalidated
assert(status == 0);
this->tag = tag;
// Set source requestor ID
- srcMasterId = src_master_ID;
+ srcRequestorId = src_requestor_ID;
// Set task ID
task_id = task_ID;
unsigned refCount;
/** holds the source requestor ID for this block. */
- int srcMasterId;
+ int srcRequestorId;
/**
* Tick on which the block was inserted in the cache. Its value is only
status = 0;
whenReady = MaxTick;
refCount = 0;
- srcMasterId = Request::invldMasterId;
+ srcRequestorId = Request::invldRequestorId;
lockList.clear();
}
*
* @param tag Block address tag.
* @param is_secure Whether the block is in secure space or not.
- * @param src_master_ID The source requestor ID.
+ * @param src_requestor_ID The source requestor ID.
* @param task_ID The new task ID.
*/
virtual void insert(const Addr tag, const bool is_secure,
- const int src_master_ID, const uint32_t task_ID);
+ const int src_requestor_ID, const uint32_t task_ID);
/**
* Track the fact that a local locked was issued to the
}
void insert(const Addr addr, const bool is_secure,
- const int src_master_ID=0, const uint32_t task_ID=0) override
+ const int src_requestor_ID=0, const uint32_t task_ID=0)
+ override
{
// Make sure that the block has been properly invalidated
assert(status == 0);
NoncoherentCache::functionalAccess(PacketPtr pkt, bool from_cpu_side)
{
panic_if(!from_cpu_side, "Non-coherent cache received functional snoop"
- " request\n");
+ " request\n");
BaseCache::functionalAccess(pkt, from_cpu_side);
}
completion_time += clockEdge(responseLatency) +
(transfer_offset ? pkt->payloadDelay : 0);
- assert(tgt_pkt->req->masterId() < system->maxMasters());
- stats.cmdStats(tgt_pkt).missLatency[tgt_pkt->req->masterId()] +=
+ assert(tgt_pkt->req->requestorId() < system->maxRequestors());
+ stats.cmdStats(tgt_pkt).missLatency[tgt_pkt->req->requestorId()] +=
completion_time - target.recvTime;
tgt_pkt->makeTimingResponse();
confidence_threshold = Param.Percent(50,
"Prefetch generation confidence threshold")
- use_master_id = Param.Bool(True, "Use master id based history")
+ use_requestor_id = Param.Bool(True, "Use requestor id based history")
degree = Param.Int(4, "Number of prefetches to generate")
Base::PrefetchInfo::PrefetchInfo(PacketPtr pkt, Addr addr, bool miss)
: address(addr), pc(pkt->req->hasPC() ? pkt->req->getPC() : 0),
- masterId(pkt->req->masterId()), validPC(pkt->req->hasPC()),
+ requestorId(pkt->req->requestorId()), validPC(pkt->req->hasPC()),
secure(pkt->isSecure()), size(pkt->req->getSize()), write(pkt->isWrite()),
paddress(pkt->req->getPaddr()), cacheMiss(miss)
{
}
Base::PrefetchInfo::PrefetchInfo(PrefetchInfo const &pfi, Addr addr)
- : address(addr), pc(pfi.pc), masterId(pfi.masterId), validPC(pfi.validPC),
- secure(pfi.secure), size(pfi.size), write(pfi.write),
- paddress(pfi.paddress), cacheMiss(pfi.cacheMiss), data(nullptr)
+ : address(addr), pc(pfi.pc), requestorId(pfi.requestorId),
+ validPC(pfi.validPC), secure(pfi.secure), size(pfi.size),
+ write(pfi.write), paddress(pfi.paddress), cacheMiss(pfi.cacheMiss),
+ data(nullptr)
{
}
: ClockedObject(p), listeners(), cache(nullptr), blkSize(p->block_size),
lBlkSize(floorLog2(blkSize)), onMiss(p->on_miss), onRead(p->on_read),
onWrite(p->on_write), onData(p->on_data), onInst(p->on_inst),
- masterId(p->sys->getMasterId(this)), pageBytes(p->sys->getPageBytes()),
+ requestorId(p->sys->getRequestorId(this)),
+ pageBytes(p->sys->getPageBytes()),
prefetchOnAccess(p->prefetch_on_access),
useVirtualAddresses(p->use_virtual_addresses),
prefetchStats(this), issuedPrefetches(0),
/** The program counter that generated this address. */
Addr pc;
/** The requestor ID that generated this address. */
- MasterID masterId;
+ RequestorID requestorId;
/** Validity bit for the PC of this address. */
bool validPC;
/** Whether this address targets the secure memory space. */
* Gets the requestor ID that generated this address
* @return the requestor ID that generated this address
*/
- MasterID getMasterId() const
+ RequestorID getRequestorId() const
{
- return masterId;
+ return requestorId;
}
/**
const bool onInst;
/** Request id for prefetches */
- const MasterID masterId;
+ const RequestorID requestorId;
const Addr pageBytes;
void
Queued::DeferredPacket::createPkt(Addr paddr, unsigned blk_size,
- MasterID mid, bool tag_prefetch,
+ RequestorID requestor_id,
+ bool tag_prefetch,
Tick t) {
/* Create a prefetch memory request */
- RequestPtr req = std::make_shared<Request>(paddr, blk_size, 0, mid);
+ RequestPtr req = std::make_shared<Request>(paddr, blk_size,
+ 0, requestor_id);
if (pfInfo.isSecure()) {
req->setFlags(Request::SECURE);
} else {
Tick pf_time = curTick() + clockPeriod() * latency;
it->createPkt(it->translationRequest->getPaddr(), blkSize,
- masterId, tagPrefetch, pf_time);
+ requestorId, tagPrefetch, pf_time);
addToQueue(pfq, *it);
}
} else {
PacketPtr pkt)
{
RequestPtr translation_req = std::make_shared<Request>(
- addr, blkSize, pkt->req->getFlags(), masterId, pfi.getPC(),
+ addr, blkSize, pkt->req->getFlags(), requestorId, pfi.getPC(),
pkt->req->contextId());
translation_req->setFlags(Request::PREFETCH);
return translation_req;
DeferredPacket dpp(this, new_pfi, 0, priority);
if (has_target_pa) {
Tick pf_time = curTick() + clockPeriod() * latency;
- dpp.createPkt(target_paddr, blkSize, masterId, tagPrefetch, pf_time);
+ dpp.createPkt(target_paddr, blkSize, requestorId, tagPrefetch,
+ pf_time);
DPRINTF(HWPrefetch, "Prefetch queued. "
"addr:%#x priority: %3d tick:%lld.\n",
new_pfi.getAddr(), priority, pf_time);
* Create the associated memory packet
* @param paddr physical address of this packet
* @param blk_size block size used by the prefetcher
- * @param mid Requester ID of the access that generated this prefetch
+ * @param requestor_id Requestor ID of the access that generated
+ * this prefetch
* @param tag_prefetch flag to indicate if the packet needs to be
* tagged
* @param t time when the prefetch becomes ready
*/
- void createPkt(Addr paddr, unsigned blk_size, MasterID mid,
+ void createPkt(Addr paddr, unsigned blk_size, RequestorID requestor_id,
bool tag_prefetch, Tick t);
/**
: Queued(p),
initConfidence(p->confidence_counter_bits, p->initial_confidence),
threshConf(p->confidence_threshold/100.0),
- useMasterId(p->use_master_id),
+ useRequestorId(p->use_requestor_id),
degree(p->degree),
pcTableInfo(p->table_assoc, p->table_entries, p->table_indexing_policy,
p->table_replacement_policy)
Addr pf_addr = pfi.getAddr();
Addr pc = pfi.getPC();
bool is_secure = pfi.isSecure();
- MasterID master_id = useMasterId ? pfi.getMasterId() : 0;
+ RequestorID requestor_id = useRequestorId ? pfi.getRequestorId() : 0;
// Get corresponding pc table
- PCTable* pcTable = findTable(master_id);
+ PCTable* pcTable = findTable(requestor_id);
// Search for entry in the pc table
StrideEntry *entry = pcTable->findEntry(pc, is_secure);
/** Confidence threshold for prefetch generation. */
const double threshConf;
- const bool useMasterId;
+ const bool useRequestorId;
const int degree;
// to insert the new one
// Deal with what we are bringing in
- MasterID master_id = pkt->req->masterId();
- assert(master_id < system->maxMasters());
- stats.occupancies[master_id]++;
+ RequestorID requestor_id = pkt->req->requestorId();
+ assert(requestor_id < system->maxRequestors());
+ stats.occupancies[requestor_id]++;
- // Insert block with tag, src master id and task id
- blk->insert(extractTag(pkt->getAddr()), pkt->isSecure(), master_id,
+ // Insert block with tag, src requestor id and task id
+ blk->insert(extractTag(pkt->getAddr()), pkt->isSecure(), requestor_id,
pkt->req->taskId());
// Check if cache warm up is done
avgRefs = totalRefs / sampledRefs;
occupancies
- .init(system->maxMasters())
+ .init(system->maxRequestors())
.flags(nozero | nonan)
;
- for (int i = 0; i < system->maxMasters(); i++) {
- occupancies.subname(i, system->getMasterName(i));
+ for (int i = 0; i < system->maxRequestors(); i++) {
+ occupancies.subname(i, system->getRequestorName(i));
}
avgOccs.flags(nozero | total);
- for (int i = 0; i < system->maxMasters(); i++) {
- avgOccs.subname(i, system->getMasterName(i));
+ for (int i = 0; i < system->maxRequestors(); i++) {
+ avgOccs.subname(i, system->getRequestorName(i));
}
avgOccs = occupancies / Stats::constant(tags.numBlocks);
assert(blk);
assert(blk->isValid());
- stats.occupancies[blk->srcMasterId]--;
+ stats.occupancies[blk->srcRequestorId]--;
stats.totalRefs += blk->refCount;
stats.sampledRefs++;
void
SectorSubBlk::insert(const Addr tag, const bool is_secure,
- const int src_master_ID, const uint32_t task_ID)
+ const int src_requestor_ID, const uint32_t task_ID)
{
// Make sure it is not overwriting another sector
panic_if((_sectorBlk && _sectorBlk->isValid()) &&
(_sectorBlk->isSecure() != is_secure),
"Overwriting valid sector!");
- CacheBlk::insert(tag, is_secure, src_master_ID, task_ID);
+ CacheBlk::insert(tag, is_secure, src_requestor_ID, task_ID);
// Set sector tag
_sectorBlk->setTag(tag);
*
* @param tag Block address tag.
* @param is_secure Whether the block is in secure space or not.
- * @param src_master_ID The source requestor ID.
+ * @param src_requestor_ID The source requestor ID.
* @param task_ID The new task ID.
*/
- void insert(const Addr tag, const bool is_secure, const int src_master_ID,
- const uint32_t task_ID) override;
+ void insert(const Addr tag, const bool is_secure,
+ const int src_requestor_ID, const uint32_t task_ID) override;
/**
* Pretty-print sector offset and other CacheBlk information.
snoopTraffic(this, "snoopTraffic", "Total snoop traffic (bytes)"),
snoopFanout(this, "snoop_fanout", "Request fanout histogram")
{
- // create the ports based on the size of the master and slave
- // vector ports, and the presence of the default port, the ports
- // are enumerated starting from zero
- for (int i = 0; i < p->port_master_connection_count; ++i) {
- std::string portName = csprintf("%s.master[%d]", name(), i);
- RequestPort* bp = new CoherentXBarMasterPort(portName, *this, i);
- masterPorts.push_back(bp);
+ // create the ports based on the size of the memory-side port and
+ // CPU-side port vector ports, and the presence of the default port,
+ // the ports are enumerated starting from zero
+ for (int i = 0; i < p->port_mem_side_ports_connection_count; ++i) {
+ std::string portName = csprintf("%s.mem_side_port[%d]", name(), i);
+ RequestPort* bp = new CoherentXBarRequestPort(portName, *this, i);
+ memSidePorts.push_back(bp);
reqLayers.push_back(new ReqLayer(*bp, *this,
csprintf("reqLayer%d", i)));
snoopLayers.push_back(
new SnoopRespLayer(*bp, *this, csprintf("snoopLayer%d", i)));
}
- // see if we have a default slave device connected and if so add
- // our corresponding master port
+ // see if we have a default CPU-side-port device connected and if so add
+ // our corresponding memory-side port
if (p->port_default_connection_count) {
- defaultPortID = masterPorts.size();
+ defaultPortID = memSidePorts.size();
std::string portName = name() + ".default";
- RequestPort* bp = new CoherentXBarMasterPort(portName, *this,
+ RequestPort* bp = new CoherentXBarRequestPort(portName, *this,
defaultPortID);
- masterPorts.push_back(bp);
+ memSidePorts.push_back(bp);
reqLayers.push_back(new ReqLayer(*bp, *this, csprintf("reqLayer%d",
defaultPortID)));
snoopLayers.push_back(new SnoopRespLayer(*bp, *this,
defaultPortID)));
}
- // create the slave ports, once again starting at zero
- for (int i = 0; i < p->port_slave_connection_count; ++i) {
- std::string portName = csprintf("%s.slave[%d]", name(), i);
- QueuedSlavePort* bp = new CoherentXBarSlavePort(portName, *this, i);
- slavePorts.push_back(bp);
+ // create the CPU-side ports, once again starting at zero
+ for (int i = 0; i < p->port_cpu_side_ports_connection_count; ++i) {
+ std::string portName = csprintf("%s.cpu_side_port[%d]", name(), i);
+ QueuedResponsePort* bp = new CoherentXBarResponsePort(portName,
+ *this, i);
+ cpuSidePorts.push_back(bp);
respLayers.push_back(new RespLayer(*bp, *this,
csprintf("respLayer%d", i)));
snoopRespPorts.push_back(new SnoopRespPort(*bp, *this));
{
BaseXBar::init();
- // iterate over our slave ports and determine which of our
- // neighbouring master ports are snooping and add them as snoopers
- for (const auto& p: slavePorts) {
- // check if the connected master port is snooping
+ // iterate over our CPU-side ports and determine which of our
+ // neighbouring memory-side ports are snooping and add them as snoopers
+ for (const auto& p: cpuSidePorts) {
+ // check if the connected memory-side port is snooping
if (p->isSnooping()) {
- DPRINTF(AddrRanges, "Adding snooping master %s\n", p->getPeer());
+ DPRINTF(AddrRanges, "Adding snooping requestor %s\n",
+ p->getPeer());
snoopPorts.push_back(p);
}
}
if (snoopPorts.empty())
warn("CoherentXBar %s has no snooping ports attached!\n", name());
- // inform the snoop filter about the slave ports so it can create
+ // inform the snoop filter about the CPU-side ports so it can create
// its own internal representation
if (snoopFilter)
- snoopFilter->setSlavePorts(slavePorts);
+ snoopFilter->setCPUSidePorts(cpuSidePorts);
}
bool
-CoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id)
+CoherentXBar::recvTimingReq(PacketPtr pkt, PortID cpu_side_port_id)
{
// determine the source port based on the id
- ResponsePort *src_port = slavePorts[slave_port_id];
+ ResponsePort *src_port = cpuSidePorts[cpu_side_port_id];
// remember if the packet is an express snoop
bool is_express_snoop = pkt->isExpressSnoop();
assert(is_express_snoop == cache_responding);
// determine the destination based on the destination address range
- PortID master_port_id = findPort(pkt->getAddrRange());
+ PortID mem_side_port_id = findPort(pkt->getAddrRange());
// test if the crossbar should be considered occupied for the current
// port, and exclude express snoops from the check
- if (!is_express_snoop && !reqLayers[master_port_id]->tryTiming(src_port)) {
+ if (!is_express_snoop &&
+ !reqLayers[mem_side_port_id]->tryTiming(src_port)) {
DPRINTF(CoherentXBar, "%s: src %s packet %s BUSY\n", __func__,
src_port->name(), pkt->print());
return false;
// before snooping we need to make sure that the memory
// below is not busy and the cache clean request can be
// forwarded to it
- if (!masterPorts[master_port_id]->tryTiming(pkt)) {
+ if (!memSidePorts[mem_side_port_id]->tryTiming(pkt)) {
DPRINTF(CoherentXBar, "%s: src %s packet %s RETRY\n", __func__,
src_port->name(), pkt->print());
// update the layer state and schedule an idle event
- reqLayers[master_port_id]->failedTiming(src_port,
+ reqLayers[mem_side_port_id]->failedTiming(src_port,
clockEdge(Cycles(1)));
return false;
}
if (!sf_res.first.empty())
pkt->setBlockCached();
} else {
- forwardTiming(pkt, slave_port_id, sf_res.first);
+ forwardTiming(pkt, cpu_side_port_id, sf_res.first);
}
} else {
- forwardTiming(pkt, slave_port_id);
+ forwardTiming(pkt, cpu_side_port_id);
}
// add the snoop delay to our header delay, and then reset it
}
// since it is a normal request, attempt to send the packet
- success = masterPorts[master_port_id]->sendTimingReq(pkt);
+ success = memSidePorts[mem_side_port_id]->sendTimingReq(pkt);
} else {
// no need to forward, turn this packet around and respond
// directly
src_port->name(), pkt->print());
// update the layer state and schedule an idle event
- reqLayers[master_port_id]->failedTiming(src_port,
+ reqLayers[mem_side_port_id]->failedTiming(src_port,
clockEdge(Cycles(1)));
} else {
// express snoops currently bypass the crossbar state entirely
// remember where to route the normal response to
if (expect_response || expect_snoop_resp) {
assert(routeTo.find(pkt->req) == routeTo.end());
- routeTo[pkt->req] = slave_port_id;
+ routeTo[pkt->req] = cpu_side_port_id;
panic_if(routeTo.size() > maxRoutingTableSizeCheck,
"%s: Routing table exceeds %d packets\n",
}
// update the layer state and schedule an idle event
- reqLayers[master_port_id]->succeededTiming(packetFinishTime);
+ reqLayers[mem_side_port_id]->succeededTiming(packetFinishTime);
}
// stats updates only consider packets that were successfully sent
- pktCount[slave_port_id][master_port_id]++;
- pktSize[slave_port_id][master_port_id] += pkt_size;
+ pktCount[cpu_side_port_id][mem_side_port_id]++;
+ pktSize[cpu_side_port_id][mem_side_port_id] += pkt_size;
transDist[pkt_cmd]++;
if (is_express_snoop) {
// normally we respond to the packet we just received if we need to
PacketPtr rsp_pkt = pkt;
- PortID rsp_port_id = slave_port_id;
+ PortID rsp_port_id = cpu_side_port_id;
// If this is the destination of the cache clean operation the
// crossbar is responsible for responding. This crossbar will
outstandingCMO.emplace(pkt->id, deferred_rsp);
if (!pkt->isWrite()) {
assert(routeTo.find(pkt->req) == routeTo.end());
- routeTo[pkt->req] = slave_port_id;
+ routeTo[pkt->req] = cpu_side_port_id;
panic_if(routeTo.size() > maxRoutingTableSizeCheck,
"%s: Routing table exceeds %d packets\n",
if (snoopFilter && !system->bypassCaches()) {
// let the snoop filter inspect the response and update its state
- snoopFilter->updateResponse(rsp_pkt, *slavePorts[rsp_port_id]);
+ snoopFilter->updateResponse(rsp_pkt, *cpuSidePorts[rsp_port_id]);
}
// we send the response after the current packet, even if the
Tick response_time = clockEdge() + pkt->headerDelay;
rsp_pkt->headerDelay = 0;
- slavePorts[rsp_port_id]->schedTimingResp(rsp_pkt, response_time);
+ cpuSidePorts[rsp_port_id]->schedTimingResp(rsp_pkt, response_time);
}
return success;
}
bool
-CoherentXBar::recvTimingResp(PacketPtr pkt, PortID master_port_id)
+CoherentXBar::recvTimingResp(PacketPtr pkt, PortID mem_side_port_id)
{
// determine the source port based on the id
- RequestPort *src_port = masterPorts[master_port_id];
+ RequestPort *src_port = memSidePorts[mem_side_port_id];
// determine the destination
const auto route_lookup = routeTo.find(pkt->req);
assert(route_lookup != routeTo.end());
- const PortID slave_port_id = route_lookup->second;
- assert(slave_port_id != InvalidPortID);
- assert(slave_port_id < respLayers.size());
+ const PortID cpu_side_port_id = route_lookup->second;
+ assert(cpu_side_port_id != InvalidPortID);
+ assert(cpu_side_port_id < respLayers.size());
// test if the crossbar should be considered occupied for the
// current port
- if (!respLayers[slave_port_id]->tryTiming(src_port)) {
+ if (!respLayers[cpu_side_port_id]->tryTiming(src_port)) {
DPRINTF(CoherentXBar, "%s: src %s packet %s BUSY\n", __func__,
src_port->name(), pkt->print());
return false;
if (snoopFilter && !system->bypassCaches()) {
// let the snoop filter inspect the response and update its state
- snoopFilter->updateResponse(pkt, *slavePorts[slave_port_id]);
+ snoopFilter->updateResponse(pkt, *cpuSidePorts[cpu_side_port_id]);
}
- // send the packet through the destination slave port and pay for
+ // send the packet through the destination CPU-side port and pay for
// any outstanding header delay
Tick latency = pkt->headerDelay;
pkt->headerDelay = 0;
- slavePorts[slave_port_id]->schedTimingResp(pkt, curTick() + latency);
+ cpuSidePorts[cpu_side_port_id]->schedTimingResp(pkt, curTick()
+ + latency);
// remove the request from the routing table
routeTo.erase(route_lookup);
- respLayers[slave_port_id]->succeededTiming(packetFinishTime);
+ respLayers[cpu_side_port_id]->succeededTiming(packetFinishTime);
// stats updates
- pktCount[slave_port_id][master_port_id]++;
- pktSize[slave_port_id][master_port_id] += pkt_size;
+ pktCount[cpu_side_port_id][mem_side_port_id]++;
+ pktSize[cpu_side_port_id][mem_side_port_id] += pkt_size;
transDist[pkt_cmd]++;
return true;
}
void
-CoherentXBar::recvTimingSnoopReq(PacketPtr pkt, PortID master_port_id)
+CoherentXBar::recvTimingSnoopReq(PacketPtr pkt, PortID mem_side_port_id)
{
DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
- masterPorts[master_port_id]->name(), pkt->print());
+ memSidePorts[mem_side_port_id]->name(), pkt->print());
// update stats here as we know the forwarding will succeed
unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
// of the snoop filter
pkt->headerDelay += sf_res.second * clockPeriod();
DPRINTF(CoherentXBar, "%s: src %s packet %s SF size: %i lat: %i\n",
- __func__, masterPorts[master_port_id]->name(), pkt->print(),
- sf_res.first.size(), sf_res.second);
+ __func__, memSidePorts[mem_side_port_id]->name(),
+ pkt->print(), sf_res.first.size(), sf_res.second);
// forward to all snoopers
forwardTiming(pkt, InvalidPortID, sf_res.first);
// if we can expect a response, remember how to route it
if (!cache_responding && pkt->cacheResponding()) {
assert(routeTo.find(pkt->req) == routeTo.end());
- routeTo[pkt->req] = master_port_id;
+ routeTo[pkt->req] = mem_side_port_id;
}
- // a snoop request came from a connected slave device (one of
- // our master ports), and if it is not coming from the slave
+ // a snoop request came from a connected CPU-side-port device (one of
+ // our memory-side ports), and if it is not coming from the CPU-side-port
// device responsible for the address range something is
// wrong, hence there is nothing further to do as the packet
// would be going back to where it came from
- assert(findPort(pkt->getAddrRange()) == master_port_id);
+ assert(findPort(pkt->getAddrRange()) == mem_side_port_id);
}
bool
-CoherentXBar::recvTimingSnoopResp(PacketPtr pkt, PortID slave_port_id)
+CoherentXBar::recvTimingSnoopResp(PacketPtr pkt, PortID cpu_side_port_id)
{
// determine the source port based on the id
- ResponsePort* src_port = slavePorts[slave_port_id];
+ ResponsePort* src_port = cpuSidePorts[cpu_side_port_id];
// get the destination
const auto route_lookup = routeTo.find(pkt->req);
return false;
}
} else {
- // get the master port that mirrors this slave port internally
- RequestPort* snoop_port = snoopRespPorts[slave_port_id];
+ // get the memory-side port that mirrors this CPU-side port internally
+ RequestPort* snoop_port = snoopRespPorts[cpu_side_port_id];
assert(dest_port_id < respLayers.size());
if (!respLayers[dest_port_id]->tryTiming(snoop_port)) {
DPRINTF(CoherentXBar, "%s: src %s packet %s BUSY\n", __func__,
if (snoopFilter) {
// update the probe filter so that it can properly track the line
- snoopFilter->updateSnoopForward(pkt, *slavePorts[slave_port_id],
- *masterPorts[dest_port_id]);
+ snoopFilter->updateSnoopForward(pkt,
+ *cpuSidePorts[cpu_side_port_id],
+ *memSidePorts[dest_port_id]);
}
bool success M5_VAR_USED =
- masterPorts[dest_port_id]->sendTimingSnoopResp(pkt);
- pktCount[slave_port_id][dest_port_id]++;
- pktSize[slave_port_id][dest_port_id] += pkt_size;
+ memSidePorts[dest_port_id]->sendTimingSnoopResp(pkt);
+ pktCount[cpu_side_port_id][dest_port_id]++;
+ pktSize[cpu_side_port_id][dest_port_id] += pkt_size;
assert(success);
snoopLayers[dest_port_id]->succeededTiming(packetFinishTime);
} else {
- // we got a snoop response on one of our slave ports,
- // i.e. from a coherent master connected to the crossbar, and
+ // we got a snoop response on one of our CPU-side ports,
+ // i.e. from a coherent requestor connected to the crossbar, and
// since we created the snoop request as part of recvTiming,
// this should now be a normal response again
outstandingSnoop.erase(pkt->req);
- // this is a snoop response from a coherent master, hence it
+ // this is a snoop response from a coherent requestor, hence it
// should never go back to where the snoop response came from,
// but instead to where the original request came from
- assert(slave_port_id != dest_port_id);
+ assert(cpu_side_port_id != dest_port_id);
if (snoopFilter) {
- // update the probe filter so that it can properly track the line
- snoopFilter->updateSnoopResponse(pkt, *slavePorts[slave_port_id],
- *slavePorts[dest_port_id]);
+ // update the probe filter so that it can properly track
+ // the line
+ snoopFilter->updateSnoopResponse(pkt,
+ *cpuSidePorts[cpu_side_port_id],
+ *cpuSidePorts[dest_port_id]);
}
DPRINTF(CoherentXBar, "%s: src %s packet %s FWD RESP\n", __func__,
src_port->name(), pkt->print());
- // as a normal response, it should go back to a master through
- // one of our slave ports, we also pay for any outstanding
+ // as a normal response, it should go back to a requestor through
+ // one of our CPU-side ports, we also pay for any outstanding
// header latency
Tick latency = pkt->headerDelay;
pkt->headerDelay = 0;
- slavePorts[dest_port_id]->schedTimingResp(pkt, curTick() + latency);
+ cpuSidePorts[dest_port_id]->schedTimingResp(pkt,
+ curTick() + latency);
respLayers[dest_port_id]->succeededTiming(packetFinishTime);
}
void
-CoherentXBar::forwardTiming(PacketPtr pkt, PortID exclude_slave_port_id,
- const std::vector<QueuedSlavePort*>& dests)
+CoherentXBar::forwardTiming(PacketPtr pkt, PortID exclude_cpu_side_port_id,
+ const std::vector<QueuedResponsePort*>& dests)
{
DPRINTF(CoherentXBar, "%s for %s\n", __func__, pkt->print());
unsigned fanout = 0;
for (const auto& p: dests) {
- // we could have gotten this request from a snooping master
- // (corresponding to our own slave port that is also in
+ // we could have gotten this request from a snooping requestor
+ // (corresponding to our own CPU-side port that is also in
// snoopPorts) and should not send it back to where it came
// from
- if (exclude_slave_port_id == InvalidPortID ||
- p->getId() != exclude_slave_port_id) {
+ if (exclude_cpu_side_port_id == InvalidPortID ||
+ p->getId() != exclude_cpu_side_port_id) {
// cache is not allowed to refuse snoop
p->sendTimingSnoopReq(pkt);
fanout++;
}
void
-CoherentXBar::recvReqRetry(PortID master_port_id)
+CoherentXBar::recvReqRetry(PortID mem_side_port_id)
{
// responses and snoop responses never block on forwarding them,
// so the retry will always be coming from a port to which we
// tried to forward a request
- reqLayers[master_port_id]->recvRetry();
+ reqLayers[mem_side_port_id]->recvRetry();
}
Tick
-CoherentXBar::recvAtomicBackdoor(PacketPtr pkt, PortID slave_port_id,
+CoherentXBar::recvAtomicBackdoor(PacketPtr pkt, PortID cpu_side_port_id,
MemBackdoorPtr *backdoor)
{
DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
- slavePorts[slave_port_id]->name(), pkt->print());
+ cpuSidePorts[cpu_side_port_id]->name(), pkt->print());
unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
unsigned int pkt_cmd = pkt->cmdToIndex();
if (snoopFilter) {
// check with the snoop filter where to forward this packet
auto sf_res =
- snoopFilter->lookupRequest(pkt, *slavePorts[slave_port_id]);
            snoopFilter->lookupRequest(pkt,
+                                       *cpuSidePorts[cpu_side_port_id]);
snoop_response_latency += sf_res.second * clockPeriod();
DPRINTF(CoherentXBar, "%s: src %s packet %s SF size: %i lat: %i\n",
- __func__, slavePorts[slave_port_id]->name(), pkt->print(),
- sf_res.first.size(), sf_res.second);
+ __func__, cpuSidePorts[cpu_side_port_id]->name(),
+ pkt->print(), sf_res.first.size(), sf_res.second);
// let the snoop filter know about the success of the send
// operation, and do it even before sending it onwards to
if (!sf_res.first.empty())
pkt->setBlockCached();
} else {
- snoop_result = forwardAtomic(pkt, slave_port_id, InvalidPortID,
- sf_res.first);
+ snoop_result = forwardAtomic(pkt, cpu_side_port_id,
+ InvalidPortID, sf_res.first);
}
} else {
- snoop_result = forwardAtomic(pkt, slave_port_id);
+ snoop_result = forwardAtomic(pkt, cpu_side_port_id);
}
snoop_response_cmd = snoop_result.first;
snoop_response_latency += snoop_result.second;
// even if we had a snoop response, we must continue and also
// perform the actual request at the destination
- PortID master_port_id = findPort(pkt->getAddrRange());
+ PortID mem_side_port_id = findPort(pkt->getAddrRange());
if (sink_packet) {
DPRINTF(CoherentXBar, "%s: Not forwarding %s\n", __func__,
}
// forward the request to the appropriate destination
- auto master = masterPorts[master_port_id];
+ auto mem_side_port = memSidePorts[mem_side_port_id];
response_latency = backdoor ?
- master->sendAtomicBackdoor(pkt, *backdoor) :
- master->sendAtomic(pkt);
+ mem_side_port->sendAtomicBackdoor(pkt, *backdoor) :
+ mem_side_port->sendAtomic(pkt);
} else {
// if it does not need a response we sink the packet above
assert(pkt->needsResponse());
}
// stats updates for the request
- pktCount[slave_port_id][master_port_id]++;
- pktSize[slave_port_id][master_port_id] += pkt_size;
+ pktCount[cpu_side_port_id][mem_side_port_id]++;
+ pktSize[cpu_side_port_id][mem_side_port_id] += pkt_size;
transDist[pkt_cmd]++;
// if lower levels have replied, tell the snoop filter
if (!system->bypassCaches() && snoopFilter && pkt->isResponse()) {
- snoopFilter->updateResponse(pkt, *slavePorts[slave_port_id]);
+ snoopFilter->updateResponse(pkt, *cpuSidePorts[cpu_side_port_id]);
}
// if we got a response from a snooper, restore it here
pkt_cmd = pkt->cmdToIndex();
// stats updates
- pktCount[slave_port_id][master_port_id]++;
- pktSize[slave_port_id][master_port_id] += pkt_size;
+ pktCount[cpu_side_port_id][mem_side_port_id]++;
+ pktSize[cpu_side_port_id][mem_side_port_id] += pkt_size;
transDist[pkt_cmd]++;
}
}
Tick
-CoherentXBar::recvAtomicSnoop(PacketPtr pkt, PortID master_port_id)
+CoherentXBar::recvAtomicSnoop(PacketPtr pkt, PortID mem_side_port_id)
{
DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
- masterPorts[master_port_id]->name(), pkt->print());
+ memSidePorts[mem_side_port_id]->name(), pkt->print());
// add the request snoop data
unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
auto sf_res = snoopFilter->lookupSnoop(pkt);
snoop_response_latency += sf_res.second * clockPeriod();
DPRINTF(CoherentXBar, "%s: src %s packet %s SF size: %i lat: %i\n",
- __func__, masterPorts[master_port_id]->name(), pkt->print(),
- sf_res.first.size(), sf_res.second);
- snoop_result = forwardAtomic(pkt, InvalidPortID, master_port_id,
+ __func__, memSidePorts[mem_side_port_id]->name(),
+ pkt->print(), sf_res.first.size(), sf_res.second);
+ snoop_result = forwardAtomic(pkt, InvalidPortID, mem_side_port_id,
sf_res.first);
} else {
snoop_result = forwardAtomic(pkt, InvalidPortID);
}
std::pair<MemCmd, Tick>
-CoherentXBar::forwardAtomic(PacketPtr pkt, PortID exclude_slave_port_id,
- PortID source_master_port_id,
- const std::vector<QueuedSlavePort*>& dests)
+CoherentXBar::forwardAtomic(PacketPtr pkt, PortID exclude_cpu_side_port_id,
+ PortID source_mem_side_port_id,
+ const std::vector<QueuedResponsePort*>& dests)
{
// the packet may be changed on snoops, record the original
// command to enable us to restore it between snoops so that
unsigned fanout = 0;
for (const auto& p: dests) {
- // we could have gotten this request from a snooping master
- // (corresponding to our own slave port that is also in
+ // we could have gotten this request from a snooping memory-side port
+ // (corresponding to our own CPU-side port that is also in
// snoopPorts) and should not send it back to where it came
// from
- if (exclude_slave_port_id != InvalidPortID &&
- p->getId() == exclude_slave_port_id)
+ if (exclude_cpu_side_port_id != InvalidPortID &&
+ p->getId() == exclude_cpu_side_port_id)
continue;
Tick latency = p->sendAtomicSnoop(pkt);
if (snoopFilter) {
// Handle responses by the snoopers and differentiate between
// responses to requests from above and snoops from below
- if (source_master_port_id != InvalidPortID) {
+ if (source_mem_side_port_id != InvalidPortID) {
// Getting a response for a snoop from below
- assert(exclude_slave_port_id == InvalidPortID);
+ assert(exclude_cpu_side_port_id == InvalidPortID);
snoopFilter->updateSnoopForward(pkt, *p,
- *masterPorts[source_master_port_id]);
+ *memSidePorts[source_mem_side_port_id]);
} else {
// Getting a response for a request from above
- assert(source_master_port_id == InvalidPortID);
+ assert(source_mem_side_port_id == InvalidPortID);
snoopFilter->updateSnoopResponse(pkt, *p,
- *slavePorts[exclude_slave_port_id]);
+ *cpuSidePorts[exclude_cpu_side_port_id]);
}
}
// restore original packet state for remaining snoopers
}
void
-CoherentXBar::recvFunctional(PacketPtr pkt, PortID slave_port_id)
+CoherentXBar::recvFunctional(PacketPtr pkt, PortID cpu_side_port_id)
{
if (!pkt->isPrint()) {
// don't do DPRINTFs on PrintReq as it clutters up the output
DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
- slavePorts[slave_port_id]->name(), pkt->print());
+ cpuSidePorts[cpu_side_port_id]->name(), pkt->print());
}
if (!system->bypassCaches()) {
// forward to all snoopers but the source
- forwardFunctional(pkt, slave_port_id);
+ forwardFunctional(pkt, cpu_side_port_id);
}
// there is no need to continue if the snooping has found what we
// were looking for and the packet is already a response
if (!pkt->isResponse()) {
- // since our slave ports are queued ports we need to check them as well
- for (const auto& p : slavePorts) {
+ // since our CPU-side ports are queued ports we need to check
+ // them as well
+ for (const auto& p : cpuSidePorts) {
// if we find a response that has the data, then the
// downstream caches/memories may be out of date, so simply stop
// here
PortID dest_id = findPort(pkt->getAddrRange());
- masterPorts[dest_id]->sendFunctional(pkt);
+ memSidePorts[dest_id]->sendFunctional(pkt);
}
}
void
-CoherentXBar::recvFunctionalSnoop(PacketPtr pkt, PortID master_port_id)
+CoherentXBar::recvFunctionalSnoop(PacketPtr pkt, PortID mem_side_port_id)
{
if (!pkt->isPrint()) {
// don't do DPRINTFs on PrintReq as it clutters up the output
DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__,
- masterPorts[master_port_id]->name(), pkt->print());
+ memSidePorts[mem_side_port_id]->name(), pkt->print());
}
- for (const auto& p : slavePorts) {
+ for (const auto& p : cpuSidePorts) {
if (p->trySatisfyFunctional(pkt)) {
if (pkt->needsResponse())
pkt->makeResponse();
}
void
-CoherentXBar::forwardFunctional(PacketPtr pkt, PortID exclude_slave_port_id)
+CoherentXBar::forwardFunctional(PacketPtr pkt, PortID exclude_cpu_side_port_id)
{
// snoops should only happen if the system isn't bypassing caches
assert(!system->bypassCaches());
for (const auto& p: snoopPorts) {
- // we could have gotten this request from a snooping master
- // (corresponding to our own slave port that is also in
+ // we could have gotten this request from a snooping requestor
+ // (corresponding to our own CPU-side port that is also in
// snoopPorts) and should not send it back to where it came
// from
- if (exclude_slave_port_id == InvalidPortID ||
- p->getId() != exclude_slave_port_id)
+ if (exclude_cpu_side_port_id == InvalidPortID ||
+ p->getId() != exclude_cpu_side_port_id)
p->sendFunctionalSnoop(pkt);
// if we get a response we are done
/**
* A coherent crossbar connects a number of (potentially) snooping
- * masters and slaves, and routes the request and response packets
+ * requestors and responders, and routes the request and response packets
* based on the address, and also forwards all requests to the
* snoopers and deals with the snoop responses.
*
std::vector<SnoopRespLayer*> snoopLayers;
/**
- * Declaration of the coherent crossbar slave port type, one will
- * be instantiated for each of the master ports connecting to the
+ * Declaration of the coherent crossbar CPU-side port type, one will
+ * be instantiated for each of the memory-side ports connecting to the
* crossbar.
*/
- class CoherentXBarSlavePort : public QueuedSlavePort
+ class CoherentXBarResponsePort : public QueuedResponsePort
{
private:
public:
- CoherentXBarSlavePort(const std::string &_name,
+ CoherentXBarResponsePort(const std::string &_name,
CoherentXBar &_xbar, PortID _id)
- : QueuedSlavePort(_name, &_xbar, queue, _id), xbar(_xbar),
+ : QueuedResponsePort(_name, &_xbar, queue, _id), xbar(_xbar),
queue(_xbar, *this)
{ }
};
/**
- * Declaration of the coherent crossbar master port type, one will be
- * instantiated for each of the slave interfaces connecting to the
+ * Declaration of the coherent crossbar memory-side port type, one will be
+ * instantiated for each of the CPU-side-port interfaces connecting to the
* crossbar.
*/
- class CoherentXBarMasterPort : public RequestPort
+ class CoherentXBarRequestPort : public RequestPort
{
private:
/** A reference to the crossbar to which this port belongs. */
public:
- CoherentXBarMasterPort(const std::string &_name,
+ CoherentXBarRequestPort(const std::string &_name,
CoherentXBar &_xbar, PortID _id)
: RequestPort(_name, &_xbar, _id), xbar(_xbar)
{ }
/**
* Determine if this port should be considered a snooper. For
- * a coherent crossbar master port this is always true.
+ * a coherent crossbar memory-side port this is always true.
*
* @return a boolean that is true if this port is snooping
*/
/**
* Internal class to bridge between an incoming snoop response
- * from a slave port and forwarding it through an outgoing slave
- * port. It is effectively a dangling master port.
+ * from a CPU-side port and forwarding it through an outgoing
+ * CPU-side port. It is effectively a dangling memory-side port.
*/
class SnoopRespPort : public RequestPort
{
private:
/** The port which we mirror internally. */
- QueuedSlavePort& slavePort;
+ QueuedResponsePort& cpuSidePort;
public:
/**
- * Create a snoop response port that mirrors a given slave port.
+ * Create a snoop response port that mirrors a given CPU-side port.
*/
- SnoopRespPort(QueuedSlavePort& slave_port, CoherentXBar& _xbar) :
- RequestPort(slave_port.name() + ".snoopRespPort", &_xbar),
- slavePort(slave_port) { }
+ SnoopRespPort(QueuedResponsePort& cpu_side_port,
+ CoherentXBar& _xbar) :
+ RequestPort(cpu_side_port.name() + ".snoopRespPort", &_xbar),
+ cpuSidePort(cpu_side_port) { }
/**
* Override the sending of retries and pass them on through
- * the mirrored slave port.
+ * the mirrored CPU-side port.
*/
void
sendRetryResp() override
{
// forward it as a snoop response retry
- slavePort.sendRetrySnoopResp();
+ cpuSidePort.sendRetrySnoopResp();
}
void
std::vector<SnoopRespPort*> snoopRespPorts;
- std::vector<QueuedSlavePort*> snoopPorts;
+ std::vector<QueuedResponsePort*> snoopPorts;
/**
* Store the outstanding requests that we are expecting snoop
*/
std::unique_ptr<Packet> pendingDelete;
- bool recvTimingReq(PacketPtr pkt, PortID slave_port_id);
- bool recvTimingResp(PacketPtr pkt, PortID master_port_id);
- void recvTimingSnoopReq(PacketPtr pkt, PortID master_port_id);
- bool recvTimingSnoopResp(PacketPtr pkt, PortID slave_port_id);
- void recvReqRetry(PortID master_port_id);
+ bool recvTimingReq(PacketPtr pkt, PortID cpu_side_port_id);
+ bool recvTimingResp(PacketPtr pkt, PortID mem_side_port_id);
+ void recvTimingSnoopReq(PacketPtr pkt, PortID mem_side_port_id);
+ bool recvTimingSnoopResp(PacketPtr pkt, PortID cpu_side_port_id);
+ void recvReqRetry(PortID mem_side_port_id);
/**
* Forward a timing packet to our snoopers, potentially excluding
- * one of the connected coherent masters to avoid sending a packet
+ * one of the connected coherent requestors to avoid sending a packet
* back to where it came from.
*
* @param pkt Packet to forward
- * @param exclude_slave_port_id Id of slave port to exclude
+ * @param exclude_cpu_side_port_id Id of CPU-side port to exclude
*/
void
- forwardTiming(PacketPtr pkt, PortID exclude_slave_port_id)
+ forwardTiming(PacketPtr pkt, PortID exclude_cpu_side_port_id)
{
- forwardTiming(pkt, exclude_slave_port_id, snoopPorts);
+ forwardTiming(pkt, exclude_cpu_side_port_id, snoopPorts);
}
/**
* Forward a timing packet to a selected list of snoopers, potentially
- * excluding one of the connected coherent masters to avoid sending a packet
- * back to where it came from.
+ * excluding one of the connected coherent requestors to avoid sending
+ * a packet back to where it came from.
*
* @param pkt Packet to forward
- * @param exclude_slave_port_id Id of slave port to exclude
+ * @param exclude_cpu_side_port_id Id of CPU-side port to exclude
* @param dests Vector of destination ports for the forwarded pkt
*/
- void forwardTiming(PacketPtr pkt, PortID exclude_slave_port_id,
- const std::vector<QueuedSlavePort*>& dests);
+ void forwardTiming(PacketPtr pkt, PortID exclude_cpu_side_port_id,
+ const std::vector<QueuedResponsePort*>& dests);
- Tick recvAtomicBackdoor(PacketPtr pkt, PortID slave_port_id,
+ Tick recvAtomicBackdoor(PacketPtr pkt, PortID cpu_side_port_id,
MemBackdoorPtr *backdoor=nullptr);
- Tick recvAtomicSnoop(PacketPtr pkt, PortID master_port_id);
+ Tick recvAtomicSnoop(PacketPtr pkt, PortID mem_side_port_id);
/**
* Forward an atomic packet to our snoopers, potentially excluding
- * one of the connected coherent masters to avoid sending a packet
+ * one of the connected coherent requestors to avoid sending a packet
* back to where it came from.
*
* @param pkt Packet to forward
- * @param exclude_slave_port_id Id of slave port to exclude
+ * @param exclude_cpu_side_port_id Id of CPU-side port to exclude
*
* @return a pair containing the snoop response and snoop latency
*/
std::pair<MemCmd, Tick>
- forwardAtomic(PacketPtr pkt, PortID exclude_slave_port_id)
+ forwardAtomic(PacketPtr pkt, PortID exclude_cpu_side_port_id)
{
- return forwardAtomic(pkt, exclude_slave_port_id, InvalidPortID,
+ return forwardAtomic(pkt, exclude_cpu_side_port_id, InvalidPortID,
snoopPorts);
}
/**
* Forward an atomic packet to a selected list of snoopers, potentially
- * excluding one of the connected coherent masters to avoid sending a packet
- * back to where it came from.
+ * excluding one of the connected coherent requestors to avoid sending a
+ * packet back to where it came from.
*
* @param pkt Packet to forward
- * @param exclude_slave_port_id Id of slave port to exclude
- * @param source_master_port_id Id of the master port for snoops from below
+ * @param exclude_cpu_side_port_id Id of CPU-side port to exclude
+ * @param source_mem_side_port_id Id of the memory-side port for
+ * snoops from below
* @param dests Vector of destination ports for the forwarded pkt
*
* @return a pair containing the snoop response and snoop latency
*/
std::pair<MemCmd, Tick> forwardAtomic(PacketPtr pkt,
- PortID exclude_slave_port_id,
- PortID source_master_port_id,
- const std::vector<QueuedSlavePort*>&
+ PortID exclude_cpu_side_port_id,
+ PortID source_mem_side_port_id,
+ const std::vector<QueuedResponsePort*>&
dests);
- /** Function called by the port when the crossbar is recieving a Functional
+ /** Function called by the port when the crossbar is receiving a Functional
transaction.*/
- void recvFunctional(PacketPtr pkt, PortID slave_port_id);
+ void recvFunctional(PacketPtr pkt, PortID cpu_side_port_id);
- /** Function called by the port when the crossbar is recieving a functional
+ /** Function called by the port when the crossbar is receiving a functional
snoop transaction.*/
- void recvFunctionalSnoop(PacketPtr pkt, PortID master_port_id);
+ void recvFunctionalSnoop(PacketPtr pkt, PortID mem_side_port_id);
/**
* Forward a functional packet to our snoopers, potentially
- * excluding one of the connected coherent masters to avoid
+ * excluding one of the connected coherent requestors to avoid
* sending a packet back to where it came from.
*
* @param pkt Packet to forward
- * @param exclude_slave_port_id Id of slave port to exclude
+ * @param exclude_cpu_side_port_id Id of CPU-side port to exclude
*/
- void forwardFunctional(PacketPtr pkt, PortID exclude_slave_port_id);
+ void forwardFunctional(PacketPtr pkt, PortID exclude_cpu_side_port_id);
/**
* Determine if the crossbar should sink the packet, as opposed to
CommMonitor::CommMonitor(Params* params)
: SimObject(params),
- masterPort(name() + "-master", *this),
- slavePort(name() + "-slave", *this),
+ memSidePort(name() + "-mem_side_port", *this),
+ cpuSidePort(name() + "-cpu_side_port", *this),
samplePeriodicEvent([this]{ samplePeriodic(); }, name()),
samplePeriodTicks(params->sample_period),
samplePeriod(params->sample_period / SimClock::Float::s),
CommMonitor::init()
{
// make sure both sides of the monitor are connected
- if (!slavePort.isConnected() || !masterPort.isConnected())
+ if (!cpuSidePort.isConnected() || !memSidePort.isConnected())
fatal("Communication monitor is not connected on both sides.\n");
}
Port &
CommMonitor::getPort(const std::string &if_name, PortID idx)
{
- if (if_name == "master") {
- return masterPort;
- } else if (if_name == "slave") {
- return slavePort;
+ if (if_name == "mem_side_port") {
+ return memSidePort;
+ } else if (if_name == "cpu_side_port") {
+ return cpuSidePort;
} else {
return SimObject::getPort(if_name, idx);
}
void
CommMonitor::recvFunctional(PacketPtr pkt)
{
- masterPort.sendFunctional(pkt);
+ memSidePort.sendFunctional(pkt);
}
void
CommMonitor::recvFunctionalSnoop(PacketPtr pkt)
{
- slavePort.sendFunctionalSnoop(pkt);
+ cpuSidePort.sendFunctionalSnoop(pkt);
}
CommMonitor::MonitorStats::MonitorStats(Stats::Group *parent,
ProbePoints::PacketInfo req_pkt_info(pkt);
ppPktReq->notify(req_pkt_info);
- const Tick delay(masterPort.sendAtomic(pkt));
+ const Tick delay(memSidePort.sendAtomic(pkt));
stats.updateReqStats(req_pkt_info, true, expects_response);
if (expects_response)
Tick
CommMonitor::recvAtomicSnoop(PacketPtr pkt)
{
- return slavePort.sendAtomicSnoop(pkt);
+ return cpuSidePort.sendAtomicSnoop(pkt);
}
bool
}
// Attempt to send the packet
- bool successful = masterPort.sendTimingReq(pkt);
+ bool successful = memSidePort.sendTimingReq(pkt);
// If not successful, restore the sender state
if (!successful && expects_response && !stats.disableLatencyHists) {
}
// Attempt to send the packet
- bool successful = slavePort.sendTimingResp(pkt);
+ bool successful = cpuSidePort.sendTimingResp(pkt);
if (!stats.disableLatencyHists) {
// If packet successfully send, sample value of latency,
void
CommMonitor::recvTimingSnoopReq(PacketPtr pkt)
{
- slavePort.sendTimingSnoopReq(pkt);
+ cpuSidePort.sendTimingSnoopReq(pkt);
}
bool
CommMonitor::recvTimingSnoopResp(PacketPtr pkt)
{
- return masterPort.sendTimingSnoopResp(pkt);
+ return memSidePort.sendTimingSnoopResp(pkt);
}
void
CommMonitor::recvRetrySnoopResp()
{
- slavePort.sendRetrySnoopResp();
+ cpuSidePort.sendRetrySnoopResp();
}
bool
CommMonitor::isSnooping() const
{
- // check if the connected master port is snooping
- return slavePort.isSnooping();
+ // check if the connected request port is snooping
+ return cpuSidePort.isSnooping();
}
AddrRangeList
CommMonitor::getAddrRanges() const
{
- // get the address ranges of the connected slave port
- return masterPort.getAddrRanges();
+ // get the address ranges of the connected CPU-side port
+ return memSidePort.getAddrRanges();
}
void
CommMonitor::recvReqRetry()
{
- slavePort.sendRetryReq();
+ cpuSidePort.sendRetryReq();
}
void
CommMonitor::recvRespRetry()
{
- masterPort.sendRetryResp();
+ memSidePort.sendRetryResp();
}
bool
CommMonitor::tryTiming(PacketPtr pkt)
{
- return masterPort.tryTiming(pkt);
+ return memSidePort.tryTiming(pkt);
}
void
CommMonitor::recvRangeChange()
{
- slavePort.sendRangeChange();
+ cpuSidePort.sendRangeChange();
}
void
};
/**
- * This is the master port of the communication monitor. All recv
+ * This is the request port of the communication monitor. All recv
* functions call a function in CommMonitor, where the
- * send function of the slave port is called. Besides this, these
+ * send function of the CPU-side port is called. Besides this, these
* functions can also perform actions for capturing statistics.
*/
- class MonitorMasterPort : public RequestPort
+ class MonitorRequestPort : public RequestPort
{
public:
- MonitorMasterPort(const std::string& _name, CommMonitor& _mon)
+ MonitorRequestPort(const std::string& _name, CommMonitor& _mon)
: RequestPort(_name, &_mon), mon(_mon)
{ }
};
- /** Instance of master port, facing the memory side */
- MonitorMasterPort masterPort;
+ /** Instance of request port, facing the memory side */
+ MonitorRequestPort memSidePort;
/**
- * This is the slave port of the communication monitor. All recv
+ * This is the CPU-side port of the communication monitor. All recv
* functions call a function in CommMonitor, where the
- * send function of the master port is called. Besides this, these
+ * send function of the request port is called. Besides this, these
* functions can also perform actions for capturing statistics.
*/
- class MonitorSlavePort : public ResponsePort
+ class MonitorResponsePort : public ResponsePort
{
public:
- MonitorSlavePort(const std::string& _name, CommMonitor& _mon)
+ MonitorResponsePort(const std::string& _name, CommMonitor& _mon)
: ResponsePort(_name, &_mon), mon(_mon)
{ }
};
- /** Instance of slave port, i.e. on the CPU side */
- MonitorSlavePort slavePort;
+ /** Instance of response port, i.e. on the CPU side */
+ MonitorResponsePort cpuSidePort;
void recvFunctional(PacketPtr pkt);
// response
access(pkt);
- // turn packet around to go back to requester if response expected
+ // turn packet around to go back to requestor if response expected
if (needsResponse) {
// access already turned the packet into a response
assert(pkt->isResponse());
DRAMSim2::MemoryPort::MemoryPort(const std::string& _name,
DRAMSim2& _memory)
- : SlavePort(_name, &_memory), memory(_memory)
+ : ResponsePort(_name, &_memory), memory(_memory)
{ }
AddrRangeList
* having unbounded storage that is implicitly created in the port
* itself.
*/
- class MemoryPort : public SlavePort
+ class MemoryPort : public ResponsePort
{
private:
// response
access(pkt);
- // turn packet around to go back to requester if response expected
+ // turn packet around to go back to requestor if response expected
if (needsResponse) {
// access already turned the packet into a response
assert(pkt->isResponse());
DRAMsim3::MemoryPort::MemoryPort(const std::string& _name,
DRAMsim3& _memory)
- : SlavePort(_name, &_memory), memory(_memory)
+ : ResponsePort(_name, &_memory), memory(_memory)
{ }
AddrRangeList
* having unbounded storage that is implicitly created in the port
* itself.
*/
- class MemoryPort : public SlavePort
+ class MemoryPort : public ResponsePort
{
private:
portName(params->name + ".port"),
portType(params->port_type),
portData(params->port_data),
- masterId(params->system->getMasterId(this))
+ id(params->system->getRequestorId(this))
{}
Port &
* @file
*
* ExternalMaster is a memory object representing a binding from
- * a gem5 slave to a master port in a system external to gem5.
+ * a gem5 responder to a request port in a system external to gem5.
*
* During initialisation, a `handler' for the port type specified in the
* port's port_type parameter is found from the registered port handlers
void init() override;
- const MasterID masterId;
+ const RequestorID id;
};
* @file
*
* ExternalSlave is a memory object representing a binding from
- * a gem5 master to a slave port in a system external to gem5.
+ * a gem5 requestor to a response port in a system external to gem5.
*
* During initialisation, a `handler' for the port type specified in the
* port's port_type parameter is found from the registered port handlers
HMCController::HMCController(const HMCControllerParams* p) :
NoncoherentXBar(p),
- n_master_ports(p->port_master_connection_count),
+ numMemSidePorts(p->port_mem_side_ports_connection_count),
rr_counter(0)
{
- assert(p->port_slave_connection_count == 1);
+ assert(p->port_cpu_side_ports_connection_count == 1);
}
HMCController*
return new HMCController(this);
}
-// Since this module is a load distributor, all its master ports have the same
+// Since this module is a load distributor, all its request ports have the same
// range so we should keep only one of the ranges and ignore the others
-void HMCController::recvRangeChange(PortID master_port_id)
+void HMCController::recvRangeChange(PortID mem_side_port_id)
{
- if (master_port_id == 0)
+ if (mem_side_port_id == 0)
{
gotAllAddrRanges = true;
- BaseXBar::recvRangeChange(master_port_id);
+ BaseXBar::recvRangeChange(mem_side_port_id);
}
else
- gotAddrRanges[master_port_id] = true;
+ gotAddrRanges[mem_side_port_id] = true;
}
int HMCController::rotate_counter()
{
int current_value = rr_counter;
rr_counter++;
- if (rr_counter == n_master_ports)
+ if (rr_counter == numMemSidePorts)
rr_counter = 0;
return current_value;
}
-bool HMCController::recvTimingReq(PacketPtr pkt, PortID slave_port_id)
+bool HMCController::recvTimingReq(PacketPtr pkt, PortID cpu_side_port_id)
{
// determine the source port based on the id
- ResponsePort *src_port = slavePorts[slave_port_id];
+ ResponsePort *src_port = cpuSidePorts[cpu_side_port_id];
// we should never see express snoops on a non-coherent component
assert(!pkt->isExpressSnoop());
// For now, this is a simple round robin counter, for distribution the
// load among the serial links
- PortID master_port_id = rotate_counter();
+ PortID mem_side_port_id = rotate_counter();
// test if the layer should be considered occupied for the current
// port
- if (!reqLayers[master_port_id]->tryTiming(src_port)) {
+ if (!reqLayers[mem_side_port_id]->tryTiming(src_port)) {
DPRINTF(HMCController, "recvTimingReq: src %s %s 0x%x BUSY\n",
src_port->name(), pkt->cmdString(), pkt->getAddr());
return false;
!pkt->cacheResponding();
// since it is a normal request, attempt to send the packet
- bool success = masterPorts[master_port_id]->sendTimingReq(pkt);
+ bool success = memSidePorts[mem_side_port_id]->sendTimingReq(pkt);
if (!success) {
DPRINTF(HMCController, "recvTimingReq: src %s %s 0x%x RETRY\n",
pkt->headerDelay = old_header_delay;
// occupy until the header is sent
- reqLayers[master_port_id]->failedTiming(src_port,
+ reqLayers[mem_side_port_id]->failedTiming(src_port,
clockEdge(Cycles(1)));
return false;
// remember where to route the response to
if (expect_response) {
assert(routeTo.find(pkt->req) == routeTo.end());
- routeTo[pkt->req] = slave_port_id;
+ routeTo[pkt->req] = cpu_side_port_id;
}
- reqLayers[master_port_id]->succeededTiming(packetFinishTime);
+ reqLayers[mem_side_port_id]->succeededTiming(packetFinishTime);
// stats updates
- pktCount[slave_port_id][master_port_id]++;
- pktSize[slave_port_id][master_port_id] += pkt_size;
+ pktCount[cpu_side_port_id][mem_side_port_id]++;
+ pktSize[cpu_side_port_id][mem_side_port_id] += pkt_size;
transDist[pkt_cmd]++;
return true;
// Receive range change only on one of the ports (because they all have
// the same range)
- virtual void recvRangeChange(PortID master_port_id);
+ virtual void recvRangeChange(PortID mem_side_port_id);
- // Receive a request and distribute it among slave ports
+ // Receive a request and distribute it among the memory-side ports
// Simply forwards the packet to the next serial link based on a
// Round-robin counter
- virtual bool recvTimingReq(PacketPtr pkt, PortID slave_port_id);
+ virtual bool recvTimingReq(PacketPtr pkt, PortID cpu_side_port_id);
- int n_master_ports;
+ int numMemSidePorts;
// The round-robin counter
int rr_counter;
MemCheckerMonitor::MemCheckerMonitor(Params* params)
: SimObject(params),
- masterPort(name() + "-master", *this),
- slavePort(name() + "-slave", *this),
+ memSidePort(name() + "-memSidePort", *this),
+ cpuSidePort(name() + "-cpuSidePort", *this),
warnOnly(params->warn_only),
memchecker(params->memchecker)
{}
MemCheckerMonitor::init()
{
// make sure both sides of the monitor are connected
- if (!slavePort.isConnected() || !masterPort.isConnected())
+ if (!cpuSidePort.isConnected() || !memSidePort.isConnected())
fatal("Communication monitor is not connected on both sides.\n");
}
Port &
MemCheckerMonitor::getPort(const std::string &if_name, PortID idx)
{
- if (if_name == "master" || if_name == "mem_side") {
- return masterPort;
- } else if (if_name == "slave" || if_name == "cpu_side") {
- return slavePort;
+ if (if_name == "request" || if_name == "mem_side_port") {
+ return memSidePort;
+ } else if (if_name == "response" || if_name == "cpu_side_port") {
+ return cpuSidePort;
} else {
return SimObject::getPort(if_name, idx);
}
// reads/writes to these location from other devices we do not see.
memchecker->reset(addr, size);
- masterPort.sendFunctional(pkt);
+ memSidePort.sendFunctional(pkt);
DPRINTF(MemCheckerMonitor,
"Forwarded functional access: addr = %#llx, size = %d\n",
// See above.
memchecker->reset(addr, size);
- slavePort.sendFunctionalSnoop(pkt);
+ cpuSidePort.sendFunctionalSnoop(pkt);
DPRINTF(MemCheckerMonitor,
"Received functional snoop: addr = %#llx, size = %d\n",
}
// Attempt to send the packet
- bool successful = masterPort.sendTimingReq(pkt);
+ bool successful = memSidePort.sendTimingReq(pkt);
// If not successful, restore the sender state
if (!successful && expects_response && (is_read || is_write)) {
// At the time where we push the sender-state, we do not yet know
// the serial the MemChecker class will assign to this request. We
// cannot call startRead at the time we push the sender-state, as
- // the masterPort may not be successful in executing sendTimingReq,
- // and in case of a failure, we must not modify the state of the
- // MemChecker.
+ // the memSidePort may not be successful in executing
+ // sendTimingReq, and in case of a failure, we must not
+ // modify the state of the MemChecker.
//
// Once we know that sendTimingReq was successful, we can set the
// serial of the newly constructed sender-state. This is legal, as
}
// Attempt to send the packet
- bool successful = slavePort.sendTimingResp(pkt);
+ bool successful = cpuSidePort.sendTimingResp(pkt);
// If packet successfully send, complete transaction in MemChecker
// instance, and delete sender state, otherwise restore state.
void
MemCheckerMonitor::recvTimingSnoopReq(PacketPtr pkt)
{
- slavePort.sendTimingSnoopReq(pkt);
+ cpuSidePort.sendTimingSnoopReq(pkt);
}
bool
MemCheckerMonitor::recvTimingSnoopResp(PacketPtr pkt)
{
- return masterPort.sendTimingSnoopResp(pkt);
+ return memSidePort.sendTimingSnoopResp(pkt);
}
bool
MemCheckerMonitor::isSnooping() const
{
- // check if the connected master port is snooping
- return slavePort.isSnooping();
+ // check if the connected request port is snooping
+ return cpuSidePort.isSnooping();
}
AddrRangeList
MemCheckerMonitor::getAddrRanges() const
{
- // get the address ranges of the connected slave port
- return masterPort.getAddrRanges();
+ // get the address ranges of the connected CPU-side port
+ return memSidePort.getAddrRanges();
}
void
MemCheckerMonitor::recvReqRetry()
{
- slavePort.sendRetryReq();
+ cpuSidePort.sendRetryReq();
}
void
MemCheckerMonitor::recvRespRetry()
{
- masterPort.sendRetryResp();
+ memSidePort.sendRetryResp();
}
void
MemCheckerMonitor::recvRangeChange()
{
- slavePort.sendRangeChange();
+ cpuSidePort.sendRangeChange();
}
};
/**
- * This is the master port of the communication monitor. All recv
+ * This is the request port of the communication monitor. All recv
* functions call a function in MemCheckerMonitor, where the
- * send function of the slave port is called. Besides this, these
+ * send function of the response port is called. Besides this, these
* functions can also perform actions for capturing statistics.
*/
- class MonitorMasterPort : public RequestPort
+ class MonitorRequestPort : public RequestPort
{
public:
- MonitorMasterPort(const std::string& _name, MemCheckerMonitor& _mon)
+ MonitorRequestPort(const std::string& _name, MemCheckerMonitor& _mon)
: RequestPort(_name, &_mon), mon(_mon)
{ }
};
- /** Instance of master port, facing the memory side */
- MonitorMasterPort masterPort;
+ /** Instance of request port, facing the memory side */
+ MonitorRequestPort memSidePort;
/**
- * This is the slave port of the communication monitor. All recv
+ * This is the response port of the communication monitor. All recv
* functions call a function in MemCheckerMonitor, where the
- * send function of the master port is called. Besides this, these
+ * send function of the request port is called. Besides this, these
* functions can also perform actions for capturing statistics.
*/
- class MonitorSlavePort : public ResponsePort
+ class MonitorResponsePort : public ResponsePort
{
public:
- MonitorSlavePort(const std::string& _name, MemCheckerMonitor& _mon)
+ MonitorResponsePort(const std::string& _name, MemCheckerMonitor& _mon)
: ResponsePort(_name, &_mon), mon(_mon)
{ }
};
- /** Instance of slave port, i.e. on the CPU side */
- MonitorSlavePort slavePort;
+ /** Instance of response port, i.e. on the CPU side */
+ MonitorResponsePort cpuSidePort;
void recvFunctional(PacketPtr pkt);
base_addr + pkt->getSize()) - addr;
stats.readPktSize[ceilLog2(size)]++;
stats.readBursts++;
- stats.masterReadAccesses[pkt->masterId()]++;
+ stats.requestorReadAccesses[pkt->requestorId()]++;
// First check write buffer to see if the data is already at
// the controller
readQueue[mem_pkt->qosValue()].push_back(mem_pkt);
// log packet
- logRequest(MemCtrl::READ, pkt->masterId(), pkt->qosValue(),
+ logRequest(MemCtrl::READ, pkt->requestorId(), pkt->qosValue(),
mem_pkt->addr, 1);
// Update stats
base_addr + pkt->getSize()) - addr;
stats.writePktSize[ceilLog2(size)]++;
stats.writeBursts++;
- stats.masterWriteAccesses[pkt->masterId()]++;
+ stats.requestorWriteAccesses[pkt->requestorId()]++;
// see if we can merge with an existing item in the write
// queue and keep track of whether we have merged or not
isInWriteQueue.insert(burstAlign(addr, is_dram));
// log packet
- logRequest(MemCtrl::WRITE, pkt->masterId(), pkt->qosValue(),
+ logRequest(MemCtrl::WRITE, pkt->requestorId(), pkt->qosValue(),
mem_pkt->addr, 1);
assert(totalWriteQueueSize == isInWriteQueue.size());
if (mem_pkt->burstHelper->burstsServiced ==
mem_pkt->burstHelper->burstCount) {
// we have now serviced all children packets of a system packet
- // so we can now respond to the requester
+ // so we can now respond to the requestor
// @todo we probably want to have a different front end and back
// end latency for split packets
accessAndRespond(mem_pkt->pkt, frontendLatency + backendLatency);
pkt->print());
}
- // turn packet around to go back to requester if response expected
+ // turn packet around to go back to requestor if response expected
if (needsResponse) {
// access already turned the packet into a response
assert(pkt->isResponse());
if (mem_pkt->isRead()) {
++readsThisTime;
// Update latency stats
- stats.masterReadTotalLat[mem_pkt->masterId()] +=
+ stats.requestorReadTotalLat[mem_pkt->requestorId()] +=
mem_pkt->readyTime - mem_pkt->entryTime;
- stats.masterReadBytes[mem_pkt->masterId()] += mem_pkt->size;
+ stats.requestorReadBytes[mem_pkt->requestorId()] += mem_pkt->size;
} else {
++writesThisTime;
- stats.masterWriteBytes[mem_pkt->masterId()] += mem_pkt->size;
- stats.masterWriteTotalLat[mem_pkt->masterId()] +=
+ stats.requestorWriteBytes[mem_pkt->requestorId()] += mem_pkt->size;
+ stats.requestorWriteTotalLat[mem_pkt->requestorId()] +=
mem_pkt->readyTime - mem_pkt->entryTime;
}
}
assert(mem_pkt->readyTime >= curTick());
// log the response
- logResponse(MemCtrl::READ, (*to_read)->masterId(),
+ logResponse(MemCtrl::READ, (*to_read)->requestorId(),
mem_pkt->qosValue(), mem_pkt->getAddr(), 1,
mem_pkt->readyTime - mem_pkt->entryTime);
// Insert into response queue. It will be sent back to the
- // requester at its readyTime
+ // requestor at its readyTime
if (respQueue.empty()) {
assert(!respondEvent.scheduled());
schedule(respondEvent, mem_pkt->readyTime);
isInWriteQueue.erase(burstAlign(mem_pkt->addr, mem_pkt->isDram()));
// log the response
- logResponse(MemCtrl::WRITE, mem_pkt->masterId(),
+ logResponse(MemCtrl::WRITE, mem_pkt->requestorId(),
mem_pkt->qosValue(), mem_pkt->getAddr(), 1,
mem_pkt->readyTime - mem_pkt->entryTime);
ADD_STAT(totGap, "Total gap between requests"),
ADD_STAT(avgGap, "Average gap between requests"),
- ADD_STAT(masterReadBytes, "Per-master bytes read from memory"),
- ADD_STAT(masterWriteBytes, "Per-master bytes write to memory"),
- ADD_STAT(masterReadRate,
- "Per-master bytes read from memory rate (Bytes/sec)"),
- ADD_STAT(masterWriteRate,
- "Per-master bytes write to memory rate (Bytes/sec)"),
- ADD_STAT(masterReadAccesses,
- "Per-master read serviced memory accesses"),
- ADD_STAT(masterWriteAccesses,
- "Per-master write serviced memory accesses"),
- ADD_STAT(masterReadTotalLat,
- "Per-master read total memory access latency"),
- ADD_STAT(masterWriteTotalLat,
- "Per-master write total memory access latency"),
- ADD_STAT(masterReadAvgLat,
- "Per-master read average memory access latency"),
- ADD_STAT(masterWriteAvgLat,
- "Per-master write average memory access latency")
+ ADD_STAT(requestorReadBytes, "Per-requestor bytes read from memory"),
+ ADD_STAT(requestorWriteBytes, "Per-requestor bytes write to memory"),
+ ADD_STAT(requestorReadRate,
+ "Per-requestor bytes read from memory rate (Bytes/sec)"),
+ ADD_STAT(requestorWriteRate,
+ "Per-requestor bytes write to memory rate (Bytes/sec)"),
+ ADD_STAT(requestorReadAccesses,
+ "Per-requestor read serviced memory accesses"),
+ ADD_STAT(requestorWriteAccesses,
+ "Per-requestor write serviced memory accesses"),
+ ADD_STAT(requestorReadTotalLat,
+ "Per-requestor read total memory access latency"),
+ ADD_STAT(requestorWriteTotalLat,
+ "Per-requestor write total memory access latency"),
+ ADD_STAT(requestorReadAvgLat,
+ "Per-requestor read average memory access latency"),
+ ADD_STAT(requestorWriteAvgLat,
+ "Per-requestor write average memory access latency")
{
}
using namespace Stats;
assert(ctrl.system());
- const auto max_masters = ctrl.system()->maxMasters();
+ const auto max_requestors = ctrl.system()->maxRequestors();
avgRdQLen.precision(2);
avgWrQLen.precision(2);
avgWrBWSys.precision(2);
avgGap.precision(2);
- // per-master bytes read and written to memory
- masterReadBytes
- .init(max_masters)
+ // per-requestor bytes read and written to memory
+ requestorReadBytes
+ .init(max_requestors)
.flags(nozero | nonan);
- masterWriteBytes
- .init(max_masters)
+ requestorWriteBytes
+ .init(max_requestors)
.flags(nozero | nonan);
- // per-master bytes read and written to memory rate
- masterReadRate
+ // per-requestor bytes read and written to memory rate
+ requestorReadRate
.flags(nozero | nonan)
.precision(12);
- masterReadAccesses
- .init(max_masters)
+ requestorReadAccesses
+ .init(max_requestors)
.flags(nozero);
- masterWriteAccesses
- .init(max_masters)
+ requestorWriteAccesses
+ .init(max_requestors)
.flags(nozero);
- masterReadTotalLat
- .init(max_masters)
+ requestorReadTotalLat
+ .init(max_requestors)
.flags(nozero | nonan);
- masterReadAvgLat
+ requestorReadAvgLat
.flags(nonan)
.precision(2);
- masterWriteRate
+ requestorWriteRate
.flags(nozero | nonan)
.precision(12);
- masterWriteTotalLat
- .init(max_masters)
+ requestorWriteTotalLat
+ .init(max_requestors)
.flags(nozero | nonan);
- masterWriteAvgLat
+ requestorWriteAvgLat
.flags(nonan)
.precision(2);
- for (int i = 0; i < max_masters; i++) {
- const std::string master = ctrl.system()->getMasterName(i);
- masterReadBytes.subname(i, master);
- masterReadRate.subname(i, master);
- masterWriteBytes.subname(i, master);
- masterWriteRate.subname(i, master);
- masterReadAccesses.subname(i, master);
- masterWriteAccesses.subname(i, master);
- masterReadTotalLat.subname(i, master);
- masterReadAvgLat.subname(i, master);
- masterWriteTotalLat.subname(i, master);
- masterWriteAvgLat.subname(i, master);
+ for (int i = 0; i < max_requestors; i++) {
+ const std::string requestor = ctrl.system()->getRequestorName(i);
+ requestorReadBytes.subname(i, requestor);
+ requestorReadRate.subname(i, requestor);
+ requestorWriteBytes.subname(i, requestor);
+ requestorWriteRate.subname(i, requestor);
+ requestorReadAccesses.subname(i, requestor);
+ requestorWriteAccesses.subname(i, requestor);
+ requestorReadTotalLat.subname(i, requestor);
+ requestorReadAvgLat.subname(i, requestor);
+ requestorWriteTotalLat.subname(i, requestor);
+ requestorWriteAvgLat.subname(i, requestor);
}
// Formula stats
avgGap = totGap / (readReqs + writeReqs);
- masterReadRate = masterReadBytes / simSeconds;
- masterWriteRate = masterWriteBytes / simSeconds;
- masterReadAvgLat = masterReadTotalLat / masterReadAccesses;
- masterWriteAvgLat = masterWriteTotalLat / masterWriteAccesses;
+ requestorReadRate = requestorReadBytes / simSeconds;
+ requestorWriteRate = requestorWriteBytes / simSeconds;
+ requestorReadAvgLat = requestorReadTotalLat / requestorReadAccesses;
+ requestorWriteAvgLat = requestorWriteTotalLat / requestorWriteAccesses;
}
void
}
MemCtrl::MemoryPort::MemoryPort(const std::string& name, MemCtrl& _ctrl)
- : QueuedSlavePort(name, &_ctrl, queue), queue(_ctrl, *this, true),
+ : QueuedResponsePort(name, &_ctrl, queue), queue(_ctrl, *this, true),
ctrl(_ctrl)
{ }
/** This comes from the outside world */
const PacketPtr pkt;
- /** MasterID associated with the packet */
- const MasterID _masterId;
+ /** RequestorID associated with the packet */
+ const RequestorID _requestorId;
const bool read;
inline uint8_t qosValue() const { return _qosValue; }
/**
- * Get the packet MasterID
+ * Get the packet RequestorID
* (interface compatibility with Packet)
*/
- inline MasterID masterId() const { return _masterId; }
+ inline RequestorID requestorId() const { return _requestorId; }
/**
* Get the packet size
uint8_t _bank, uint32_t _row, uint16_t bank_id, Addr _addr,
unsigned int _size)
: entryTime(curTick()), readyTime(curTick()), pkt(_pkt),
- _masterId(pkt->masterId()),
+ _requestorId(pkt->requestorId()),
read(is_read), dram(is_dram), rank(_rank), bank(_bank), row(_row),
bankId(bank_id), addr(_addr), size(_size), burstHelper(NULL),
_qosValue(_pkt->qosValue())
{
private:
- // For now, make use of a queued slave port to avoid dealing with
+ // For now, make use of a queued response port to avoid dealing with
// flow control for the responses being sent back
- class MemoryPort : public QueuedSlavePort
+ class MemoryPort : public QueuedResponsePort
{
RespPacketQueue queue;
Stats::Scalar totGap;
Stats::Formula avgGap;
- // per-master bytes read and written to memory
- Stats::Vector masterReadBytes;
- Stats::Vector masterWriteBytes;
+ // per-requestor bytes read and written to memory
+ Stats::Vector requestorReadBytes;
+ Stats::Vector requestorWriteBytes;
- // per-master bytes read and written to memory rate
- Stats::Formula masterReadRate;
- Stats::Formula masterWriteRate;
+ // per-requestor bytes read and written to memory rate
+ Stats::Formula requestorReadRate;
+ Stats::Formula requestorWriteRate;
- // per-master read and write serviced memory accesses
- Stats::Vector masterReadAccesses;
- Stats::Vector masterWriteAccesses;
+ // per-requestor read and write serviced memory accesses
+ Stats::Vector requestorReadAccesses;
+ Stats::Vector requestorWriteAccesses;
- // per-master read and write total memory access latency
- Stats::Vector masterReadTotalLat;
- Stats::Vector masterWriteTotalLat;
+ // per-requestor read and write total memory access latency
+ Stats::Vector requestorReadTotalLat;
+ Stats::Vector requestorWriteTotalLat;
- // per-master raed and write average memory access latency
- Stats::Formula masterReadAvgLat;
- Stats::Formula masterWriteAvgLat;
+ // per-requestor read and write average memory access latency
+ Stats::Formula requestorReadAvgLat;
+ Stats::Formula requestorWriteAvgLat;
};
CtrlStats stats;
MemDelay::MemDelay(const MemDelayParams *p)
: ClockedObject(p),
- masterPort(name() + "-master", *this),
- slavePort(name() + "-slave", *this),
- reqQueue(*this, masterPort),
- respQueue(*this, slavePort),
- snoopRespQueue(*this, masterPort)
+ requestPort(name() + "-mem_side_port", *this),
+ responsePort(name() + "-cpu_side_port", *this),
+ reqQueue(*this, requestPort),
+ respQueue(*this, responsePort),
+ snoopRespQueue(*this, requestPort)
{
}
void
MemDelay::init()
{
- if (!slavePort.isConnected() || !masterPort.isConnected())
+ if (!responsePort.isConnected() || !requestPort.isConnected())
fatal("Memory delay is not connected on both sides.\n");
}
Port &
MemDelay::getPort(const std::string &if_name, PortID idx)
{
- if (if_name == "master") {
- return masterPort;
- } else if (if_name == "slave") {
- return slavePort;
+ if (if_name == "mem_side_port") {
+ return requestPort;
+ } else if (if_name == "cpu_side_port") {
+ return responsePort;
} else {
return ClockedObject::getPort(if_name, idx);
}
bool
MemDelay::trySatisfyFunctional(PacketPtr pkt)
{
- return slavePort.trySatisfyFunctional(pkt) ||
- masterPort.trySatisfyFunctional(pkt);
+ return responsePort.trySatisfyFunctional(pkt) ||
+ requestPort.trySatisfyFunctional(pkt);
}
MemDelay::RequestPort::RequestPort(const std::string &_name, MemDelay &_parent)
- : QueuedMasterPort(_name, &_parent,
+ : QueuedRequestPort(_name, &_parent,
_parent.reqQueue, _parent.snoopRespQueue),
parent(_parent)
{
const Tick when = curTick() + parent.delayResp(pkt) + receive_delay;
- parent.slavePort.schedTimingResp(pkt, when);
+ parent.responsePort.schedTimingResp(pkt, when);
return true;
}
if (parent.trySatisfyFunctional(pkt)) {
pkt->makeResponse();
} else {
- parent.slavePort.sendFunctionalSnoop(pkt);
+ parent.responsePort.sendFunctionalSnoop(pkt);
}
}
{
const Tick delay = parent.delaySnoopResp(pkt);
- return delay + parent.slavePort.sendAtomicSnoop(pkt);
+ return delay + parent.responsePort.sendAtomicSnoop(pkt);
}
void
MemDelay::RequestPort::recvTimingSnoopReq(PacketPtr pkt)
{
- parent.slavePort.sendTimingSnoopReq(pkt);
+ parent.responsePort.sendTimingSnoopReq(pkt);
}
MemDelay::ResponsePort::
ResponsePort(const std::string &_name, MemDelay &_parent)
- : QueuedSlavePort(_name, &_parent, _parent.respQueue),
+ : QueuedResponsePort(_name, &_parent, _parent.respQueue),
parent(_parent)
{
}
{
const Tick delay = parent.delayReq(pkt) + parent.delayResp(pkt);
- return delay + parent.masterPort.sendAtomic(pkt);
+ return delay + parent.requestPort.sendAtomic(pkt);
}
bool
const Tick when = curTick() + parent.delayReq(pkt) + receive_delay;
- parent.masterPort.schedTimingReq(pkt, when);
+ parent.requestPort.schedTimingReq(pkt, when);
return true;
}
if (parent.trySatisfyFunctional(pkt)) {
pkt->makeResponse();
} else {
- parent.masterPort.sendFunctional(pkt);
+ parent.requestPort.sendFunctional(pkt);
}
}
{
const Tick when = curTick() + parent.delaySnoopResp(pkt);
- parent.masterPort.schedTimingSnoopResp(pkt, when);
+ parent.requestPort.schedTimingSnoopResp(pkt, when);
return true;
}
Port &getPort(const std::string &if_name,
PortID idx=InvalidPortID) override;
- class RequestPort : public QueuedMasterPort
+ class RequestPort : public QueuedRequestPort
{
public:
RequestPort(const std::string &_name, MemDelay &_parent);
void recvTimingSnoopReq(PacketPtr pkt) override;
void recvRangeChange() override {
- parent.slavePort.sendRangeChange();
+ parent.responsePort.sendRangeChange();
}
bool isSnooping() const override {
- return parent.slavePort.isSnooping();
+ return parent.responsePort.isSnooping();
}
private:
MemDelay& parent;
};
- class ResponsePort : public QueuedSlavePort
+ class ResponsePort : public QueuedResponsePort
{
public:
ResponsePort(const std::string &_name, MemDelay &_parent);
bool recvTimingSnoopResp(PacketPtr pkt) override;
AddrRangeList getAddrRanges() const override {
- return parent.masterPort.getAddrRanges();
+ return parent.requestPort.getAddrRanges();
}
bool tryTiming(PacketPtr pkt) override { return true; }
bool trySatisfyFunctional(PacketPtr pkt);
- RequestPort masterPort;
- ResponsePort slavePort;
+ RequestPort requestPort;
+ ResponsePort responsePort;
ReqPacketQueue reqQueue;
RespPacketQueue respQueue;
}
}
-
bool
NVMInterface::burstReady(MemPacket* pkt) const {
bool read_rdy = pkt->isRead() && (ctrl->inReadBusState(true)) &&
{
public:
-
static const uint32_t NO_ROW = -1;
uint32_t openRow;
const Tick wrToRdDlySameBG;
const Tick rdToWrDlySameBG;
+
Enums::PageManage pageMgmt;
/**
* Max column accesses (read and write) per row, before forefully
Stats::Histogram pendingWrites;
Stats::Histogram bytesPerBank;
};
-
NVMStats stats;
void processWriteRespondEvent();
+++ /dev/null
-/*
- * Copyright (c) 2018 ARM Limited
- * All rights reserved
- *
- * The license below extends only to copyright in the software and shall
- * not be construed as granting a license to any other intellectual
- * property including but not limited to intellectual property relating
- * to a hardware implementation of the functionality of the software
- * licensed hereunder. You may use the software subject to the license
- * terms below provided that you ensure that this notice is replicated
- * unmodified and in its entirety in all distributions of the software,
- * modified or unmodified, in source code or in binary form.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/**
- * @file
- * MasterInfo declaration.
- */
-
-#ifndef __MEM_MEM_MASTER_HH__
-#define __MEM_MEM_MASTER_HH__
-
-#include "mem/request.hh"
-#include "sim/sim_object.hh"
-
-/**
- * The MasterInfo class contains data about a specific master.
- */
-struct MasterInfo
-{
- MasterInfo(const SimObject* _obj,
- std::string master_name,
- MasterID master_id)
- : obj(_obj), masterName(master_name), masterId(master_id)
- {}
-
- /** SimObject related to the Master */
- const SimObject* obj;
-
- /** Master Name */
- std::string masterName;
-
- /** Master ID */
- MasterID masterId;
-};
-
-#endif // __MEM_MEM_MASTER_HH__
--- /dev/null
+/*
+ * Copyright (c) 2018 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file
+ * RequestorInfo declaration.
+ */
+
+#ifndef __MEM_MEM_REQUESTOR_HH__
+#define __MEM_MEM_REQUESTOR_HH__
+
+#include "mem/request.hh"
+#include "sim/sim_object.hh"
+
+/**
+ * The RequestorInfo class contains data about a specific requestor.
+ */
+struct RequestorInfo
+{
+ RequestorInfo(const SimObject* _obj,
+ std::string requestor_name,
+ RequestorID requestor_id)
+ : obj(_obj), req_name(requestor_name), id(requestor_id)
+ {}
+
+ /** SimObject related to the Requestor */
+ const SimObject* obj;
+
+ /** Requestor Name */
+ std::string req_name;
+
+ /** Requestor id */
+ RequestorID id;
+};
+
+#endif // __MEM_MEM_REQUESTOR_HH__
NoncoherentXBar::NoncoherentXBar(const NoncoherentXBarParams *p)
: BaseXBar(p)
{
- // create the ports based on the size of the master and slave
- // vector ports, and the presence of the default port, the ports
- // are enumerated starting from zero
- for (int i = 0; i < p->port_master_connection_count; ++i) {
- std::string portName = csprintf("%s.master[%d]", name(), i);
- RequestPort* bp = new NoncoherentXBarMasterPort(portName, *this, i);
- masterPorts.push_back(bp);
+ // create the ports based on the size of the memory-side port and
+ // CPU-side port vector ports, and the presence of the default port,
+ // the ports are enumerated starting from zero
+ for (int i = 0; i < p->port_mem_side_ports_connection_count; ++i) {
+ std::string portName = csprintf("%s.mem_side_port[%d]", name(), i);
+ RequestPort* bp = new NoncoherentXBarRequestPort(portName, *this, i);
+ memSidePorts.push_back(bp);
reqLayers.push_back(new ReqLayer(*bp, *this,
csprintf("reqLayer%d", i)));
}
- // see if we have a default slave device connected and if so add
- // our corresponding master port
+ // see if we have a default CPU-side-port device connected and if so add
+ // our corresponding memory-side port
if (p->port_default_connection_count) {
- defaultPortID = masterPorts.size();
+ defaultPortID = memSidePorts.size();
std::string portName = name() + ".default";
- RequestPort* bp = new NoncoherentXBarMasterPort(portName, *this,
+ RequestPort* bp = new NoncoherentXBarRequestPort(portName, *this,
defaultPortID);
- masterPorts.push_back(bp);
+ memSidePorts.push_back(bp);
reqLayers.push_back(new ReqLayer(*bp, *this, csprintf("reqLayer%d",
defaultPortID)));
}
- // create the slave ports, once again starting at zero
- for (int i = 0; i < p->port_slave_connection_count; ++i) {
- std::string portName = csprintf("%s.slave[%d]", name(), i);
- QueuedSlavePort* bp = new NoncoherentXBarSlavePort(portName, *this, i);
- slavePorts.push_back(bp);
+ // create the CPU-side ports, once again starting at zero
+ for (int i = 0; i < p->port_cpu_side_ports_connection_count; ++i) {
+ std::string portName = csprintf("%s.cpu_side_ports[%d]", name(), i);
+ QueuedResponsePort* bp = new NoncoherentXBarResponsePort(portName,
+ *this, i);
+ cpuSidePorts.push_back(bp);
respLayers.push_back(new RespLayer(*bp, *this,
csprintf("respLayer%d", i)));
}
}
bool
-NoncoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id)
+NoncoherentXBar::recvTimingReq(PacketPtr pkt, PortID cpu_side_port_id)
{
// determine the source port based on the id
- ResponsePort *src_port = slavePorts[slave_port_id];
+ ResponsePort *src_port = cpuSidePorts[cpu_side_port_id];
// we should never see express snoops on a non-coherent crossbar
assert(!pkt->isExpressSnoop());
// determine the destination based on the address
- PortID master_port_id = findPort(pkt->getAddrRange());
+ PortID mem_side_port_id = findPort(pkt->getAddrRange());
// test if the layer should be considered occupied for the current
// port
- if (!reqLayers[master_port_id]->tryTiming(src_port)) {
+ if (!reqLayers[mem_side_port_id]->tryTiming(src_port)) {
DPRINTF(NoncoherentXBar, "recvTimingReq: src %s %s 0x%x BUSY\n",
src_port->name(), pkt->cmdString(), pkt->getAddr());
return false;
!pkt->cacheResponding();
// since it is a normal request, attempt to send the packet
- bool success = masterPorts[master_port_id]->sendTimingReq(pkt);
+ bool success = memSidePorts[mem_side_port_id]->sendTimingReq(pkt);
if (!success) {
DPRINTF(NoncoherentXBar, "recvTimingReq: src %s %s 0x%x RETRY\n",
pkt->headerDelay = old_header_delay;
// occupy until the header is sent
- reqLayers[master_port_id]->failedTiming(src_port,
+ reqLayers[mem_side_port_id]->failedTiming(src_port,
clockEdge(Cycles(1)));
return false;
// remember where to route the response to
if (expect_response) {
assert(routeTo.find(pkt->req) == routeTo.end());
- routeTo[pkt->req] = slave_port_id;
+ routeTo[pkt->req] = cpu_side_port_id;
}
- reqLayers[master_port_id]->succeededTiming(packetFinishTime);
+ reqLayers[mem_side_port_id]->succeededTiming(packetFinishTime);
// stats updates
- pktCount[slave_port_id][master_port_id]++;
- pktSize[slave_port_id][master_port_id] += pkt_size;
+ pktCount[cpu_side_port_id][mem_side_port_id]++;
+ pktSize[cpu_side_port_id][mem_side_port_id] += pkt_size;
transDist[pkt_cmd]++;
return true;
}
bool
-NoncoherentXBar::recvTimingResp(PacketPtr pkt, PortID master_port_id)
+NoncoherentXBar::recvTimingResp(PacketPtr pkt, PortID mem_side_port_id)
{
// determine the source port based on the id
- RequestPort *src_port = masterPorts[master_port_id];
+ RequestPort *src_port = memSidePorts[mem_side_port_id];
// determine the destination
const auto route_lookup = routeTo.find(pkt->req);
assert(route_lookup != routeTo.end());
- const PortID slave_port_id = route_lookup->second;
- assert(slave_port_id != InvalidPortID);
- assert(slave_port_id < respLayers.size());
+ const PortID cpu_side_port_id = route_lookup->second;
+ assert(cpu_side_port_id != InvalidPortID);
+ assert(cpu_side_port_id < respLayers.size());
// test if the layer should be considered occupied for the current
// port
- if (!respLayers[slave_port_id]->tryTiming(src_port)) {
+ if (!respLayers[cpu_side_port_id]->tryTiming(src_port)) {
DPRINTF(NoncoherentXBar, "recvTimingResp: src %s %s 0x%x BUSY\n",
src_port->name(), pkt->cmdString(), pkt->getAddr());
return false;
// determine how long to be crossbar layer is busy
Tick packetFinishTime = clockEdge(Cycles(1)) + pkt->payloadDelay;
- // send the packet through the destination slave port, and pay for
+ // send the packet through the destination CPU-side port, and pay for
// any outstanding latency
Tick latency = pkt->headerDelay;
pkt->headerDelay = 0;
- slavePorts[slave_port_id]->schedTimingResp(pkt, curTick() + latency);
+ cpuSidePorts[cpu_side_port_id]->schedTimingResp(pkt,
+ curTick() + latency);
// remove the request from the routing table
routeTo.erase(route_lookup);
- respLayers[slave_port_id]->succeededTiming(packetFinishTime);
+ respLayers[cpu_side_port_id]->succeededTiming(packetFinishTime);
// stats updates
- pktCount[slave_port_id][master_port_id]++;
- pktSize[slave_port_id][master_port_id] += pkt_size;
+ pktCount[cpu_side_port_id][mem_side_port_id]++;
+ pktSize[cpu_side_port_id][mem_side_port_id] += pkt_size;
transDist[pkt_cmd]++;
return true;
}
void
-NoncoherentXBar::recvReqRetry(PortID master_port_id)
+NoncoherentXBar::recvReqRetry(PortID mem_side_port_id)
{
// responses never block on forwarding them, so the retry will
// always be coming from a port to which we tried to forward a
// request
- reqLayers[master_port_id]->recvRetry();
+ reqLayers[mem_side_port_id]->recvRetry();
}
Tick
-NoncoherentXBar::recvAtomicBackdoor(PacketPtr pkt, PortID slave_port_id,
+NoncoherentXBar::recvAtomicBackdoor(PacketPtr pkt, PortID cpu_side_port_id,
MemBackdoorPtr *backdoor)
{
DPRINTF(NoncoherentXBar, "recvAtomic: packet src %s addr 0x%x cmd %s\n",
- slavePorts[slave_port_id]->name(), pkt->getAddr(),
+ cpuSidePorts[cpu_side_port_id]->name(), pkt->getAddr(),
pkt->cmdString());
unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0;
unsigned int pkt_cmd = pkt->cmdToIndex();
// determine the destination port
- PortID master_port_id = findPort(pkt->getAddrRange());
+ PortID mem_side_port_id = findPort(pkt->getAddrRange());
// stats updates for the request
- pktCount[slave_port_id][master_port_id]++;
- pktSize[slave_port_id][master_port_id] += pkt_size;
+ pktCount[cpu_side_port_id][mem_side_port_id]++;
+ pktSize[cpu_side_port_id][mem_side_port_id] += pkt_size;
transDist[pkt_cmd]++;
// forward the request to the appropriate destination
- auto master = masterPorts[master_port_id];
+ auto mem_side_port = memSidePorts[mem_side_port_id];
Tick response_latency = backdoor ?
- master->sendAtomicBackdoor(pkt, *backdoor) : master->sendAtomic(pkt);
+ mem_side_port->sendAtomicBackdoor(pkt, *backdoor) :
+ mem_side_port->sendAtomic(pkt);
// add the response data
if (pkt->isResponse()) {
pkt_cmd = pkt->cmdToIndex();
// stats updates
- pktCount[slave_port_id][master_port_id]++;
- pktSize[slave_port_id][master_port_id] += pkt_size;
+ pktCount[cpu_side_port_id][mem_side_port_id]++;
+ pktSize[cpu_side_port_id][mem_side_port_id] += pkt_size;
transDist[pkt_cmd]++;
}
}
void
-NoncoherentXBar::recvFunctional(PacketPtr pkt, PortID slave_port_id)
+NoncoherentXBar::recvFunctional(PacketPtr pkt, PortID cpu_side_port_id)
{
if (!pkt->isPrint()) {
// don't do DPRINTFs on PrintReq as it clutters up the output
DPRINTF(NoncoherentXBar,
"recvFunctional: packet src %s addr 0x%x cmd %s\n",
- slavePorts[slave_port_id]->name(), pkt->getAddr(),
+ cpuSidePorts[cpu_side_port_id]->name(), pkt->getAddr(),
pkt->cmdString());
}
- // since our slave ports are queued ports we need to check them as well
- for (const auto& p : slavePorts) {
+ // since our CPU-side ports are queued ports we need to check them as well
+ for (const auto& p : cpuSidePorts) {
// if we find a response that has the data, then the
// downstream caches/memories may be out of date, so simply stop
// here
PortID dest_id = findPort(pkt->getAddrRange());
// forward the request to the appropriate destination
- masterPorts[dest_id]->sendFunctional(pkt);
+ memSidePorts[dest_id]->sendFunctional(pkt);
}
NoncoherentXBar*
#include "params/NoncoherentXBar.hh"
/**
- * A non-coherent crossbar connects a number of non-snooping masters
- * and slaves, and routes the request and response packets based on
- * the address. The request packets issued by the master connected to
+ * A non-coherent crossbar connects a number of non-snooping memory-side ports
+ * and CPU-side ports, and routes the request and response packets based on
+ * the address. The request packets issued by the memory-side port connected to
* a non-coherent crossbar could still snoop in caches attached to a
* coherent crossbar, as is the case with the I/O bus and memory bus
* in most system configurations. No snoops will, however, reach any
- * master on the non-coherent crossbar itself.
+ * memory-side port on the non-coherent crossbar itself.
*
* The non-coherent crossbar can be used as a template for modelling
* PCIe, and non-coherent AMBA and OCP buses, and is typically used
std::vector<RespLayer*> respLayers;
/**
- * Declaration of the non-coherent crossbar slave port type, one
- * will be instantiated for each of the master ports connecting to
+ * Declaration of the non-coherent crossbar CPU-side port type, one
+ * will be instantiated for each of the memory-side ports connecting to
* the crossbar.
*/
- class NoncoherentXBarSlavePort : public QueuedSlavePort
+ class NoncoherentXBarResponsePort : public QueuedResponsePort
{
private:
public:
- NoncoherentXBarSlavePort(const std::string &_name,
+ NoncoherentXBarResponsePort(const std::string &_name,
NoncoherentXBar &_xbar, PortID _id)
- : QueuedSlavePort(_name, &_xbar, queue, _id), xbar(_xbar),
+ : QueuedResponsePort(_name, &_xbar, queue, _id), xbar(_xbar),
queue(_xbar, *this)
{ }
};
/**
- * Declaration of the crossbar master port type, one will be
- * instantiated for each of the slave ports connecting to the
+ * Declaration of the crossbar memory-side port type, one will be
+ * instantiated for each of the CPU-side ports connecting to the
* crossbar.
*/
- class NoncoherentXBarMasterPort : public RequestPort
+ class NoncoherentXBarRequestPort : public RequestPort
{
private:
public:
- NoncoherentXBarMasterPort(const std::string &_name,
+ NoncoherentXBarRequestPort(const std::string &_name,
NoncoherentXBar &_xbar, PortID _id)
: RequestPort(_name, &_xbar, _id), xbar(_xbar)
{ }
}
};
- virtual bool recvTimingReq(PacketPtr pkt, PortID slave_port_id);
- virtual bool recvTimingResp(PacketPtr pkt, PortID master_port_id);
- void recvReqRetry(PortID master_port_id);
- Tick recvAtomicBackdoor(PacketPtr pkt, PortID slave_port_id,
+ virtual bool recvTimingReq(PacketPtr pkt, PortID cpu_side_port_id);
+ virtual bool recvTimingResp(PacketPtr pkt, PortID mem_side_port_id);
+ void recvReqRetry(PortID mem_side_port_id);
+ Tick recvAtomicBackdoor(PacketPtr pkt, PortID cpu_side_port_id,
MemBackdoorPtr *backdoor=nullptr);
- void recvFunctional(PacketPtr pkt, PortID slave_port_id);
+ void recvFunctional(PacketPtr pkt, PortID cpu_side_port_id);
public:
/**
* A Packet is used to encapsulate a transfer between two objects in
* the memory system (e.g., the L1 and L2 cache). (In contrast, a
- * single Request travels all the way from the requester to the
+ * single Request travels all the way from the requestor to the
* ultimate destination and back, possibly being conveyed by several
* different Packets along the way.)
*/
inline void qosValue(const uint8_t qos_value)
{ _qosValue = qos_value; }
- inline MasterID masterId() const { return req->masterId(); }
+ inline RequestorID requestorId() const { return req->requestorId(); }
// Network error conditions... encapsulate them as methods since
// their encoding keeps changing (from result field to command
}
}
-ReqPacketQueue::ReqPacketQueue(EventManager& _em, RequestPort& _masterPort,
+ReqPacketQueue::ReqPacketQueue(EventManager& _em, RequestPort& _mem_side_port,
const std::string _label)
- : PacketQueue(_em, _label, name(_masterPort, _label)),
- masterPort(_masterPort)
+ : PacketQueue(_em, _label, name(_mem_side_port, _label)),
+ memSidePort(_mem_side_port)
{
}
bool
ReqPacketQueue::sendTiming(PacketPtr pkt)
{
- return masterPort.sendTimingReq(pkt);
+ return memSidePort.sendTimingReq(pkt);
}
SnoopRespPacketQueue::SnoopRespPacketQueue(EventManager& _em,
- RequestPort& _masterPort,
+ RequestPort& _mem_side_port,
bool force_order,
const std::string _label)
- : PacketQueue(_em, _label, name(_masterPort, _label), force_order),
- masterPort(_masterPort)
+ : PacketQueue(_em, _label, name(_mem_side_port, _label), force_order),
+ memSidePort(_mem_side_port)
{
}
bool
SnoopRespPacketQueue::sendTiming(PacketPtr pkt)
{
- return masterPort.sendTimingSnoopResp(pkt);
+ return memSidePort.sendTimingSnoopResp(pkt);
}
-RespPacketQueue::RespPacketQueue(EventManager& _em, ResponsePort& _slavePort,
+RespPacketQueue::RespPacketQueue(EventManager& _em,
+ ResponsePort& _cpu_side_port,
bool force_order,
const std::string _label)
- : PacketQueue(_em, _label, name(_slavePort, _label), force_order),
- slavePort(_slavePort)
+ : PacketQueue(_em, _label, name(_cpu_side_port, _label), force_order),
+ cpuSidePort(_cpu_side_port)
{
}
bool
RespPacketQueue::sendTiming(PacketPtr pkt)
{
- return slavePort.sendTimingResp(pkt);
+ return cpuSidePort.sendTimingResp(pkt);
}
/**
* A packet queue is a class that holds deferred packets and later
- * sends them using the associated slave port or master port.
+ * sends them using the associated CPU-side port or memory-side port.
*/
class PacketQueue : public Drainable
{
/**
* Send a packet using the appropriate method for the specific
- * subclass (reuest, response or snoop response).
+ * subclass (request, response or snoop response).
*/
virtual bool sendTiming(PacketPtr pkt) = 0;
protected:
- RequestPort& masterPort;
+ RequestPort& memSidePort;
// Static definition so it can be called when constructing the parent
// without us being completely initialized.
- static const std::string name(const RequestPort& masterPort,
+ static const std::string name(const RequestPort& memSidePort,
const std::string& label)
- { return masterPort.name() + "-" + label; }
+ { return memSidePort.name() + "-" + label; }
public:
/**
* Create a request packet queue, linked to an event manager, a
- * master port, and a label that will be used for functional print
+ * memory-side port, and a label that will be used for functional print
* request packets.
*
* @param _em Event manager used for scheduling this queue
- * @param _masterPort Master port used to send the packets
+ * @param _mem_side_port Memory-side port used to send the packets
* @param _label Label to push on the label stack for print request packets
*/
- ReqPacketQueue(EventManager& _em, RequestPort& _masterPort,
+ ReqPacketQueue(EventManager& _em, RequestPort& _mem_side_port,
const std::string _label = "ReqPacketQueue");
virtual ~ReqPacketQueue() { }
const std::string name() const
- { return name(masterPort, label); }
+ { return name(memSidePort, label); }
bool sendTiming(PacketPtr pkt);
protected:
- RequestPort& masterPort;
+ RequestPort& memSidePort;
// Static definition so it can be called when constructing the parent
// without us being completely initialized.
- static const std::string name(const RequestPort& masterPort,
+ static const std::string name(const RequestPort& memSidePort,
const std::string& label)
- { return masterPort.name() + "-" + label; }
+ { return memSidePort.name() + "-" + label; }
public:
/**
* Create a snoop response packet queue, linked to an event
- * manager, a master port, and a label that will be used for
+ * manager, a memory-side port, and a label that will be used for
* functional print request packets.
*
* @param _em Event manager used for scheduling this queue
- * @param _masterPort Master port used to send the packets
+ * @param _mem_side_port memory-side port used to send the packets
* @param force_order Force insertion order for packets with same address
* @param _label Label to push on the label stack for print request packets
*/
- SnoopRespPacketQueue(EventManager& _em, RequestPort& _masterPort,
+ SnoopRespPacketQueue(EventManager& _em, RequestPort& _mem_side_port,
bool force_order = false,
const std::string _label = "SnoopRespPacketQueue");
virtual ~SnoopRespPacketQueue() { }
const std::string name() const
- { return name(masterPort, label); }
+ { return name(memSidePort, label); }
bool sendTiming(PacketPtr pkt);
protected:
- ResponsePort& slavePort;
+ ResponsePort& cpuSidePort;
// Static definition so it can be called when constructing the parent
// without us being completely initialized.
- static const std::string name(const ResponsePort& slavePort,
+ static const std::string name(const ResponsePort& cpuSidePort,
const std::string& label)
- { return slavePort.name() + "-" + label; }
+ { return cpuSidePort.name() + "-" + label; }
public:
/**
* Create a response packet queue, linked to an event manager, a
- * slave port, and a label that will be used for functional print
+ * CPU-side port, and a label that will be used for functional print
* request packets.
*
* @param _em Event manager used for scheduling this queue
- * @param _slavePort Slave port used to send the packets
+ * @param _cpu_side_port CPU-side port used to send the packets
* @param force_order Force insertion order for packets with same address
* @param _label Label to push on the label stack for print request packets
*/
- RespPacketQueue(EventManager& _em, ResponsePort& _slavePort,
+ RespPacketQueue(EventManager& _em, ResponsePort& _cpu_side_port,
bool force_order = false,
const std::string _label = "RespPacketQueue");
virtual ~RespPacketQueue() { }
const std::string name() const
- { return name(slavePort, label); }
+ { return name(cpuSidePort, label); }
bool sendTiming(PacketPtr pkt);
RequestPort::printAddr(Addr a)
{
auto req = std::make_shared<Request>(
- a, 1, 0, Request::funcMasterId);
+ a, 1, 0, Request::funcRequestorId);
Packet pkt(req, MemCmd::PrintReq);
Packet::PrintReqState prs(std::cerr);
gen.next()) {
auto req = std::make_shared<Request>(
- gen.addr(), gen.size(), flags, Request::funcMasterId);
+ gen.addr(), gen.size(), flags, Request::funcRequestorId);
Packet pkt(req, MemCmd::ReadReq);
pkt.dataStatic(static_cast<uint8_t *>(p));
gen.next()) {
auto req = std::make_shared<Request>(
- gen.addr(), gen.size(), flags, Request::funcMasterId);
+ gen.addr(), gen.size(), flags, Request::funcRequestorId);
Packet pkt(req, MemCmd::WriteReq);
pkt.dataStaticConst(static_cast<const uint8_t *>(p));
# packet trace output file, disabled by default
trace_file = Param.String("", "Packet trace output file")
- # System object to look up the name associated with a master ID
+ # System object to look up the name associated with a requestor ID
system = Param.System(Parent.any, "System the probe belongs to")
header_msg.set_obj_id(name());
header_msg.set_tick_freq(SimClock::Frequency);
- for (int i = 0; i < system->maxMasters(); i++) {
+ for (int i = 0; i < system->maxRequestors(); i++) {
auto id_string = header_msg.add_id_strings();
id_string->set_key(i);
- id_string->set_value(system->getMasterName(i));
+ id_string->set_value(system->getRequestorName(i));
}
traceStream->write(header_msg);
pkt_msg.set_size(pkt_info.size);
if (withPC && pkt_info.pc != 0)
pkt_msg.set_pc(pkt_info.pc);
- pkt_msg.set_pkt_id(pkt_info.master);
+ pkt_msg.set_pkt_id(pkt_info.id);
traceStream->write(pkt_msg);
}
"Memory Controller Requests same-QoS selection policy")
# flag to select QoS syncronised scheduling
- # (calls the scheduler on all masters at every packet arrival)
+ # (calls the scheduler on all requestors at every packet arrival)
qos_syncro_scheduler = Param.Bool(False,
"Enables QoS syncronized scheduling")
qos_priority_escalation = Param.Bool(False,
"Enables QoS priority escalation")
- # Master ID to be mapped to service parameters in QoS schedulers
- qos_masters = VectorParam.String(['']* 16,
- "Master Names to be mapped to service parameters in QoS scheduler")
+ # Requestor ID to be mapped to service parameters in QoS schedulers
+ qos_requestors = VectorParam.String(['']* 16,
+ "Requestor Names to be mapped to service parameters in QoS scheduler")
+ qos_masters = DeprecatedParam(qos_requestors,
+ '`qos_masters` is now called `qos_requestors`')
cxx_class = 'QoS::FixedPriorityPolicy'
cxx_exports = [
- PyBindMethod('initMasterName'),
- PyBindMethod('initMasterObj'),
+ PyBindMethod('initRequestorName'),
+ PyBindMethod('initRequestorObj'),
]
- _mpriorities = None
+ _requestor_priorities = None
- def setMasterPriority(self, master, priority):
- if not self._mpriorities:
- self._mpriorities = []
+ def setRequestorPriority(self, request_port, priority):
+ if not self._requestor_priorities:
+ self._requestor_priorities = []
- self._mpriorities.append([master, priority])
+ self._requestor_priorities.append([request_port, priority])
+
+ def setMasterPriority(self, request_port, priority):
+ warn('QosFixedPriority.setMasterPriority is deprecated in favor of '
+ 'setRequestorPriority. See src/mem/qos/QoSPolicy.py for more '
+ 'information')
+ self.setRequestorPriority(request_port, priority)
def init(self):
- if not self._mpriorities:
- print("Error, use setMasterPriority to init masters/priorities\n");
+ if not self._requestor_priorities:
+ print("Error,"
+ "use setRequestorPriority to init requestors/priorities\n");
exit(1)
else:
- for mprio in self._mpriorities:
- master = mprio[0]
- priority = mprio[1]
- if isinstance(master, string_types):
- self.getCCObject().initMasterName(
- master, int(priority))
+ for prio in self._requestor_priorities:
+ request_port = prio[0]
+ priority = prio[1]
+ if isinstance(request_port, string_types):
+ self.getCCObject().initRequestorName(
+ request_port, int(priority))
else:
- self.getCCObject().initMasterObj(
- master.getCCObject(), priority)
+ self.getCCObject().initRequestorObj(
+ request_port.getCCObject(), priority)
- # default fixed priority value for non-listed Masters
+ # default fixed priority value for non-listed Requestors
qos_fixed_prio_default_prio = Param.UInt8(0,
- "Default priority for non-listed Masters")
+ "Default priority for non-listed Requestors")
class QoSPropFairPolicy(QoSPolicy):
type = 'QoSPropFairPolicy'
cxx_class = 'QoS::PropFairPolicy'
cxx_exports = [
- PyBindMethod('initMasterName'),
- PyBindMethod('initMasterObj'),
+ PyBindMethod('initRequestorName'),
+ PyBindMethod('initRequestorObj'),
]
- _mscores = None
+ _requestor_scores = None
- def setInitialScore(self, master, score):
- if not self._mscores:
- self._mscores = []
+ def setInitialScore(self, request_port, score):
+ if not self._requestor_scores:
+ self._requestor_scores = []
- self._mscores.append([master, score])
+ self._requestor_scores.append([request_port, score])
def init(self):
- if not self._mscores:
- print("Error, use setInitialScore to init masters/scores\n");
+ if not self._requestor_scores:
+ print("Error, use setInitialScore to init requestors/scores\n");
exit(1)
else:
- for mprio in self._mscores:
- master = mprio[0]
- score = mprio[1]
- if isinstance(master, string_types):
- self.getCCObject().initMasterName(
- master, float(score))
+ for prio in self._requestor_scores:
+ request_port = prio[0]
+ score = prio[1]
+ if isinstance(request_port, string_types):
+ self.getCCObject().initRequestorName(
+ request_port, float(score))
else:
- self.getCCObject().initMasterObj(
- master.getCCObject(), float(score))
+ self.getCCObject().initRequestorObj(
+ request_port.getCCObject(), float(score))
weight = Param.Float(0.5, "Pf score weight")
{}
void
-MemCtrl::logRequest(BusState dir, MasterID m_id, uint8_t qos,
+MemCtrl::logRequest(BusState dir, RequestorID id, uint8_t qos,
Addr addr, uint64_t entries)
{
// If needed, initialize all counters and statistics
- // for this master
- addMaster(m_id);
+ // for this requestor
+ addRequestor(id);
DPRINTF(QOS,
- "QoSMemCtrl::logRequest MASTER %s [id %d] address %d"
- " prio %d this master q packets %d"
+ "QoSMemCtrl::logRequest REQUESTOR %s [id %d] address %d"
+ " prio %d this requestor q packets %d"
" - queue size %d - requested entries %d\n",
- masters[m_id], m_id, addr, qos, packetPriorities[m_id][qos],
+ requestors[id], id, addr, qos, packetPriorities[id][qos],
(dir == READ) ? readQueueSizes[qos]: writeQueueSizes[qos],
entries);
totalWriteQueueSize += entries;
}
- packetPriorities[m_id][qos] += entries;
+ packetPriorities[id][qos] += entries;
for (auto j = 0; j < entries; ++j) {
- requestTimes[m_id][addr].push_back(curTick());
+ requestTimes[id][addr].push_back(curTick());
}
// Record statistics
- stats.avgPriority[m_id].sample(qos);
+ stats.avgPriority[id].sample(qos);
// Compute avg priority distance
- for (uint8_t i = 0; i < packetPriorities[m_id].size(); ++i) {
+ for (uint8_t i = 0; i < packetPriorities[id].size(); ++i) {
uint8_t distance =
- (abs(int(qos) - int(i))) * packetPriorities[m_id][i];
+ (abs(int(qos) - int(i))) * packetPriorities[id][i];
if (distance > 0) {
- stats.avgPriorityDistance[m_id].sample(distance);
+ stats.avgPriorityDistance[id].sample(distance);
DPRINTF(QOS,
- "QoSMemCtrl::logRequest MASTER %s [id %d]"
+ "QoSMemCtrl::logRequest REQUESTOR %s [id %d]"
" registering priority distance %d for priority %d"
" (packets %d)\n",
- masters[m_id], m_id, distance, i,
- packetPriorities[m_id][i]);
+ requestors[id], id, distance, i,
+ packetPriorities[id][i]);
}
}
DPRINTF(QOS,
- "QoSMemCtrl::logRequest MASTER %s [id %d] prio %d "
- "this master q packets %d - new queue size %d\n",
- masters[m_id], m_id, qos, packetPriorities[m_id][qos],
+ "QoSMemCtrl::logRequest REQUESTOR %s [id %d] prio %d "
+ "this requestor q packets %d - new queue size %d\n",
+ requestors[id], id, qos, packetPriorities[id][qos],
(dir == READ) ? readQueueSizes[qos]: writeQueueSizes[qos]);
}
void
-MemCtrl::logResponse(BusState dir, MasterID m_id, uint8_t qos,
+MemCtrl::logResponse(BusState dir, RequestorID id, uint8_t qos,
Addr addr, uint64_t entries, double delay)
{
- panic_if(!hasMaster(m_id),
- "Logging response with invalid master\n");
+ panic_if(!hasRequestor(id),
+ "Logging response with invalid requestor\n");
DPRINTF(QOS,
- "QoSMemCtrl::logResponse MASTER %s [id %d] address %d prio"
- " %d this master q packets %d"
+ "QoSMemCtrl::logResponse REQUESTOR %s [id %d] address %d prio"
+ " %d this requestor q packets %d"
" - queue size %d - requested entries %d\n",
- masters[m_id], m_id, addr, qos, packetPriorities[m_id][qos],
+ requestors[id], id, addr, qos, packetPriorities[id][qos],
(dir == READ) ? readQueueSizes[qos]: writeQueueSizes[qos],
entries);
totalWriteQueueSize -= entries;
}
- panic_if(packetPriorities[m_id][qos] == 0,
- "QoSMemCtrl::logResponse master %s negative packets for priority"
- " %d", masters[m_id], qos);
+ panic_if(packetPriorities[id][qos] == 0,
+ "QoSMemCtrl::logResponse requestor %s negative packets "
+ "for priority %d", requestors[id], qos);
- packetPriorities[m_id][qos] -= entries;
+ packetPriorities[id][qos] -= entries;
for (auto j = 0; j < entries; ++j) {
- auto it = requestTimes[m_id].find(addr);
- panic_if(it == requestTimes[m_id].end(),
- "QoSMemCtrl::logResponse master %s unmatched response for"
- " address %d received", masters[m_id], addr);
+ auto it = requestTimes[id].find(addr);
+ panic_if(it == requestTimes[id].end(),
+ "QoSMemCtrl::logResponse requestor %s unmatched response for"
+ " address %d received", requestors[id], addr);
// Load request time
uint64_t requestTime = it->second.front();
// Remove whole address entry if last one
if (it->second.empty()) {
- requestTimes[m_id].erase(it);
+ requestTimes[id].erase(it);
}
// Compute latency
double latency = (double) (curTick() + delay - requestTime)
}
DPRINTF(QOS,
- "QoSMemCtrl::logResponse MASTER %s [id %d] prio %d "
- "this master q packets %d - new queue size %d\n",
- masters[m_id], m_id, qos, packetPriorities[m_id][qos],
+ "QoSMemCtrl::logResponse REQUESTOR %s [id %d] prio %d "
+ "this requestor q packets %d - new queue size %d\n",
+ requestors[id], id, qos, packetPriorities[id][qos],
(dir == READ) ? readQueueSizes[qos]: writeQueueSizes[qos]);
}
uint8_t
-MemCtrl::schedule(MasterID m_id, uint64_t data)
+MemCtrl::schedule(RequestorID id, uint64_t data)
{
if (policy) {
- return policy->schedule(m_id, data);
+ return policy->schedule(id, data);
} else {
DPRINTF(QOS,
- "QoSScheduler::schedule master ID [%d] "
+ "QoSScheduler::schedule requestor id [%d] "
"data received [%d], but QoS scheduler not initialized\n",
- m_id,data);
+ id,data);
return 0;
}
}
assert(pkt->req);
if (policy) {
- return schedule(pkt->req->masterId(), pkt->getSize());
+ return schedule(pkt->req->requestorId(), pkt->getSize());
} else {
DPRINTF(QOS, "QoSScheduler::schedule Packet received [Qv %d], "
"but QoS scheduler not initialized\n",
}
void
-MemCtrl::addMaster(MasterID m_id)
+MemCtrl::addRequestor(RequestorID id)
{
- if (!hasMaster(m_id)) {
- masters.emplace(m_id, _system->getMasterName(m_id));
- packetPriorities[m_id].resize(numPriorities(), 0);
+ if (!hasRequestor(id)) {
+ requestors.emplace(id, _system->getRequestorName(id));
+ packetPriorities[id].resize(numPriorities(), 0);
DPRINTF(QOS,
- "QoSMemCtrl::addMaster registering"
- " Master %s [id %d]\n",
- masters[m_id], m_id);
+ "QoSMemCtrl::addRequestor registering"
+ " Requestor %s [id %d]\n",
+ requestors[id], id);
}
}
using namespace Stats;
System *system = memCtrl._system;
- const auto max_masters = system->maxMasters();
+ const auto max_requestors = system->maxRequestors();
const auto num_priorities = memCtrl.numPriorities();
- // Initializes per master statistics
+ // Initializes per requestor statistics
avgPriority
- .init(max_masters)
+ .init(max_requestors)
.flags(nozero | nonan)
.precision(2)
;
avgPriorityDistance
- .init(max_masters)
+ .init(max_requestors)
.flags(nozero | nonan)
;
.precision(12)
;
- for (int i = 0; i < max_masters; i++) {
- const std::string master = system->getMasterName(i);
- avgPriority.subname(i, master);
- avgPriorityDistance.subname(i, master);
+ for (int i = 0; i < max_requestors; i++) {
+ const std::string name = system->getRequestorName(i);
+ avgPriority.subname(i, name);
+ avgPriorityDistance.subname(i, name);
}
for (int j = 0; j < num_priorities; ++j) {
/**
* Enables QoS synchronized scheduling invokes the QoS scheduler
- * on all masters, at every packet arrival.
+ * on all requestors, at every packet arrival.
*/
const bool qosSyncroScheduler;
- /** Hash of master ID - master name */
- std::unordered_map<MasterID, const std::string> masters;
+ /** Hash of requestor ID - requestor name */
+ std::unordered_map<RequestorID, const std::string> requestors;
- /** Hash of masters - number of packets queued per priority */
- std::unordered_map<MasterID, std::vector<uint64_t> > packetPriorities;
+ /** Hash of requestors - number of packets queued per priority */
+ std::unordered_map<RequestorID, std::vector<uint64_t> > packetPriorities;
- /** Hash of masters - address of request - queue of times of request */
- std::unordered_map<MasterID,
+ /** Hash of requestors - address of request - queue of times of request */
+ std::unordered_map<RequestorID,
std::unordered_map<uint64_t, std::deque<uint64_t>> > requestTimes;
/**
const MemCtrl &memCtrl;
- /** per-master average QoS priority */
+ /** per-requestor average QoS priority */
Stats::VectorStandardDeviation avgPriority;
/**
- * per-master average QoS distance between assigned and
+ * per-requestor average QoS distance between assigned and
* queued values
*/
Stats::VectorStandardDeviation avgPriorityDistance;
/**
* Initializes dynamically counters and
- * statistics for a given Master
+ * statistics for a given Requestor
*
- * @param m_id the master ID
+ * @param id the requestor's ID
*/
- void addMaster(const MasterID m_id);
+ void addRequestor(const RequestorID id);
/**
* Called upon receiving a request or
* updates statistics and updates queues status
*
* @param dir request direction
- * @param m_id master id
+ * @param id requestor id
* @param qos packet qos value
* @param addr packet address
* @param entries number of entries to record
*/
- void logRequest(BusState dir, MasterID m_id, uint8_t qos,
+ void logRequest(BusState dir, RequestorID id, uint8_t qos,
Addr addr, uint64_t entries);
/**
* updates statistics and updates queues status
*
* @param dir response direction
- * @param m_id master id
+ * @param id requestor id
* @param qos packet qos value
* @param addr packet address
* @param entries number of entries to record
* @param delay response delay
*/
- void logResponse(BusState dir, MasterID m_id, uint8_t qos,
+ void logResponse(BusState dir, RequestorID id, uint8_t qos,
Addr addr, uint64_t entries, double delay);
/**
uint64_t queue_entry_size, const PacketPtr pkt);
using SimObject::schedule;
- uint8_t schedule(MasterID m_id, uint64_t data);
+ uint8_t schedule(RequestorID id, uint64_t data);
uint8_t schedule(const PacketPtr pkt);
/**
/**
* Escalates/demotes priority of all packets
- * belonging to the passed master to given
+ * belonging to the passed requestor to given
* priority value
*
* @param queues list of pointers to packet queues
* @param queue_entry_size size of an entry in the queue
- * @param m_id master whose packets priority will change
+ * @param id requestor whose packets priority will change
* @param tgt_prio target priority value
*/
template<typename Queues>
void escalate(std::initializer_list<Queues*> queues,
uint64_t queue_entry_size,
- MasterID m_id, uint8_t tgt_prio);
+ RequestorID id, uint8_t tgt_prio);
/**
* Escalates/demotes priority of all packets
- * belonging to the passed master to given
+ * belonging to the passed requestor to given
* priority value in a specified cluster of queues
* (e.g. read queues or write queues) which is passed
* as an argument to the function.
*
* @param queues reference to packet queues
* @param queue_entry_size size of an entry in the queue
- * @param m_id master whose packets priority will change
+ * @param id requestor whose packets priority will change
* @param curr_prio source queue priority value
* @param tgt_prio target queue priority value
*/
template<typename Queues>
void escalateQueues(Queues& queues, uint64_t queue_entry_size,
- MasterID m_id, uint8_t curr_prio, uint8_t tgt_prio);
+ RequestorID id, uint8_t curr_prio, uint8_t tgt_prio);
public:
/**
BusState getBusStateNext() const { return busStateNext; }
/**
- * hasMaster returns true if the selected master(ID) has
+ * hasRequestor returns true if the selected requestor(ID) has
* been registered in the memory controller, which happens if
* the memory controller has received at least a packet from
- * that master.
+ * that requestor.
*
- * @param m_id master id to lookup
+ * @param id requestor id to lookup
* @return true if the memory controller has received a packet
- * from the master, false otherwise.
+ * from the requestor, false otherwise.
*/
- bool hasMaster(MasterID m_id) const
+ bool hasRequestor(RequestorID id) const
{
- return masters.find(m_id) != masters.end();
+ return requestors.find(id) != requestors.end();
}
/**
template<typename Queues>
void
MemCtrl::escalateQueues(Queues& queues, uint64_t queue_entry_size,
- MasterID m_id, uint8_t curr_prio, uint8_t tgt_prio)
+ RequestorID id, uint8_t curr_prio, uint8_t tgt_prio)
{
auto it = queues[curr_prio].begin();
while (it != queues[curr_prio].end()) {
// No packets left to move
- if (packetPriorities[m_id][curr_prio] == 0)
+ if (packetPriorities[id][curr_prio] == 0)
break;
auto pkt = *it;
DPRINTF(QOS,
"QoSMemCtrl::escalate checking priority %d packet "
- "m_id %d address %d\n", curr_prio,
- pkt->masterId(), pkt->getAddr());
+ "id %d address %d\n", curr_prio,
+ pkt->requestorId(), pkt->getAddr());
// Found a packet to move
- if (pkt->masterId() == m_id) {
+ if (pkt->requestorId() == id) {
uint64_t moved_entries = divCeil(pkt->getSize(),
queue_entry_size);
DPRINTF(QOS,
- "QoSMemCtrl::escalate Master %s [id %d] moving "
+ "QoSMemCtrl::escalate Requestor %s [id %d] moving "
"packet addr %d size %d (p size %d) from priority %d "
"to priority %d - "
- "this master packets %d (entries to move %d)\n",
- masters[m_id], m_id, pkt->getAddr(),
+ "this requestor packets %d (entries to move %d)\n",
+ requestors[id], id, pkt->getAddr(),
pkt->getSize(),
queue_entry_size, curr_prio, tgt_prio,
- packetPriorities[m_id][curr_prio], moved_entries);
+ packetPriorities[id][curr_prio], moved_entries);
if (pkt->isRead()) {
panic_if(readQueueSizes[curr_prio] < moved_entries,
- "QoSMemCtrl::escalate master %s negative READ "
+ "QoSMemCtrl::escalate requestor %s negative READ "
"packets for priority %d",
- masters[m_id], tgt_prio);
+ requestors[id], tgt_prio);
readQueueSizes[curr_prio] -= moved_entries;
readQueueSizes[tgt_prio] += moved_entries;
} else if (pkt->isWrite()) {
panic_if(writeQueueSizes[curr_prio] < moved_entries,
- "QoSMemCtrl::escalate master %s negative WRITE "
+ "QoSMemCtrl::escalate requestor %s negative WRITE "
"packets for priority %d",
- masters[m_id], tgt_prio);
+ requestors[id], tgt_prio);
writeQueueSizes[curr_prio] -= moved_entries;
writeQueueSizes[tgt_prio] += moved_entries;
}
// Erase element from source packet queue, this will
// increment the iterator
it = queues[curr_prio].erase(it);
- panic_if(packetPriorities[m_id][curr_prio] < moved_entries,
- "QoSMemCtrl::escalate master %s negative packets "
+ panic_if(packetPriorities[id][curr_prio] < moved_entries,
+ "QoSMemCtrl::escalate requestor %s negative packets "
"for priority %d",
- masters[m_id], tgt_prio);
+ requestors[id], tgt_prio);
- packetPriorities[m_id][curr_prio] -= moved_entries;
- packetPriorities[m_id][tgt_prio] += moved_entries;
+ packetPriorities[id][curr_prio] -= moved_entries;
+ packetPriorities[id][tgt_prio] += moved_entries;
} else {
// Increment iterator to next location in the queue
it++;
void
MemCtrl::escalate(std::initializer_list<Queues*> queues,
uint64_t queue_entry_size,
- MasterID m_id, uint8_t tgt_prio)
+ RequestorID id, uint8_t tgt_prio)
{
// If needed, initialize all counters and statistics
- // for this master
- addMaster(m_id);
+ // for this requestor
+ addRequestor(id);
DPRINTF(QOS,
- "QoSMemCtrl::escalate Master %s [id %d] to priority "
- "%d (currently %d packets)\n",masters[m_id], m_id, tgt_prio,
- packetPriorities[m_id][tgt_prio]);
+ "QoSMemCtrl::escalate Requestor %s [id %d] to priority "
+ "%d (currently %d packets)\n",requestors[id], id, tgt_prio,
+ packetPriorities[id][tgt_prio]);
for (uint8_t curr_prio = 0; curr_prio < numPriorities(); ++curr_prio) {
// Skip target priority
continue;
// Process other priority packet
- while (packetPriorities[m_id][curr_prio] > 0) {
+ while (packetPriorities[id][curr_prio] > 0) {
DPRINTF(QOS,
"QoSMemCtrl::escalate MID %d checking priority %d "
"(packets %d)- current packets in prio %d: %d\n"
"\t(source read %d source write %d target read %d, "
"target write %d)\n",
- m_id, curr_prio, packetPriorities[m_id][curr_prio],
- tgt_prio, packetPriorities[m_id][tgt_prio],
+ id, curr_prio, packetPriorities[id][curr_prio],
+ tgt_prio, packetPriorities[id][tgt_prio],
readQueueSizes[curr_prio],
writeQueueSizes[curr_prio], readQueueSizes[tgt_prio],
writeQueueSizes[tgt_prio]);
// Check both read and write queue
for (auto q : queues) {
- escalateQueues(*q, queue_entry_size, m_id,
+ escalateQueues(*q, queue_entry_size, id,
curr_prio, tgt_prio);
}
}
}
DPRINTF(QOS,
- "QoSMemCtrl::escalate Completed master %s [id %d] to priority %d "
- "(now %d packets)\n\t(total read %d, total write %d)\n",
- masters[m_id], m_id, tgt_prio, packetPriorities[m_id][tgt_prio],
+ "QoSMemCtrl::escalate Completed requestor %s [id %d] to priority "
+ "%d (now %d packets)\n\t(total read %d, total write %d)\n",
+ requestors[id], id, tgt_prio, packetPriorities[id][tgt_prio],
readQueueSizes[tgt_prio], writeQueueSizes[tgt_prio]);
}
pkt->qosValue(pkt_priority);
if (qosSyncroScheduler) {
- // Call the scheduling function on all other masters.
- for (const auto& m : masters) {
+ // Call the scheduling function on all other requestors.
+ for (const auto& requestor : requestors) {
- if (m.first == pkt->masterId())
+ if (requestor.first == pkt->requestorId())
continue;
- uint8_t prio = schedule(m.first, 0);
+ uint8_t prio = schedule(requestor.first, 0);
if (qosPriorityEscalation) {
DPRINTF(QOS,
"QoSMemCtrl::qosSchedule: (syncro) escalating "
- "MASTER %s to assigned priority %d\n",
- _system->getMasterName(m.first),
+ "REQUESTOR %s to assigned priority %d\n",
+ _system->getRequestorName(requestor.first),
prio);
- escalate(queues, queue_entry_size, m.first, prio);
+ escalate(queues, queue_entry_size, requestor.first, prio);
}
}
}
if (qosPriorityEscalation) {
DPRINTF(QOS,
"QoSMemCtrl::qosSchedule: escalating "
- "MASTER %s to assigned priority %d\n",
- _system->getMasterName(pkt->masterId()),
+ "REQUESTOR %s to assigned priority %d\n",
+ _system->getRequestorName(pkt->requestorId()),
pkt_priority);
- escalate(queues, queue_entry_size, pkt->masterId(), pkt_priority);
+ escalate(queues, queue_entry_size, pkt->requestorId(), pkt_priority);
}
// Update last service tick for selected priority
__func__);
DPRINTF(QOS,
- "%s: MASTER %s request %s addr %lld size %d\n",
+ "%s: REQUESTOR %s request %s addr %lld size %d\n",
__func__,
- _system->getMasterName(pkt->req->masterId()),
+ _system->getRequestorName(pkt->req->requestorId()),
pkt->cmdString(), pkt->getAddr(), pkt->getSize());
uint64_t required_entries = divCeil(pkt->getSize(), memoryPacketSize);
if (req_accepted) {
// The packet is accepted - log it
logRequest(pkt->isRead()? READ : WRITE,
- pkt->req->masterId(),
+ pkt->req->requestorId(),
pkt->qosValue(),
pkt->getAddr(),
required_entries);
for (uint8_t i = 0; i < numPriorities(); ++i) {
std::string plist = "";
for (auto& e : (busState == WRITE ? writeQueue[i]: readQueue[i])) {
- plist += (std::to_string(e->req->masterId())) + " ";
+ plist += (std::to_string(e->req->requestorId())) + " ";
}
DPRINTF(QOS,
"%s priority Queue [%i] contains %i elements, "
queue->erase(p_it);
DPRINTF(QOS,
- "%s scheduling packet address %d for master %s from "
+ "%s scheduling packet address %d for requestor %s from "
"priority queue %d\n", __func__, pkt->getAddr(),
- _system->getMasterName(pkt->req->masterId()),
+ _system->getRequestorName(pkt->req->requestorId()),
curr_prio);
break;
}
uint64_t removed_entries = divCeil(pkt->getSize(), memoryPacketSize);
DPRINTF(QOS,
- "%s scheduled packet address %d for master %s size is %d, "
+ "%s scheduled packet address %d for requestor %s size is %d, "
"corresponds to %d memory packets\n", __func__, pkt->getAddr(),
- _system->getMasterName(pkt->req->masterId()),
+ _system->getRequestorName(pkt->req->requestorId()),
pkt->getSize(), removed_entries);
// Schedule response
// Log the response
logResponse(pkt->isRead()? READ : WRITE,
- pkt->req->masterId(),
+ pkt->req->requestorId(),
pkt->qosValue(),
pkt->getAddr(),
removed_entries, responseLatency);
MemSinkCtrl::MemoryPort::MemoryPort(const std::string& n,
MemSinkCtrl& m)
- : QueuedSlavePort(n, &m, queue, true), memory(m), queue(memory, *this, true)
+ : QueuedResponsePort(n, &m, queue, true),
+ memory(m), queue(memory, *this, true)
{}
AddrRangeList
using PacketQueue = std::deque<PacketPtr>;
private:
- class MemoryPort : public QueuedSlavePort
+ class MemoryPort : public QueuedResponsePort
{
private:
/** reference to parent memory object */
DrainState drain() override;
/**
- * Getter method to access this memory's slave port
+ * Getter method to access this memory's response port
*
* @param if_name interface name
* @param idx port ID number
- * @return reference to this memory's slave port
+ * @return reference to this memory's response port
*/
Port &getPort(const std::string &if_name, PortID=InvalidPortID) override;
/** Write request packets queue buffer size in #packets */
const uint64_t writeBufferSize;
- /** Memory slave port */
+ /** Memory response port */
MemoryPort port;
/**
Policy::schedule(const PacketPtr pkt)
{
assert(pkt->req);
- return schedule(pkt->req->masterId(), pkt->getSize());
+ return schedule(pkt->req->requestorId(), pkt->getSize());
}
} // namespace QoS
void setMemCtrl(MemCtrl* mem) { memCtrl = mem; };
/**
- * Builds a MasterID/value pair given a master input.
- * This will be lookuped in the system list of masters in order
- * to retrieve the associated MasterID.
- * In case the master name/object cannot be resolved, the pairing
+ * Builds a RequestorID/value pair given a requestor input.
+ * This will be looked up in the system list of requestors in order
+ * to retrieve the associated RequestorID.
+ * In case the requestor name/object cannot be resolved, the pairing
* method will panic.
*
- * @param master Master to lookup in the system
- * @param value Value to be associated with the MasterID
- * @return A MasterID/Value pair.
+ * @param requestor Requestor to lookup in the system
+ * @param value Value to be associated with the RequestorID
+ * @return A RequestorID/Value pair.
*/
- template <typename M, typename T>
- std::pair<MasterID, T> pair(M master, T value);
+ template <typename Requestor, typename T>
+ std::pair<RequestorID, T> pair(Requestor requestor, T value);
/**
* Schedules data - must be defined by derived class
*
- * @param mId master id to schedule
+ * @param requestor_id requestor id to schedule
* @param data data to schedule
* @return QoS priority value
*/
- virtual uint8_t schedule(const MasterID mId, const uint64_t data) = 0;
+ virtual uint8_t schedule(const RequestorID requestor_id,
+ const uint64_t data) = 0;
/**
* Schedules a packet. Non virtual interface for the scheduling
- * method requiring a master ID.
+ * method requiring a requestor id.
*
* @param pkt pointer to packet to schedule
* @return QoS priority value
MemCtrl* memCtrl;
};
-template <typename M, typename T>
-std::pair<MasterID, T>
-Policy::pair(M master, T value)
+template <typename Requestor, typename T>
+std::pair<RequestorID, T>
+Policy::pair(Requestor requestor, T value)
{
- auto id = memCtrl->system()->lookupMasterId(master);
+ auto id = memCtrl->system()->lookupRequestorId(requestor);
- panic_if(id == Request::invldMasterId,
- "Unable to find master %s\n", master);
+ panic_if(id == Request::invldRequestorId,
+ "Unable to find requestor %s\n", requestor);
DPRINTF(QOS,
- "Master %s [id %d] associated with QoS data %d\n",
- master, id, value);
+ "Requestor %s [id %d] associated with QoS data %d\n",
+ requestor, id, value);
- return std::pair<MasterID, T>(id, value);
+ return std::pair<RequestorID, T>(id, value);
}
} // namespace QoS
}
void
-FixedPriorityPolicy::initMasterName(std::string master, uint8_t priority)
+FixedPriorityPolicy::initRequestorName(std::string requestor, uint8_t priority)
{
priorityMap.insert(
- this->pair<std::string, uint8_t>(master, priority));
+ this->pair<std::string, uint8_t>(requestor, priority));
}
void
-FixedPriorityPolicy::initMasterObj(const SimObject* master, uint8_t priority)
+FixedPriorityPolicy::initRequestorObj(const SimObject* requestor,
+ uint8_t priority)
{
priorityMap.insert(
- this->pair<const SimObject*, uint8_t>(master, priority));
+ this->pair<const SimObject*, uint8_t>(requestor, priority));
}
uint8_t
-FixedPriorityPolicy::schedule(const MasterID mId, const uint64_t data)
+FixedPriorityPolicy::schedule(const RequestorID id, const uint64_t data)
{
- // Reads a packet's MasterID contained in its encapsulated request
+ // Reads a packet's RequestorID contained in its encapsulated request
// if a match is found in the configured priority map, returns the
// matching priority, else returns zero
- auto ret = priorityMap.find(mId);
+ auto ret = priorityMap.find(id);
if (ret != priorityMap.end()) {
return ret->second;
} else {
- DPRINTF(QOS, "Master %s (MasterID %d) not present in priorityMap, "
- "assigning default priority %d\n",
- memCtrl->system()->getMasterName(mId),
- mId, defaultPriority);
+ DPRINTF(QOS, "Requestor %s (RequestorID %d) not present in "
+ "priorityMap, assigning default priority %d\n",
+ memCtrl->system()->getRequestorName(id),
+ id, defaultPriority);
return defaultPriority;
}
}
/**
* Fixed Priority QoS Policy
*
- * Fixed Priority Policy: based on a configured MasterID to priority map,
- * it returns a fixed QoS priority value: every master has a fixed priority.
+ * Fixed Priority Policy: based on a configured RequestorID to priority map,
+ * it returns a fixed QoS priority value: every requestor has a fixed priority.
*/
class FixedPriorityPolicy : public Policy
{
void init() override;
/**
- * Initialize the fixed master's priority by providing
- * the master's name and priority value.
- * The master's name has to match a name in the system.
+ * Initialize the fixed requestor's priority by providing
+ * the requestor's name and priority value.
+ * The requestor's name has to match a name in the system.
*
- * @param master master's name to lookup.
- * @param priority priority value for the master
+ * @param requestor requestor's name to lookup.
+ * @param priority priority value for the requestor
*/
- void initMasterName(std::string master, uint8_t priority);
+ void initRequestorName(std::string requestor, uint8_t priority);
/**
- * Initialize the fixed master's priority by providing
- * the master's SimObject pointer and priority value.
+ * Initialize the fixed requestor's priority by providing
+ * the requestor's SimObject pointer and priority value.
*
- * @param master master's SimObject pointer to lookup.
- * @param priority priority value for the master
+ * @param requestor requestor's SimObject pointer to lookup.
+ * @param priority priority value for the requestor
*/
- void initMasterObj(const SimObject* master, uint8_t priority);
+ void initRequestorObj(const SimObject* requestor, uint8_t priority);
/**
* Schedules a packet based on fixed priority configuration
*
- * @param mId master id to schedule
+ * @param id requestor id to schedule
* @param data data to schedule
* @return QoS priority value
*/
- virtual uint8_t schedule(const MasterID, const uint64_t) override;
+ virtual uint8_t schedule(const RequestorID, const uint64_t) override;
protected:
- /** Default fixed priority value for non-listed masters */
+ /** Default fixed priority value for non-listed requestors */
const uint8_t defaultPriority;
/**
- * Priority map, associates configured masters with
+ * Priority map, associates configured requestors with
* a fixed QoS priority value
*/
- std::map<MasterID, uint8_t> priorityMap;
+ std::map<RequestorID, uint8_t> priorityMap;
};
} // namespace QoS
PropFairPolicy::~PropFairPolicy()
{}
-template <typename Master>
+template <typename Requestor>
void
-PropFairPolicy::initMaster(const Master master, const double score)
+PropFairPolicy::initRequestor(const Requestor requestor, const double score)
{
- MasterID m_id = memCtrl->system()->lookupMasterId(master);
+ RequestorID id = memCtrl->system()->lookupRequestorId(requestor);
- assert(m_id != Request::invldMasterId);
+ assert(id != Request::invldRequestorId);
- // Setting the Initial score for the selected master.
- history.push_back(std::make_pair(m_id, score));
+ // Setting the Initial score for the selected requestor.
+ history.push_back(std::make_pair(id, score));
fatal_if(history.size() > memCtrl->numPriorities(),
- "Policy's maximum number of masters is currently dictated "
+ "Policy's maximum number of requestors is currently dictated "
"by the maximum number of priorities\n");
}
void
-PropFairPolicy::initMasterName(const std::string master, const double score)
+PropFairPolicy::initRequestorName(const std::string requestor, const double score)
{
- initMaster(master, score);
+ initRequestor(requestor, score);
}
void
-PropFairPolicy::initMasterObj(const SimObject* master, const double score)
+PropFairPolicy::initRequestorObj(const SimObject* requestor, const double score)
{
- initMaster(master, score);
+ initRequestor(requestor, score);
}
double
}
uint8_t
-PropFairPolicy::schedule(const MasterID pkt_mid, const uint64_t pkt_size)
+PropFairPolicy::schedule(const RequestorID pkt_id, const uint64_t pkt_size)
{
auto sort_pred =
- [] (const MasterHistory& lhs, const MasterHistory& rhs)
+ [] (const RequestorHistory& lhs, const RequestorHistory& rhs)
{ return lhs.second > rhs.second; };
// Sorting in reverse in base of personal history:
uint8_t pkt_priority = 0;
for (auto m_hist = history.begin(); m_hist != history.end(); m_hist++) {
- MasterID curr_mid = m_hist->first;
+ RequestorID curr_id = m_hist->first;
double& curr_score = m_hist->second;
- if (curr_mid == pkt_mid) {
+ if (curr_id == pkt_id) {
// The qos priority is the position in the sorted vector.
pkt_priority = std::distance(history.begin(), m_hist);
virtual ~PropFairPolicy();
/**
- * Initialize the master's score by providing
- * the master's name and initial score value.
- * The master's name has to match a name in the system.
+ * Initialize the requestor's score by providing
+ * the requestor's name and initial score value.
+ * The requestor's name has to match a name in the system.
*
- * @param master master's name to lookup.
- * @param score initial score value for the master
+ * @param requestor requestor's name to lookup.
+ * @param score initial score value for the requestor
*/
- void initMasterName(const std::string master, const double score);
+ void initRequestorName(const std::string requestor, const double score);
/**
- * Initialize the master's score by providing
- * the master's SimObject pointer and initial score value.
- * The master's pointer has to match a master in the system.
+ * Initialize the requestor's score by providing
+ * the requestor's SimObject pointer and initial score value.
+ * The requestor's pointer has to match a requestor in the system.
*
- * @param master master's SimObject pointer to lookup.
- * @param score initial score value for the master
+ * @param requestor requestor's SimObject pointer to lookup.
+ * @param score initial score value for the requestor
*/
- void initMasterObj(const SimObject* master, const double score);
+ void initRequestorObj(const SimObject* requestor, const double score);
/**
* Schedules a packet based on proportional fair configuration
*
- * @param m_id master id to schedule
+ * @param id requestor id to schedule
* @param pkt_size size of the packet
* @return QoS priority value
*/
virtual uint8_t
- schedule(const MasterID m_id, const uint64_t pkt_size) override;
+ schedule(const RequestorID id, const uint64_t pkt_size) override;
protected:
- template <typename Master>
- void initMaster(const Master master, const double score);
+ template <typename Requestor>
+ void initRequestor(const Requestor requestor, const double score);
inline double
updateScore(const double old_score, const uint64_t served_bytes) const;
/** PF Policy weight */
const double weight;
- /** history is keeping track of every master's score */
- using MasterHistory = std::pair<MasterID, double>;
- std::vector<MasterHistory> history;
+ /** history is keeping track of every requestor's score */
+ using RequestorHistory = std::pair<RequestorID, double>;
+ std::vector<RequestorHistory> history;
};
} // namespace QoS
{
QueuePolicy::PacketQueue::iterator ret = q->end();
- // Tracks one packet per master in the queue
- std::unordered_map<MasterID, QueuePolicy::PacketQueue::iterator> track;
+ // Tracks one packet per requestor in the queue
+ std::unordered_map<RequestorID, QueuePolicy::PacketQueue::iterator> track;
// Cycle queue only once
for (auto pkt_it = q->begin(); pkt_it != q->end(); ++pkt_it) {
panic_if(!pkt->req,
"QoSQPolicy::lrg detected packet without request");
- // Get Request MasterID
- MasterID m_id = pkt->req->masterId();
+ // Get Request RequestorID
+ RequestorID requestor_id = pkt->req->requestorId();
DPRINTF(QOS, "QoSQPolicy::lrg checking packet "
- "from queue with id %d\n", m_id);
+ "from queue with id %d\n", requestor_id);
- // Check if this is a known master.
- panic_if(memCtrl->hasMaster(m_id),
- "%s: Unrecognized Master\n", __func__);
+ // Check if this is a known requestor.
+ panic_if(memCtrl->hasRequestor(requestor_id),
+ "%s: Unrecognized Requestor\n", __func__);
panic_if(toServe.size() > 0,
"%s: toServe list is empty\n", __func__);
- if (toServe.front() == m_id) {
+ if (toServe.front() == requestor_id) {
DPRINTF(QOS, "QoSQPolicy::lrg matched to served "
- "master id %d\n", m_id);
- // This packet matches the MasterID to be served next
+ "requestor id %d\n", requestor_id);
+ // This packet matches the RequestorID to be served next
// move toServe front to back
- toServe.push_back(m_id);
+ toServe.push_back(requestor_id);
toServe.pop_front();
return pkt_it;
}
- // The master generating the packet is not first in the toServe list
- // (Doesn't have the highest priority among masters)
- // Check if this is the first packet seen with its master ID
+ // The requestor generating the packet is not first in the toServe list
+ // (Doesn't have the highest priority among requestors)
+ // Check if this is the first packet seen with its requestor id
// and remember it. Then keep looping over the remaining packets
// in the queue.
- if (track.find(m_id) == track.end()) {
- track[m_id] = pkt_it;
+ if (track.find(requestor_id) == track.end()) {
+ track[requestor_id] = pkt_it;
DPRINTF(QOS, "QoSQPolicy::lrg tracking a packet for "
- "master id %d\n", m_id);
+ "requestor id %d\n", requestor_id);
}
}
- // If here, the current master to be serviced doesn't have a pending
- // packet in the queue: look for the next master in the list.
- for (const auto& masterId : toServe) {
+ // If here, the current requestor to be serviced doesn't have a pending
+ // packet in the queue: look for the next requestor in the list.
+ for (const auto& requestorId : toServe) {
DPRINTF(QOS, "QoSQPolicy::lrg evaluating alternative "
- "master id %d\n", masterId);
+ "requestor id %d\n", requestorId);
- if (track.find(masterId) != track.end()) {
- ret = track[masterId];
- DPRINTF(QOS, "QoSQPolicy::lrg master id "
- "%d selected for service\n", masterId);
+ if (track.find(requestorId) != track.end()) {
+ ret = track[requestorId];
+ DPRINTF(QOS, "QoSQPolicy::lrg requestor id "
+ "%d selected for service\n", requestorId);
return ret;
}
void
LrgQueuePolicy::enqueuePacket(PacketPtr pkt)
{
- MasterID m_id = pkt->masterId();
- if (!memCtrl->hasMaster(m_id)) {
- toServe.push_back(m_id);
+ RequestorID requestor_id = pkt->requestorId();
+ if (!memCtrl->hasRequestor(requestor_id)) {
+ toServe.push_back(requestor_id);
}
};
/**
* Least Recently Granted Queue Policy
* It selects packets from the queue with a round
- * robin-like policy: using the master id as a switching
+ * robin-like policy: using the requestor id as a switching
* parameter rather than switching over a time quantum.
*/
class LrgQueuePolicy : public QueuePolicy
protected:
/**
* Support structure for lrg algorithms:
- * keeps track of serviced masters,
+ * keeps track of serviced requestors,
* always serve the front element.
*/
- std::list<MasterID> toServe;
+ std::list<RequestorID> toServe;
};
} // namespace QoS
* A queued port is a port that has an infinite queue for outgoing
* packets and thus decouples the module that wants to send
* request/responses from the flow control (retry mechanism) of the
- * port. A queued port can be used by both a master and a slave. The
+ * port. A queued port can be used by both a requestor and a responder. The
* queue is a parameter to allow tailoring of the queue implementation
* (used in the cache).
*/
-class QueuedSlavePort : public ResponsePort
+class QueuedResponsePort : public ResponsePort
{
protected:
* behaviuor in a subclass, and provide the latter to the
* QueuePort constructor.
*/
- QueuedSlavePort(const std::string& name, SimObject* owner,
+ QueuedResponsePort(const std::string& name, SimObject* owner,
RespPacketQueue &resp_queue, PortID id = InvalidPortID) :
ResponsePort(name, owner, id), respQueue(resp_queue)
{ }
- virtual ~QueuedSlavePort() { }
+ virtual ~QueuedResponsePort() { }
/**
* Schedule the sending of a timing response.
};
/**
- * The QueuedMasterPort combines two queues, a request queue and a
+ * The QueuedRequestPort combines two queues, a request queue and a
* snoop response queue, that both share the same port. The flow
* control for requests and snoop responses are completely
* independent, and so each queue manages its own flow control
* (retries).
*/
-class QueuedMasterPort : public RequestPort
+class QueuedRequestPort : public RequestPort
{
protected:
* behaviuor in a subclass, and provide the latter to the
* QueuePort constructor.
*/
- QueuedMasterPort(const std::string& name, SimObject* owner,
+ QueuedRequestPort(const std::string& name, SimObject* owner,
ReqPacketQueue &req_queue,
SnoopRespPacketQueue &snoop_resp_queue,
PortID id = InvalidPortID) :
snoopRespQueue(snoop_resp_queue)
{ }
- virtual ~QueuedMasterPort() { }
+ virtual ~QueuedRequestPort() { }
/**
* Schedule the sending of a timing request.
class ThreadContext;
typedef std::shared_ptr<Request> RequestPtr;
-typedef uint16_t MasterID;
+typedef uint16_t RequestorID;
class Request
{
static const FlagsType HTM_CMD = HTM_START | HTM_COMMIT |
HTM_CANCEL | HTM_ABORT;
- /** Master Ids that are statically allocated
+ /** Requestor Ids that are statically allocated
* @{*/
- enum : MasterID {
- /** This master id is used for writeback requests by the caches */
- wbMasterId = 0,
+ enum : RequestorID {
+ /** This requestor id is used for writeback requests by the caches */
+ wbRequestorId = 0,
/**
- * This master id is used for functional requests that
+ * This requestor id is used for functional requests that
* don't come from a particular device
*/
- funcMasterId = 1,
- /** This master id is used for message signaled interrupts */
- intMasterId = 2,
+ funcRequestorId = 1,
+ /** This requestor id is used for message signaled interrupts */
+ intRequestorId = 2,
/**
- * Invalid master id for assertion checking only. It is
+ * Invalid requestor id for assertion checking only. It is
* invalid behavior to ever send this id as part of a request.
*/
- invldMasterId = std::numeric_limits<MasterID>::max()
+ invldRequestorId = std::numeric_limits<RequestorID>::max()
};
/** @} */
/** The requestor ID which is unique in the system for all ports
* that are capable of issuing a transaction
*/
- MasterID _masterId = invldMasterId;
+ RequestorID _requestorId = invldRequestorId;
/** Flag structure for the request. */
Flags _flags;
* just physical address, size, flags, and timestamp (to curTick()).
* These fields are adequate to perform a request.
*/
- Request(Addr paddr, unsigned size, Flags flags, MasterID mid) :
- _paddr(paddr), _size(size), _masterId(mid), _time(curTick())
+ Request(Addr paddr, unsigned size, Flags flags, RequestorID id) :
+ _paddr(paddr), _size(size), _requestorId(id), _time(curTick())
{
_flags.set(flags);
privateFlags.set(VALID_PADDR|VALID_SIZE);
}
Request(Addr vaddr, unsigned size, Flags flags,
- MasterID mid, Addr pc, ContextID cid,
+ RequestorID id, Addr pc, ContextID cid,
AtomicOpFunctorPtr atomic_op=nullptr)
{
- setVirt(vaddr, size, flags, mid, pc, std::move(atomic_op));
+ setVirt(vaddr, size, flags, id, pc, std::move(atomic_op));
setContext(cid);
}
Request(const Request& other)
: _paddr(other._paddr), _size(other._size),
_byteEnable(other._byteEnable),
- _masterId(other._masterId),
+ _requestorId(other._requestorId),
_flags(other._flags),
_cacheCoherenceFlags(other._cacheCoherenceFlags),
privateFlags(other.privateFlags),
* allocated Request object.
*/
void
- setVirt(Addr vaddr, unsigned size, Flags flags, MasterID mid, Addr pc,
+ setVirt(Addr vaddr, unsigned size, Flags flags, RequestorID id, Addr pc,
AtomicOpFunctorPtr amo_op=nullptr)
{
_vaddr = vaddr;
_size = size;
- _masterId = mid;
+ _requestorId = id;
_pc = pc;
_time = curTick();
}
/** Accesssor for the requestor id. */
- MasterID
- masterId() const
+ RequestorID
+ requestorId() const
{
- return _masterId;
+ return _requestorId;
}
uint32_t
random delays if RubySystem \
randomization flag is True)")
- master = RequestPort("Master port to MessageBuffer receiver")
- slave = ResponsePort("Slave port from MessageBuffer sender")
+ out_port = RequestPort("Request port to MessageBuffer receiver")
+ master = DeprecatedParam(out_port, '`master` is now called `out_port`')
+ in_port = ResponsePort("Response port from MessageBuffer sender")
+ slave = DeprecatedParam(in_port, '`slave` is now called `in_port`')
ext_links = VectorParam.BasicExtLink("Links to external nodes")
int_links = VectorParam.BasicIntLink("Links between internal nodes")
- slave = VectorSlavePort("CPU slave port")
- master = VectorMasterPort("CPU master port")
+ in_port = VectorResponsePort("CPU input port")
+ slave = DeprecatedParam(in_port, '`slave` is now called `in_port`')
+ out_port = VectorRequestPort("CPU output port")
+ master = DeprecatedParam(out_port, '`master` is now called `out_port`')
AbstractController::AbstractController(const Params *p)
: ClockedObject(p), Consumer(this), m_version(p->version),
m_clusterID(p->cluster_id),
- m_masterId(p->system->getMasterId(this)), m_is_blocking(false),
+ m_id(p->system->getRequestorId(this)), m_is_blocking(false),
m_number_of_TBEs(p->number_of_TBEs),
m_transitions_per_cycle(p->transitions_per_cycle),
m_buffer_size(p->buffer_size), m_recycle_latency(p->recycle_latency),
}
RequestPtr req
- = std::make_shared<Request>(mem_msg->m_addr, req_size, 0, m_masterId);
+ = std::make_shared<Request>(mem_msg->m_addr, req_size, 0, m_id);
PacketPtr pkt;
if (mem_msg->getType() == MemoryRequestType_MEMORY_WB) {
pkt = Packet::createWrite(req);
public:
MachineID getMachineID() const { return m_machineID; }
- MasterID getMasterId() const { return m_masterId; }
+ RequestorID getRequestorId() const { return m_id; }
Stats::Histogram& getDelayHist() { return m_delayHistogram; }
Stats::Histogram& getDelayVCHist(uint32_t index)
MachineID m_machineID;
const NodeID m_clusterID;
- // MasterID used by some components of gem5.
- const MasterID m_masterId;
+ // RequestorID used by some components of gem5.
+ const RequestorID m_id;
Network *m_net_ptr;
bool m_is_blocking;
void recvReqRetry();
};
- /* Master port to the memory controller. */
+ /* Request port to the memory controller. */
MemoryPort memoryPort;
// State that is stored in packets sent to the memory controller.
m_records_flushed++;
auto req = std::make_shared<Request>(rec->m_data_address,
m_block_size_bytes, 0,
- Request::funcMasterId);
+ Request::funcRequestorId);
MemCmd::Command requestType = MemCmd::FlushReq;
Packet *pkt = new Packet(req, requestType);
requestType = MemCmd::ReadReq;
req = std::make_shared<Request>(
traceRecord->m_data_address + rec_bytes_read,
- RubySystem::getBlockSizeBytes(), 0, Request::funcMasterId);
+ RubySystem::getBlockSizeBytes(), 0,
+ Request::funcRequestorId);
} else if (traceRecord->m_type == RubyRequestType_IFETCH) {
requestType = MemCmd::ReadReq;
req = std::make_shared<Request>(
traceRecord->m_data_address + rec_bytes_read,
RubySystem::getBlockSizeBytes(),
- Request::INST_FETCH, Request::funcMasterId);
+ Request::INST_FETCH, Request::funcRequestorId);
} else {
requestType = MemCmd::WriteReq;
req = std::make_shared<Request>(
traceRecord->m_data_address + rec_bytes_read,
- RubySystem::getBlockSizeBytes(), 0, Request::funcMasterId);
+ RubySystem::getBlockSizeBytes(), 0,
+ Request::funcRequestorId);
}
Packet *pkt = new Packet(req, requestType);
RubyPort::init();
m_data_block_mask = mask(RubySystem::getBlockSizeBits());
- for (const auto &s_port : slave_ports)
- s_port->sendRangeChange();
+ for (const auto &response_port : response_ports)
+ response_port->sendRangeChange();
}
RequestStatus
// back the requesting CU when we receive write
// complete callbacks for all issued Ruby requests of this
// instruction.
- RubyPort::MemSlavePort* mem_slave_port = ss->port;
+ RubyPort::MemResponsePort* mem_response_port = ss->port;
GPUDynInstPtr gpuDynInst = nullptr;
}
PendingWriteInst& inst = pendingWriteInsts[seqNum];
- inst.addPendingReq(mem_slave_port, gpuDynInst, m_usingRubyTester);
+ inst.addPendingReq(mem_response_port, gpuDynInst,
+ m_usingRubyTester);
}
return true;
for (auto& pkt : mylist) {
RubyPort::SenderState *ss =
safe_cast<RubyPort::SenderState *>(pkt->senderState);
- MemSlavePort *port = ss->port;
+ MemResponsePort *port = ss->port;
assert(port != NULL);
pkt->senderState = ss->predecessor;
// PendingWriteInst tracks the number of outstanding Ruby requests
// per write instruction. Once all requests associated with one instruction
-// are completely done in Ruby, we call back the requester to mark
+// are completely done in Ruby, we call back the requestor to mark
// that this instruction is complete.
class PendingWriteInst
{
{}
void
- addPendingReq(RubyPort::MemSlavePort* port, GPUDynInstPtr inst,
+ addPendingReq(RubyPort::MemResponsePort* port, GPUDynInstPtr inst,
bool usingRubyTester)
{
assert(port);
return (numPendingStores == 0) ? true : false;
}
- // ack the original requester that this write instruction is complete
+ // ack the original requestor that this write instruction is complete
void
ackWriteCompletion(bool usingRubyTester)
{
pkt->senderState = ss;
}
- // send the ack response to the requester
+ // send the ack response to the requestor
originalPort->sendTimingResp(pkt);
}
// which implies multiple ports per instruction. However, we need
// only 1 of the ports to call back the CU. Therefore, here we keep
// track the port that sent the first packet of this instruction.
- RubyPort::MemSlavePort* originalPort;
+ RubyPort::MemResponsePort* originalPort;
// similar to the originalPort, this gpuDynInstPtr is set only for
// the first packet of this instruction.
GPUDynInstPtr gpuDynInstPtr;
class GPUCoalescer : public RubyPort
{
public:
- class GMTokenPort : public TokenSlavePort
+ class GMTokenPort : public TokenResponsePort
{
public:
GMTokenPort(const std::string& name, ClockedObject *owner,
PortID id = InvalidPortID)
- : TokenSlavePort(name, owner, id)
+ : TokenResponsePort(name, owner, id)
{ }
~GMTokenPort() { }
"deadlock/livelock declared")
garnet_standalone = Param.Bool(False, "")
- gmTokenPort = SlavePort("Port to the CU for sharing tokens")
+ gmTokenPort = ResponsePort("Port to the CU for sharing tokens")
RubyPort::SenderState *senderState =
safe_cast<RubyPort::SenderState *>(pkt->popSenderState());
- MemSlavePort *port = safe_cast<MemSlavePort*>(senderState->port);
+ MemResponsePort *port = safe_cast<MemResponsePort*>(senderState->port);
assert(port != nullptr);
delete senderState;
pkt->req->isHTMStart(), pkt->req->isHTMCommit(),
pkt->req->isHTMCancel(), htm_return_code);
- // turn packet around to go back to requester if response expected
+ // turn packet around to go back to requestor if response expected
if (pkt->needsResponse()) {
DPRINTF(RubyPort, "Sending packet back over port\n");
pkt->makeHtmTransactionalReqResponse(
: ClockedObject(p), m_ruby_system(p->ruby_system), m_version(p->version),
m_controller(NULL), m_mandatory_q_ptr(NULL),
m_usingRubyTester(p->using_ruby_tester), system(p->system),
- pioMasterPort(csprintf("%s.pio-master-port", name()), this),
- pioSlavePort(csprintf("%s.pio-slave-port", name()), this),
- memMasterPort(csprintf("%s.mem-master-port", name()), this),
- memSlavePort(csprintf("%s-mem-slave-port", name()), this,
+ pioRequestPort(csprintf("%s.pio-request-port", name()), this),
+ pioResponsePort(csprintf("%s.pio-response-port", name()), this),
+ memRequestPort(csprintf("%s.mem-request-port", name()), this),
+ memResponsePort(csprintf("%s-mem-response-port", name()), this,
p->ruby_system->getAccessBackingStore(), -1,
p->no_retry_on_stall),
- gotAddrRanges(p->port_master_connection_count),
+ gotAddrRanges(p->port_request_ports_connection_count),
m_isCPUSequencer(p->is_cpu_sequencer)
{
assert(m_version != -1);
- // create the slave ports based on the number of connected ports
- for (size_t i = 0; i < p->port_slave_connection_count; ++i) {
- slave_ports.push_back(new MemSlavePort(csprintf("%s.slave%d", name(),
- i), this, p->ruby_system->getAccessBackingStore(),
+ // create the response ports based on the number of connected ports
+ for (size_t i = 0; i < p->port_response_ports_connection_count; ++i) {
+ response_ports.push_back(new MemResponsePort(csprintf
+ ("%s.response_ports%d", name(), i), this,
+ p->ruby_system->getAccessBackingStore(),
i, p->no_retry_on_stall));
}
- // create the master ports based on the number of connected ports
- for (size_t i = 0; i < p->port_master_connection_count; ++i) {
- master_ports.push_back(new PioMasterPort(csprintf("%s.master%d",
- name(), i), this));
+ // create the request ports based on the number of connected ports
+ for (size_t i = 0; i < p->port_request_ports_connection_count; ++i) {
+ request_ports.push_back(new PioRequestPort(csprintf(
+ "%s.request_ports%d", name(), i), this));
}
}
Port &
RubyPort::getPort(const std::string &if_name, PortID idx)
{
- if (if_name == "mem_master_port") {
- return memMasterPort;
- } else if (if_name == "pio_master_port") {
- return pioMasterPort;
- } else if (if_name == "mem_slave_port") {
- return memSlavePort;
- } else if (if_name == "pio_slave_port") {
- return pioSlavePort;
- } else if (if_name == "master") {
+ if (if_name == "mem_request_port") {
+ return memRequestPort;
+ } else if (if_name == "pio_request_port") {
+ return pioRequestPort;
+ } else if (if_name == "mem_response_port") {
+ return memResponsePort;
+ } else if (if_name == "pio_response_port") {
+ return pioResponsePort;
+ } else if (if_name == "request_ports") {
// used by the x86 CPUs to connect the interrupt PIO and interrupt
- // slave port
- if (idx >= static_cast<PortID>(master_ports.size())) {
- panic("RubyPort::getPort master: unknown index %d\n", idx);
+ // response port
+ if (idx >= static_cast<PortID>(request_ports.size())) {
+ panic("%s: unknown %s index (%d)\n", __func__, if_name, idx);
}
- return *master_ports[idx];
- } else if (if_name == "slave") {
+ return *request_ports[idx];
+ } else if (if_name == "response_ports") {
// used by the CPUs to connect the caches to the interconnect, and
- // for the x86 case also the interrupt master
- if (idx >= static_cast<PortID>(slave_ports.size())) {
- panic("RubyPort::getPort slave: unknown index %d\n", idx);
+ // for the x86 case also the interrupt request port
+ if (idx >= static_cast<PortID>(response_ports.size())) {
+ panic("%s: unknown %s index (%d)\n", __func__, if_name, idx);
}
- return *slave_ports[idx];
+ return *response_ports[idx];
}
// pass it along to our super class
return ClockedObject::getPort(if_name, idx);
}
-RubyPort::PioMasterPort::PioMasterPort(const std::string &_name,
+RubyPort::PioRequestPort::PioRequestPort(const std::string &_name,
RubyPort *_port)
- : QueuedMasterPort(_name, _port, reqQueue, snoopRespQueue),
+ : QueuedRequestPort(_name, _port, reqQueue, snoopRespQueue),
reqQueue(*_port, *this), snoopRespQueue(*_port, *this)
{
- DPRINTF(RubyPort, "Created master pioport on sequencer %s\n", _name);
+ DPRINTF(RubyPort, "Created request pioport on sequencer %s\n", _name);
}
-RubyPort::PioSlavePort::PioSlavePort(const std::string &_name,
+RubyPort::PioResponsePort::PioResponsePort(const std::string &_name,
RubyPort *_port)
- : QueuedSlavePort(_name, _port, queue), queue(*_port, *this)
+ : QueuedResponsePort(_name, _port, queue), queue(*_port, *this)
{
- DPRINTF(RubyPort, "Created slave pioport on sequencer %s\n", _name);
+ DPRINTF(RubyPort, "Created response pioport on sequencer %s\n", _name);
}
-RubyPort::MemMasterPort::MemMasterPort(const std::string &_name,
+RubyPort::MemRequestPort::MemRequestPort(const std::string &_name,
RubyPort *_port)
- : QueuedMasterPort(_name, _port, reqQueue, snoopRespQueue),
+ : QueuedRequestPort(_name, _port, reqQueue, snoopRespQueue),
reqQueue(*_port, *this), snoopRespQueue(*_port, *this)
{
- DPRINTF(RubyPort, "Created master memport on ruby sequencer %s\n", _name);
+ DPRINTF(RubyPort, "Created request memport on ruby sequencer %s\n", _name);
}
-RubyPort::MemSlavePort::MemSlavePort(const std::string &_name, RubyPort *_port,
+RubyPort::
+MemResponsePort::MemResponsePort(const std::string &_name, RubyPort *_port,
bool _access_backing_store, PortID id,
bool _no_retry_on_stall)
- : QueuedSlavePort(_name, _port, queue, id), queue(*_port, *this),
+ : QueuedResponsePort(_name, _port, queue, id), queue(*_port, *this),
access_backing_store(_access_backing_store),
no_retry_on_stall(_no_retry_on_stall)
{
- DPRINTF(RubyPort, "Created slave memport on ruby sequencer %s\n", _name);
+ DPRINTF(RubyPort, "Created response memport on ruby sequencer %s\n",
+ _name);
}
bool
-RubyPort::PioMasterPort::recvTimingResp(PacketPtr pkt)
+RubyPort::PioRequestPort::recvTimingResp(PacketPtr pkt)
{
RubyPort *rp = static_cast<RubyPort *>(&owner);
DPRINTF(RubyPort, "Response for address: 0x%#x\n", pkt->getAddr());
// send next cycle
- rp->pioSlavePort.schedTimingResp(
+ rp->pioResponsePort.schedTimingResp(
pkt, curTick() + rp->m_ruby_system->clockPeriod());
return true;
}
-bool RubyPort::MemMasterPort::recvTimingResp(PacketPtr pkt)
+bool RubyPort::MemRequestPort::recvTimingResp(PacketPtr pkt)
{
// got a response from a device
assert(pkt->isResponse());
// First we must retrieve the request port from the sender State
RubyPort::SenderState *senderState =
safe_cast<RubyPort::SenderState *>(pkt->popSenderState());
- MemSlavePort *port = senderState->port;
+ MemResponsePort *port = senderState->port;
assert(port != NULL);
delete senderState;
}
bool
-RubyPort::PioSlavePort::recvTimingReq(PacketPtr pkt)
+RubyPort::PioResponsePort::recvTimingReq(PacketPtr pkt)
{
RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
- for (size_t i = 0; i < ruby_port->master_ports.size(); ++i) {
- AddrRangeList l = ruby_port->master_ports[i]->getAddrRanges();
+ for (size_t i = 0; i < ruby_port->request_ports.size(); ++i) {
+ AddrRangeList l = ruby_port->request_ports[i]->getAddrRanges();
for (auto it = l.begin(); it != l.end(); ++it) {
if (it->contains(pkt->getAddr())) {
// generally it is not safe to assume success here as
// the port could be blocked
bool M5_VAR_USED success =
- ruby_port->master_ports[i]->sendTimingReq(pkt);
+ ruby_port->request_ports[i]->sendTimingReq(pkt);
assert(success);
return true;
}
}
Tick
-RubyPort::PioSlavePort::recvAtomic(PacketPtr pkt)
+RubyPort::PioResponsePort::recvAtomic(PacketPtr pkt)
{
RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
// Only atomic_noncaching mode supported!
panic("Ruby supports atomic accesses only in noncaching mode\n");
}
- for (size_t i = 0; i < ruby_port->master_ports.size(); ++i) {
- AddrRangeList l = ruby_port->master_ports[i]->getAddrRanges();
+ for (size_t i = 0; i < ruby_port->request_ports.size(); ++i) {
+ AddrRangeList l = ruby_port->request_ports[i]->getAddrRanges();
for (auto it = l.begin(); it != l.end(); ++it) {
if (it->contains(pkt->getAddr())) {
- return ruby_port->master_ports[i]->sendAtomic(pkt);
+ return ruby_port->request_ports[i]->sendAtomic(pkt);
}
}
}
}
bool
-RubyPort::MemSlavePort::recvTimingReq(PacketPtr pkt)
+RubyPort::MemResponsePort::recvTimingReq(PacketPtr pkt)
{
DPRINTF(RubyPort, "Timing request for address %#x on port %d\n",
pkt->getAddr(), id);
if (pkt->cmd != MemCmd::MemSyncReq) {
if (!isPhysMemAddress(pkt)) {
assert(!pkt->req->isHTMCmd());
- assert(ruby_port->memMasterPort.isConnected());
+ assert(ruby_port->memRequestPort.isConnected());
DPRINTF(RubyPort, "Request address %#x assumed to be a "
"pio address\n", pkt->getAddr());
// send next cycle
RubySystem *rs = ruby_port->m_ruby_system;
- ruby_port->memMasterPort.schedTimingReq(pkt,
+ ruby_port->memRequestPort.schedTimingReq(pkt,
curTick() + rs->clockPeriod());
return true;
}
}
Tick
-RubyPort::MemSlavePort::recvAtomic(PacketPtr pkt)
+RubyPort::MemResponsePort::recvAtomic(PacketPtr pkt)
{
RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
// Only atomic_noncaching mode supported!
// pio port.
if (pkt->cmd != MemCmd::MemSyncReq) {
if (!isPhysMemAddress(pkt)) {
- assert(ruby_port->memMasterPort.isConnected());
+ assert(ruby_port->memRequestPort.isConnected());
DPRINTF(RubyPort, "Request address %#x assumed to be a "
"pio address\n", pkt->getAddr());
pkt->pushSenderState(new SenderState(this));
// send next cycle
- Tick req_ticks = ruby_port->memMasterPort.sendAtomic(pkt);
+ Tick req_ticks = ruby_port->memRequestPort.sendAtomic(pkt);
return ruby_port->ticksToCycles(req_ticks);
}
}
void
-RubyPort::MemSlavePort::addToRetryList()
+RubyPort::MemResponsePort::addToRetryList()
{
RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
//
- // Unless the requestor do not want retries (e.g., the Ruby tester),
+    // Unless the request port does not want retries (e.g., the Ruby tester),
// record the stalled M5 port for later retry when the sequencer
// becomes free.
//
}
void
-RubyPort::MemSlavePort::recvFunctional(PacketPtr pkt)
+RubyPort::MemResponsePort::recvFunctional(PacketPtr pkt)
{
DPRINTF(RubyPort, "Functional access for address: %#x\n", pkt->getAddr());
// pio port.
if (!isPhysMemAddress(pkt)) {
DPRINTF(RubyPort, "Pio Request for address: 0x%#x\n", pkt->getAddr());
- assert(rp->pioMasterPort.isConnected());
- rp->pioMasterPort.sendFunctional(pkt);
+ assert(rp->pioRequestPort.isConnected());
+ rp->pioRequestPort.sendFunctional(pkt);
return;
}
panic("Unsupported functional command %s\n", pkt->cmdString());
}
- // Unless the requester explicitly said otherwise, generate an error if
- // the functional request failed
+ // Unless the request port explicitly said otherwise, generate an error
+ // if the functional request failed
if (!accessSucceeded && !pkt->suppressFuncError()) {
fatal("Ruby functional %s failed for address %#x\n",
pkt->isWrite() ? "write" : "read", pkt->getAddr());
}
- // turn packet around to go back to requester if response expected
+ // turn packet around to go back to request port if response expected
if (needsResponse) {
// The pkt is already turned into a reponse if the directory
// forwarded the request to the memory controller (see
// First we must retrieve the request port from the sender State
RubyPort::SenderState *senderState =
safe_cast<RubyPort::SenderState *>(pkt->popSenderState());
- MemSlavePort *port = senderState->port;
+ MemResponsePort *port = senderState->port;
assert(port != NULL);
delete senderState;
RubyPort::trySendRetries()
{
//
- // If we had to stall the MemSlavePorts, wake them up because the sequencer
- // likely has free resources now.
+ // If we had to stall the MemResponsePorts, wake them up because the
+ // sequencer likely has free resources now.
//
if (!retryList.empty()) {
// Record the current list of ports to retry on a temporary list
// an immediate retry, which may result in the ports being put back on
// the list. Therefore we want to clear the retryList before calling
// sendRetryReq.
- std::vector<MemSlavePort *> curRetryList(retryList);
+ std::vector<MemResponsePort *> curRetryList(retryList);
retryList.clear();
}
void
-RubyPort::MemSlavePort::hitCallback(PacketPtr pkt)
+RubyPort::MemResponsePort::hitCallback(PacketPtr pkt)
{
bool needsResponse = pkt->needsResponse();
- // Unless specified at configuraiton, all responses except failed SC
+ // Unless specified at configuration, all responses except failed SC
// and Flush operations access M5 physical memory.
bool accessPhysMem = access_backing_store;
// We must check device memory first in case it overlaps with the
// system memory range.
if (ruby_port->system->isDeviceMemAddr(pkt)) {
- auto dmem = ruby_port->system->getDeviceMemory(pkt->masterId());
+ auto dmem = ruby_port->system->getDeviceMemory(pkt->requestorId());
dmem->access(pkt);
} else if (ruby_port->system->isMemAddr(pkt->getAddr())) {
rs->getPhysMem()->access(pkt);
pkt->makeResponse();
}
- // turn packet around to go back to requester if response expected
+ // turn packet around to go back to request port if response expected
if (needsResponse || pkt->isResponse()) {
DPRINTF(RubyPort, "Sending packet back over port\n");
// Send a response in the same cycle. There is no need to delay the
}
AddrRangeList
-RubyPort::PioSlavePort::getAddrRanges() const
+RubyPort::PioResponsePort::getAddrRanges() const
{
- // at the moment the assumption is that the master does not care
+ // at the moment the assumption is that the request port does not care
AddrRangeList ranges;
RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
- for (size_t i = 0; i < ruby_port->master_ports.size(); ++i) {
+ for (size_t i = 0; i < ruby_port->request_ports.size(); ++i) {
ranges.splice(ranges.begin(),
- ruby_port->master_ports[i]->getAddrRanges());
+ ruby_port->request_ports[i]->getAddrRanges());
}
for (const auto M5_VAR_USED &r : ranges)
DPRINTF(RubyPort, "%s\n", r.to_string());
}
bool
-RubyPort::MemSlavePort::isPhysMemAddress(PacketPtr pkt) const
+RubyPort::MemResponsePort::isPhysMemAddress(PacketPtr pkt) const
{
RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
return ruby_port->system->isMemAddr(pkt->getAddr())
DPRINTF(RubyPort, "Sending invalidations.\n");
// Allocate the invalidate request and packet on the stack, as it is
// assumed they will not be modified or deleted by receivers.
- // TODO: should this really be using funcMasterId?
+ // TODO: should this really be using funcRequestorId?
auto request = std::make_shared<Request>(
address, RubySystem::getBlockSizeBytes(), 0,
- Request::funcMasterId);
+ Request::funcRequestorId);
// Use a single packet to signal all snooping ports of the invalidation.
// This assumes that snooping ports do NOT modify the packet/request
Packet pkt(request, MemCmd::InvalidateReq);
- for (CpuPortIter p = slave_ports.begin(); p != slave_ports.end(); ++p) {
- // check if the connected master port is snooping
+ for (CpuPortIter p = response_ports.begin(); p != response_ports.end();
+ ++p) {
+ // check if the connected request port is snooping
if ((*p)->isSnooping()) {
// send as a snoop request
(*p)->sendTimingSnoopReq(&pkt);
}
void
-RubyPort::PioMasterPort::recvRangeChange()
+RubyPort::PioRequestPort::recvRangeChange()
{
RubyPort &r = static_cast<RubyPort &>(owner);
r.gotAddrRanges--;
if (r.gotAddrRanges == 0 && FullSystem) {
- r.pioSlavePort.sendRangeChange();
+ r.pioResponsePort.sendRangeChange();
}
}
RubyPort::functionalWrite(Packet *func_pkt)
{
int num_written = 0;
- for (auto port : slave_ports) {
+ for (auto port : response_ports) {
if (port->trySatisfyFunctional(func_pkt)) {
num_written += 1;
}
class RubyPort : public ClockedObject
{
public:
- class MemMasterPort : public QueuedMasterPort
+ class MemRequestPort : public QueuedRequestPort
{
private:
ReqPacketQueue reqQueue;
SnoopRespPacketQueue snoopRespQueue;
public:
- MemMasterPort(const std::string &_name, RubyPort *_port);
+ MemRequestPort(const std::string &_name, RubyPort *_port);
protected:
bool recvTimingResp(PacketPtr pkt);
void recvRangeChange() {}
};
- class MemSlavePort : public QueuedSlavePort
+ class MemResponsePort : public QueuedResponsePort
{
private:
RespPacketQueue queue;
bool no_retry_on_stall;
public:
- MemSlavePort(const std::string &_name, RubyPort *_port,
+ MemResponsePort(const std::string &_name, RubyPort *_port,
bool _access_backing_store,
PortID id, bool _no_retry_on_stall);
void hitCallback(PacketPtr pkt);
bool isPhysMemAddress(PacketPtr pkt) const;
};
- class PioMasterPort : public QueuedMasterPort
+ class PioRequestPort : public QueuedRequestPort
{
private:
ReqPacketQueue reqQueue;
SnoopRespPacketQueue snoopRespQueue;
public:
- PioMasterPort(const std::string &_name, RubyPort *_port);
+ PioRequestPort(const std::string &_name, RubyPort *_port);
protected:
bool recvTimingResp(PacketPtr pkt);
void recvRangeChange();
};
- class PioSlavePort : public QueuedSlavePort
+ class PioResponsePort : public QueuedResponsePort
{
private:
RespPacketQueue queue;
public:
- PioSlavePort(const std::string &_name, RubyPort *_port);
+ PioResponsePort(const std::string &_name, RubyPort *_port);
protected:
bool recvTimingReq(PacketPtr pkt);
Tick recvAtomic(PacketPtr pkt);
void recvFunctional(PacketPtr pkt)
- { panic("recvFunctional should never be called on pio slave port!"); }
+ { panic("recvFunctional should never be called on pio response "
+ "port!"); }
AddrRangeList getAddrRanges() const;
};
struct SenderState : public Packet::SenderState
{
- MemSlavePort *port;
- SenderState(MemSlavePort * _port) : port(_port)
+ MemResponsePort *port;
+ SenderState(MemResponsePort * _port) : port(_port)
{}
};
* Called by the PIO port when receiving a timing response.
*
* @param pkt Response packet
- * @param master_port_id Port id of the PIO port
+ * @param request_port_id Port id of the PIO port
*
* @return Whether successfully sent
*/
- bool recvTimingResp(PacketPtr pkt, PortID master_port_id);
+ bool recvTimingResp(PacketPtr pkt, PortID request_port_id);
RubySystem *m_ruby_system;
uint32_t m_version;
bool m_usingRubyTester;
System* system;
- std::vector<MemSlavePort *> slave_ports;
+ std::vector<MemResponsePort *> response_ports;
private:
- bool onRetryList(MemSlavePort * port)
+ bool onRetryList(MemResponsePort * port)
{
return (std::find(retryList.begin(), retryList.end(), port) !=
retryList.end());
}
- void addToRetryList(MemSlavePort * port)
+ void addToRetryList(MemResponsePort * port)
{
if (onRetryList(port)) return;
retryList.push_back(port);
}
- PioMasterPort pioMasterPort;
- PioSlavePort pioSlavePort;
- MemMasterPort memMasterPort;
- MemSlavePort memSlavePort;
+ PioRequestPort pioRequestPort;
+ PioResponsePort pioResponsePort;
+ MemRequestPort memRequestPort;
+ MemResponsePort memResponsePort;
unsigned int gotAddrRanges;
/** Vector of M5 Ports attached to this Ruby port. */
- typedef std::vector<MemSlavePort *>::iterator CpuPortIter;
- std::vector<PioMasterPort *> master_ports;
+ typedef std::vector<MemResponsePort *>::iterator CpuPortIter;
+ std::vector<PioRequestPort *> request_ports;
//
// Based on similar code in the M5 bus. Stores pointers to those ports
// that should be called when the Sequencer becomes available after a stall.
//
- std::vector<MemSlavePort *> retryList;
+ std::vector<MemResponsePort *> retryList;
bool m_isCPUSequencer;
};
machineToNetwork.insert(std::make_pair(mach_id, network_id));
}
-// This registers all master IDs in the system for functional reads. This
-// should be called in init() since master IDs are obtained in a SimObject's
+// This registers all requestor IDs in the system for functional reads. This
+// should be called in init() since requestor IDs are obtained in a SimObject's
// constructor and there are functional reads/writes between init() and
// startup().
void
-RubySystem::registerMasterIDs()
+RubySystem::registerRequestorIDs()
{
- // Create the map for MasterID to network node. This is done in init()
- // because all MasterIDs must be obtained in the constructor and
+ // Create the map for RequestorID to network node. This is done in init()
+ // because all RequestorIDs must be obtained in the constructor and
// AbstractControllers are registered in their constructor. This is done
// in two steps: (1) Add all of the AbstractControllers. Since we don't
- // have a mapping of MasterID to MachineID this is the easiest way to
- // filter out AbstractControllers from non-Ruby masters. (2) Go through
- // the system's list of MasterIDs and add missing MasterIDs to network 0
- // (the default).
+ // have a mapping of RequestorID to MachineID this is the easiest way to
+ // filter out AbstractControllers from non-Ruby requestors. (2) Go through
+ // the system's list of RequestorIDs and add missing RequestorIDs to
+ // network 0 (the default).
for (auto& cntrl : m_abs_cntrl_vec) {
- MasterID mid = cntrl->getMasterId();
+ RequestorID id = cntrl->getRequestorId();
MachineID mach_id = cntrl->getMachineID();
// These are setup in Network constructor and should exist
MachineIDToString(mach_id).c_str());
auto network_id = machineToNetwork[mach_id];
- masterToNetwork.insert(std::make_pair(mid, network_id));
+ requestorToNetwork.insert(std::make_pair(id, network_id));
// Create helper vectors for each network to iterate over.
netCntrls[network_id].push_back(cntrl);
}
- // Default all other master IDs to network 0
- for (auto mid = 0; mid < params()->system->maxMasters(); ++mid) {
- if (!masterToNetwork.count(mid)) {
- masterToNetwork.insert(std::make_pair(mid, 0));
+ // Default all other requestor IDs to network 0
+ for (auto id = 0; id < params()->system->maxRequestors(); ++id) {
+ if (!requestorToNetwork.count(id)) {
+ requestorToNetwork.insert(std::make_pair(id, 0));
}
}
}
void
RubySystem::init()
{
- registerMasterIDs();
+ registerRequestorIDs();
}
void
unsigned int num_invalid = 0;
// Only send functional requests within the same network.
- assert(masterToNetwork.count(pkt->masterId()));
- int master_net_id = masterToNetwork[pkt->masterId()];
- assert(netCntrls.count(master_net_id));
+ assert(requestorToNetwork.count(pkt->requestorId()));
+ int request_net_id = requestorToNetwork[pkt->requestorId()];
+ assert(netCntrls.count(request_net_id));
AbstractController *ctrl_ro = nullptr;
AbstractController *ctrl_rw = nullptr;
// In this loop we count the number of controllers that have the given
// address in read only, read write and busy states.
- for (auto& cntrl : netCntrls[master_net_id]) {
+ for (auto& cntrl : netCntrls[request_net_id]) {
access_perm = cntrl-> getAccessPermission(line_address);
if (access_perm == AccessPermission_Read_Only){
num_ro++;
// The reason is because the Backing_Store memory could easily be stale, if
// there are copies floating around the cache hierarchy, so you want to read
// it only if it's not in the cache hierarchy at all.
- int num_controllers = netCntrls[master_net_id].size();
+ int num_controllers = netCntrls[request_net_id].size();
if (num_invalid == (num_controllers - 1) && num_backing_store == 1) {
DPRINTF(RubySystem, "only copy in Backing_Store memory, read from it\n");
ctrl_backing_store->functionalRead(line_address, pkt);
DPRINTF(RubySystem, "Controllers functionalRead lookup "
"(num_maybe_stale=%d, num_busy = %d)\n",
num_maybe_stale, num_busy);
- for (auto& cntrl : netCntrls[master_net_id]) {
+ for (auto& cntrl : netCntrls[request_net_id]) {
if (cntrl->functionalReadBuffers(pkt))
return true;
}
uint32_t M5_VAR_USED num_functional_writes = 0;
// Only send functional requests within the same network.
- assert(masterToNetwork.count(pkt->masterId()));
- int master_net_id = masterToNetwork[pkt->masterId()];
- assert(netCntrls.count(master_net_id));
+ assert(requestorToNetwork.count(pkt->requestorId()));
+ int request_net_id = requestorToNetwork[pkt->requestorId()];
+ assert(netCntrls.count(request_net_id));
- for (auto& cntrl : netCntrls[master_net_id]) {
+ for (auto& cntrl : netCntrls[request_net_id]) {
num_functional_writes += cntrl->functionalWriteBuffers(pkt);
access_perm = cntrl->getAccessPermission(line_addr);
void registerNetwork(Network*);
void registerAbstractController(AbstractController*);
void registerMachineID(const MachineID& mach_id, Network* network);
- void registerMasterIDs();
+ void registerRequestorIDs();
bool eventQueueEmpty() { return eventq->empty(); }
void enqueueRubyEvent(Tick tick)
Cycles m_start_cycle;
std::unordered_map<MachineID, unsigned> machineToNetwork;
- std::unordered_map<MasterID, unsigned> masterToNetwork;
+ std::unordered_map<RequestorID, unsigned> requestorToNetwork;
std::unordered_map<unsigned, std::vector<AbstractController*>> netCntrls;
public:
cxx_header = "mem/ruby/system/RubyPort.hh"
version = Param.Int(0, "")
- slave = VectorSlavePort("CPU slave port")
- master = VectorMasterPort("CPU master port")
- pio_master_port = RequestPort("Ruby mem master port")
- mem_master_port = RequestPort("Ruby mem master port")
- pio_slave_port = ResponsePort("Ruby pio slave port")
- mem_slave_port = ResponsePort("Ruby memory port")
+ response_ports = VectorResponsePort("CPU response port")
+ slave = DeprecatedParam(response_ports,
+ '`slave` is now called `response_ports`')
+ request_ports = VectorRequestPort("CPU request port")
+ master = DeprecatedParam(request_ports,
+ '`master` is now called `request_ports`')
+ pio_request_port = RequestPort("Ruby pio request port")
+ pio_master_port = DeprecatedParam(pio_request_port,
+ '`pio_master_port` is now called `pio_request_port`')
+ mem_request_port = RequestPort("Ruby mem request port")
+ mem_master_port = DeprecatedParam(mem_request_port,
+ '`mem_master_port` is now called `mem_request_port`')
+    pio_response_port = ResponsePort("Ruby pio response port")
+    pio_slave_port = DeprecatedParam(pio_response_port,
+        '`pio_slave_port` is now called `pio_response_port`')
+    mem_response_port = ResponsePort("Ruby memory port")
+    mem_slave_port = DeprecatedParam(mem_response_port,
+        '`mem_slave_port` is now called `mem_response_port`')
using_ruby_tester = Param.Bool(False, "")
no_retry_on_stall = Param.Bool(False, "")
RubyPort::SenderState *ss =
safe_cast<RubyPort::SenderState *>
(writeCompletePkt->senderState);
- MemSlavePort *port = ss->port;
+ MemResponsePort *port = ss->port;
assert(port != NULL);
writeCompletePkt->senderState = ss->predecessor;
#include "debug/SerialLink.hh"
#include "params/SerialLink.hh"
-SerialLink::SerialLinkSlavePort::SerialLinkSlavePort(const std::string& _name,
+SerialLink::SerialLinkResponsePort::
+SerialLinkResponsePort(const std::string& _name,
SerialLink& _serial_link,
- SerialLinkMasterPort& _masterPort,
+ SerialLinkRequestPort& _mem_side_port,
Cycles _delay, int _resp_limit,
const std::vector<AddrRange>&
_ranges)
: ResponsePort(_name, &_serial_link), serial_link(_serial_link),
- masterPort(_masterPort), delay(_delay),
+ mem_side_port(_mem_side_port), delay(_delay),
ranges(_ranges.begin(), _ranges.end()),
outstandingResponses(0), retryReq(false),
respQueueLimit(_resp_limit),
{
}
-SerialLink::SerialLinkMasterPort::SerialLinkMasterPort(const std::string&
+SerialLink::SerialLinkRequestPort::SerialLinkRequestPort(const std::string&
_name, SerialLink& _serial_link,
- SerialLinkSlavePort& _slavePort,
- Cycles _delay, int _req_limit)
+ SerialLinkResponsePort&
+ _cpu_side_port, Cycles _delay,
+ int _req_limit)
: RequestPort(_name, &_serial_link), serial_link(_serial_link),
- slavePort(_slavePort), delay(_delay), reqQueueLimit(_req_limit),
+ cpu_side_port(_cpu_side_port), delay(_delay), reqQueueLimit(_req_limit),
sendEvent([this]{ trySendTiming(); }, _name)
{
}
SerialLink::SerialLink(SerialLinkParams *p)
: ClockedObject(p),
- slavePort(p->name + ".slave", *this, masterPort,
+ cpu_side_port(p->name + ".cpu_side_port", *this, mem_side_port,
ticksToCycles(p->delay), p->resp_size, p->ranges),
- masterPort(p->name + ".master", *this, slavePort,
+ mem_side_port(p->name + ".mem_side_port", *this, cpu_side_port,
ticksToCycles(p->delay), p->req_size),
num_lanes(p->num_lanes),
link_speed(p->link_speed)
Port&
SerialLink::getPort(const std::string &if_name, PortID idx)
{
- if (if_name == "master")
- return masterPort;
- else if (if_name == "slave")
- return slavePort;
+ if (if_name == "mem_side_port")
+ return mem_side_port;
+ else if (if_name == "cpu_side_port")
+ return cpu_side_port;
else
// pass it along to our super class
return ClockedObject::getPort(if_name, idx);
SerialLink::init()
{
// make sure both sides are connected and have the same block size
- if (!slavePort.isConnected() || !masterPort.isConnected())
+ if (!cpu_side_port.isConnected() || !mem_side_port.isConnected())
fatal("Both ports of a serial_link must be connected.\n");
- // notify the master side of our address ranges
- slavePort.sendRangeChange();
+ // notify the request side of our address ranges
+ cpu_side_port.sendRangeChange();
}
bool
-SerialLink::SerialLinkSlavePort::respQueueFull() const
+SerialLink::SerialLinkResponsePort::respQueueFull() const
{
return outstandingResponses == respQueueLimit;
}
bool
-SerialLink::SerialLinkMasterPort::reqQueueFull() const
+SerialLink::SerialLinkRequestPort::reqQueueFull() const
{
return transmitList.size() == reqQueueLimit;
}
bool
-SerialLink::SerialLinkMasterPort::recvTimingResp(PacketPtr pkt)
+SerialLink::SerialLinkRequestPort::recvTimingResp(PacketPtr pkt)
{
- // all checks are done when the request is accepted on the slave
+ // all checks are done when the request is accepted on the response
// side, so we are guaranteed to have space for the response
DPRINTF(SerialLink, "recvTimingResp: %s addr 0x%x\n",
pkt->cmdString(), pkt->getAddr());
// one crosses this link faster than the first one (because the packet
// waits in the link based on its size). This can reorder the received
// response.
- slavePort.schedTimingResp(pkt, t);
+ cpu_side_port.schedTimingResp(pkt, t);
return true;
}
bool
-SerialLink::SerialLinkSlavePort::recvTimingReq(PacketPtr pkt)
+SerialLink::SerialLinkResponsePort::recvTimingReq(PacketPtr pkt)
{
DPRINTF(SerialLink, "recvTimingReq: %s addr 0x%x\n",
pkt->cmdString(), pkt->getAddr());
transmitList.size(), outstandingResponses);
// if the request queue is full then there is no hope
- if (masterPort.reqQueueFull()) {
+ if (mem_side_port.reqQueueFull()) {
DPRINTF(SerialLink, "Request queue full\n");
retryReq = true;
} else if ( !retryReq ) {
// that the second one crosses this link faster than the first one
// (because the packet waits in the link based on its size).
// This can reorder the received response.
- masterPort.schedTimingReq(pkt, t);
+ mem_side_port.schedTimingReq(pkt, t);
}
}
// remember that we are now stalling a packet and that we have to
- // tell the sending master to retry once space becomes available,
+ // tell the sending requestor to retry once space becomes available,
// we make no distinction whether the stalling is due to the
// request queue or response queue being full
return !retryReq;
}
void
-SerialLink::SerialLinkSlavePort::retryStalledReq()
+SerialLink::SerialLinkResponsePort::retryStalledReq()
{
if (retryReq) {
DPRINTF(SerialLink, "Request waiting for retry, now retrying\n");
}
void
-SerialLink::SerialLinkMasterPort::schedTimingReq(PacketPtr pkt, Tick when)
+SerialLink::SerialLinkRequestPort::schedTimingReq(PacketPtr pkt, Tick when)
{
// If we're about to put this packet at the head of the queue, we
// need to schedule an event to do the transmit. Otherwise there
void
-SerialLink::SerialLinkSlavePort::schedTimingResp(PacketPtr pkt, Tick when)
+SerialLink::SerialLinkResponsePort::schedTimingResp(PacketPtr pkt, Tick when)
{
// If we're about to put this packet at the head of the queue, we
// need to schedule an event to do the transmit. Otherwise there
}
void
-SerialLink::SerialLinkMasterPort::trySendTiming()
+SerialLink::SerialLinkRequestPort::trySendTiming()
{
assert(!transmitList.empty());
// then send a retry at this point, also note that if the
// request we stalled was waiting for the response queue
// rather than the request queue we might stall it again
- slavePort.retryStalledReq();
+ cpu_side_port.retryStalledReq();
}
// if the send failed, then we try again once we receive a retry,
}
void
-SerialLink::SerialLinkSlavePort::trySendTiming()
+SerialLink::SerialLinkResponsePort::trySendTiming()
{
assert(!transmitList.empty());
// if there is space in the request queue and we were stalling
// a request, it will definitely be possible to accept it now
// since there is guaranteed space in the response queue
- if (!masterPort.reqQueueFull() && retryReq) {
+ if (!mem_side_port.reqQueueFull() && retryReq) {
DPRINTF(SerialLink, "Request waiting for retry, now retrying\n");
retryReq = false;
sendRetryReq();
}
void
-SerialLink::SerialLinkMasterPort::recvReqRetry()
+SerialLink::SerialLinkRequestPort::recvReqRetry()
{
trySendTiming();
}
void
-SerialLink::SerialLinkSlavePort::recvRespRetry()
+SerialLink::SerialLinkResponsePort::recvRespRetry()
{
trySendTiming();
}
Tick
-SerialLink::SerialLinkSlavePort::recvAtomic(PacketPtr pkt)
+SerialLink::SerialLinkResponsePort::recvAtomic(PacketPtr pkt)
{
- return delay * serial_link.clockPeriod() + masterPort.sendAtomic(pkt);
+ return delay * serial_link.clockPeriod() + mem_side_port.sendAtomic(pkt);
}
void
-SerialLink::SerialLinkSlavePort::recvFunctional(PacketPtr pkt)
+SerialLink::SerialLinkResponsePort::recvFunctional(PacketPtr pkt)
{
pkt->pushLabel(name());
}
}
- // also check the master port's request queue
- if (masterPort.trySatisfyFunctional(pkt)) {
+ // also check the memory-side port's request queue
+ if (mem_side_port.trySatisfyFunctional(pkt)) {
return;
}
pkt->popLabel();
// fall through if pkt still not satisfied
- masterPort.sendFunctional(pkt);
+ mem_side_port.sendFunctional(pkt);
}
bool
-SerialLink::SerialLinkMasterPort::trySatisfyFunctional(PacketPtr pkt)
+SerialLink::SerialLinkRequestPort::trySatisfyFunctional(PacketPtr pkt)
{
bool found = false;
auto i = transmitList.begin();
}
AddrRangeList
-SerialLink::SerialLinkSlavePort::getAddrRanges() const
+SerialLink::SerialLinkResponsePort::getAddrRanges() const
{
return ranges;
}
{ }
};
- // Forward declaration to allow the slave port to have a pointer
- class SerialLinkMasterPort;
+ // Forward declaration to allow the CPU-side port to have a pointer
+ class SerialLinkRequestPort;
/**
* The port on the side that receives requests and sends
- * responses. The slave port has a set of address ranges that it
- * is responsible for. The slave port also has a buffer for the
+ * responses. The CPU-side port has a set of address ranges that it
+ * is responsible for. The CPU-side port also has a buffer for the
* responses not yet sent.
*/
- class SerialLinkSlavePort : public ResponsePort
+ class SerialLinkResponsePort : public ResponsePort
{
private:
SerialLink& serial_link;
/**
- * Master port on the other side of the serial_link.
+ * Request port on the other side of the serial_link.
*/
- SerialLinkMasterPort& masterPort;
+ SerialLinkRequestPort& mem_side_port;
/** Minimum request delay though this serial_link. */
const Cycles delay;
public:
/**
- * Constructor for the SerialLinkSlavePort.
+ * Constructor for the SerialLinkResponsePort.
*
* @param _name the port name including the owner
* @param _serial_link the structural owner
- * @param _masterPort the master port on the other side of the
+ * @param _mem_side_port the memory-side port on the other side of the
* serial_link
* @param _delay the delay in cycles from receiving to sending
* @param _resp_limit the size of the response queue
* @param _ranges a number of address ranges to forward
*/
- SerialLinkSlavePort(const std::string& _name, SerialLink&
- _serial_link, SerialLinkMasterPort& _masterPort,
+ SerialLinkResponsePort(const std::string& _name, SerialLink&
+ _serial_link, SerialLinkRequestPort& _mem_side_port,
Cycles _delay, int _resp_limit, const
std::vector<AddrRange>& _ranges);
/**
* Port on the side that forwards requests and receives
- * responses. The master port has a buffer for the requests not
+ * responses. The memory-side port has a buffer for the requests not
* yet sent.
*/
- class SerialLinkMasterPort : public RequestPort
+ class SerialLinkRequestPort : public RequestPort
{
private:
SerialLink& serial_link;
/**
- * The slave port on the other side of the serial_link.
+ * The response port (CPU-side port) on the other side of
+ * the serial_link.
*/
- SerialLinkSlavePort& slavePort;
+ SerialLinkResponsePort& cpu_side_port;
/** Minimum delay though this serial_link. */
const Cycles delay;
public:
/**
- * Constructor for the SerialLinkMasterPort.
+ * Constructor for the SerialLinkRequestPort.
*
* @param _name the port name including the owner
* @param _serial_link the structural owner
- * @param _slavePort the slave port on the other side of the
- * serial_link
+ * @param _cpu_side_port the CPU-side port on the other
+ * side of the serial_link
* @param _delay the delay in cycles from receiving to sending
* @param _req_limit the size of the request queue
*/
- SerialLinkMasterPort(const std::string& _name, SerialLink&
- _serial_link, SerialLinkSlavePort& _slavePort, Cycles
- _delay, int _req_limit);
+ SerialLinkRequestPort(const std::string& _name, SerialLink&
+ _serial_link, SerialLinkResponsePort& _cpu_side_port,
+ Cycles _delay, int _req_limit);
/**
* Is this side blocked from accepting new request packets.
void recvReqRetry();
};
- /** Slave port of the serial_link. */
- SerialLinkSlavePort slavePort;
+ /** Response port of the serial_link. */
+ SerialLinkResponsePort cpu_side_port;
- /** Master port of the serial_link. */
- SerialLinkMasterPort masterPort;
+ /** Request port of the serial_link. */
+ SerialLinkRequestPort mem_side_port;
/** Number of parallel lanes in this serial link */
unsigned num_lanes;
// queue if there is one
bool needsResponse = pkt->needsResponse();
recvAtomic(pkt);
- // turn packet around to go back to requester if response expected
+ // turn packet around to go back to requestor if response expected
if (needsResponse) {
// recvAtomic() should already have turned packet into
// atomic response
SimpleMemory::MemoryPort::MemoryPort(const std::string& _name,
SimpleMemory& _memory)
- : SlavePort(_name, &_memory), memory(_memory)
+ : ResponsePort(_name, &_memory), memory(_memory)
{ }
AddrRangeList
{ }
};
- class MemoryPort : public SlavePort
+ class MemoryPort : public ResponsePort
{
private:
SimpleMemory& memory;
}
std::pair<SnoopFilter::SnoopList, Cycles>
-SnoopFilter::lookupRequest(const Packet* cpkt, const ResponsePort& slave_port)
+SnoopFilter::lookupRequest(const Packet* cpkt, const ResponsePort&
+ cpu_side_port)
{
DPRINTF(SnoopFilter, "%s: src %s packet %s\n", __func__,
- slave_port.name(), cpkt->print());
+ cpu_side_port.name(), cpkt->print());
// check if the packet came from a cache
- bool allocate = !cpkt->req->isUncacheable() && slave_port.isSnooping() &&
- cpkt->fromCache();
+ bool allocate = !cpkt->req->isUncacheable() && cpu_side_port.isSnooping()
+ && cpkt->fromCache();
Addr line_addr = cpkt->getBlockAddr(linesize);
if (cpkt->isSecure()) {
line_addr |= LineSecure;
}
- SnoopMask req_port = portToMask(slave_port);
+ SnoopMask req_port = portToMask(cpu_side_port);
reqLookupResult.it = cachedLocations.find(line_addr);
bool is_hit = (reqLookupResult.it != cachedLocations.end());
} else { // if (!cpkt->needsResponse())
assert(cpkt->isEviction());
// make sure that the sender actually had the line
- panic_if((sf_item.holder & req_port).none(), "requester %x is not a " \
+ panic_if((sf_item.holder & req_port).none(), "requestor %x is not a " \
"holder :( SF value %x.%x\n", req_port,
sf_item.requested, sf_item.holder);
// CleanEvicts and Writebacks -> the sender and all caches above
}
void
-SnoopFilter::updateResponse(const Packet* cpkt, const ResponsePort& slave_port)
+SnoopFilter::updateResponse(const Packet* cpkt, const ResponsePort&
+ cpu_side_port)
{
DPRINTF(SnoopFilter, "%s: src %s packet %s\n",
- __func__, slave_port.name(), cpkt->print());
+ __func__, cpu_side_port.name(), cpkt->print());
assert(cpkt->isResponse());
// we only allocate if the packet actually came from a cache, but
// start by checking if the port is snooping
- if (cpkt->req->isUncacheable() || !slave_port.isSnooping())
+ if (cpkt->req->isUncacheable() || !cpu_side_port.isSnooping())
return;
// next check if we actually allocated an entry
if (sf_it == cachedLocations.end())
return;
- SnoopMask slave_mask = portToMask(slave_port);
+ SnoopMask response_mask = portToMask(cpu_side_port);
SnoopItem& sf_item = sf_it->second;
DPRINTF(SnoopFilter, "%s: old SF value %x.%x\n",
__func__, sf_item.requested, sf_item.holder);
// Make sure we have seen the actual request, too
- panic_if((sf_item.requested & slave_mask).none(),
+ panic_if((sf_item.requested & response_mask).none(),
"SF value %x.%x missing request bit\n",
sf_item.requested, sf_item.holder);
- sf_item.requested &= ~slave_mask;
+ sf_item.requested &= ~response_mask;
// Update the residency of the cache line.
if (cpkt->req->isCacheMaintenance()) {
// A cache clean response does not carry any data so it
// shouldn't change the holders, unless it is invalidating.
if (cpkt->isInvalidate()) {
- sf_item.holder &= ~slave_mask;
+ sf_item.holder &= ~response_mask;
}
eraseIfNullEntry(sf_it);
} else {
// Any other response implies that a cache above will have the
// block.
- sf_item.holder |= slave_mask;
+ sf_item.holder |= response_mask;
assert((sf_item.holder | sf_item.requested).any());
}
DPRINTF(SnoopFilter, "%s: new SF value %x.%x\n",
// Change for systems with more than 256 ports tracked by this object
static const int SNOOP_MASK_SIZE = 256;
- typedef std::vector<QueuedSlavePort*> SnoopList;
+ typedef std::vector<QueuedResponsePort*> SnoopList;
SnoopFilter (const SnoopFilterParams *p) :
SimObject(p), reqLookupResult(cachedLocations.end()),
}
/**
- * Init a new snoop filter and tell it about all the slave ports
+ * Init a new snoop filter and tell it about all the CPU-side ports
* of the enclosing bus.
*
- * @param slave_ports Slave ports that the bus is attached to.
+ * @param _cpu_side_ports Response ports that the bus is attached to.
*/
- void setSlavePorts(const SnoopList& slave_ports) {
- localSlavePortIds.resize(slave_ports.size(), InvalidPortID);
+ void setCPUSidePorts(const SnoopList& _cpu_side_ports) {
+ localResponsePortIds.resize(_cpu_side_ports.size(), InvalidPortID);
PortID id = 0;
- for (const auto& p : slave_ports) {
+ for (const auto& p : _cpu_side_ports) {
// no need to track this port if it is not snooping
if (p->isSnooping()) {
- slavePorts.push_back(p);
- localSlavePortIds[p->getId()] = id++;
+ cpuSidePorts.push_back(p);
+ localResponsePortIds[p->getId()] = id++;
}
}
}
/**
- * Lookup a request (from a slave port) in the snoop filter and
- * return a list of other slave ports that need forwarding of the
+ * Lookup a request (from a CPU-side port) in the snoop filter and
+ * return a list of other CPU-side ports that need forwarding of the
* resulting snoops. Additionally, update the tracking structures
* with new request information. Note that the caller must also
* call finishRequest once it is known if the request needs to
* retry or not.
*
- * @param cpkt Pointer to the request packet. Not changed.
- * @param slave_port Slave port where the request came from.
+ * @param cpkt Pointer to the request packet. Not changed.
+ * @param cpu_side_port Response port where the request came from.
* @return Pair of a vector of snoop target ports and lookup latency.
*/
std::pair<SnoopList, Cycles> lookupRequest(const Packet* cpkt,
- const ResponsePort& slave_port);
+ const ResponsePort& cpu_side_port);
/**
* For an un-successful request, revert the change to the snoop
void finishRequest(bool will_retry, Addr addr, bool is_secure);
/**
- * Handle an incoming snoop from below (the master port). These
+ * Handle an incoming snoop from below (the memory-side port). These
* can upgrade the tracking logic and may also benefit from
* additional steering thanks to the snoop filter.
*
* other cache, or memory) and update the tracking information in
* the snoop filter.
*
- * @param cpkt Pointer to const Packet holding the snoop response.
- * @param slave_port ResponsePort that made the original request and
- * is the target of this response.
+ * @param cpkt Pointer to const Packet holding the snoop response.
+ * @param cpu_side_port ResponsePort that made the original request and
+ * is the target of this response.
*/
- void updateResponse(const Packet *cpkt, const ResponsePort& slave_port);
+ void updateResponse(const Packet *cpkt, const ResponsePort& cpu_side_port);
virtual void regStats();
*/
std::pair<SnoopList, Cycles> snoopAll(Cycles latency) const
{
- return std::make_pair(slavePorts, latency);
+ return std::make_pair(cpuSidePorts, latency);
}
- std::pair<SnoopList, Cycles> snoopSelected(const SnoopList& slave_ports,
- Cycles latency) const
+ std::pair<SnoopList, Cycles> snoopSelected(const SnoopList&
+ _cpu_side_ports, Cycles latency) const
{
- return std::make_pair(slave_ports, latency);
+ return std::make_pair(_cpu_side_ports, latency);
}
std::pair<SnoopList, Cycles> snoopDown(Cycles latency) const
{
private:
/**
- * Removes snoop filter items which have no requesters and no holders.
+ * Removes snoop filter items which have no requestors and no holders.
*/
void eraseIfNullEntry(SnoopFilterCache::iterator& sf_it);
ReqLookupResult() = delete;
} reqLookupResult;
- /** List of all attached snooping slave ports. */
- SnoopList slavePorts;
+ /** List of all attached snooping CPU-side ports. */
+ SnoopList cpuSidePorts;
/** Track the mapping from port ids to the local mask ids. */
- std::vector<PortID> localSlavePortIds;
+ std::vector<PortID> localResponsePortIds;
/** Cache line size. */
const unsigned linesize;
/** Latency for doing a lookup in the filter */
assert(port.getId() != InvalidPortID);
// if this is not a snooping port, return a zero mask
return !port.isSnooping() ? 0 :
- ((SnoopMask)1) << localSlavePortIds[port.getId()];
+ ((SnoopMask)1) << localResponsePortIds[port.getId()];
}
inline SnoopFilter::SnoopList
SnoopFilter::maskToPortList(SnoopMask port_mask) const
{
SnoopList res;
- for (const auto& p : slavePorts)
+ for (const auto& p : cpuSidePorts)
if ((port_mask & portToMask(*p)).any())
res.push_back(p);
return res;
#include "debug/TokenPort.hh"
void
-TokenMasterPort::bind(Port &peer)
+TokenRequestPort::bind(Port &peer)
{
RequestPort::bind(peer);
}
void
-TokenMasterPort::recvTokens(int num_tokens)
+TokenRequestPort::recvTokens(int num_tokens)
{
panic_if(!tokenManager, "TokenManager not set for %s.\n", name());
}
bool
-TokenMasterPort::haveTokens(int num_tokens)
+TokenRequestPort::haveTokens(int num_tokens)
{
panic_if(!tokenManager, "TokenManager not set for %s.\n", name());
}
void
-TokenMasterPort::acquireTokens(int num_tokens)
+TokenRequestPort::acquireTokens(int num_tokens)
{
panic_if(!tokenManager, "TokenManager not set for %s.\n", name());
}
void
-TokenMasterPort::setTokenManager(TokenManager *_tokenManager)
+TokenRequestPort::setTokenManager(TokenManager *_tokenManager)
{
tokenManager = _tokenManager;
}
void
-TokenSlavePort::sendTokens(int num_tokens)
+TokenResponsePort::sendTokens(int num_tokens)
{
- fatal_if(!tokenMasterPort, "Tried sendTokens to non-token master!\n");
+ fatal_if(!tokenRequestPort, "Tried sendTokens to non-token requestor!\n");
- // Send tokens to a master
- tokenMasterPort->recvTokens(num_tokens);
+ // Send tokens to a requestor
+ tokenRequestPort->recvTokens(num_tokens);
}
void
-TokenSlavePort::bind(Port& peer)
+TokenResponsePort::bind(Port& peer)
{
- // TokenSlavePort is allowed to bind to either TokenMasterPort or a
- // RequestPort as fallback. If the type is a RequestPort, tokenMasterPort
+ // TokenResponsePort is allowed to bind to either TokenRequestPort or a
+ // RequestPort as fallback. If the type is a RequestPort, tokenRequestPort
// is set to nullptr to indicate tokens should not be exchanged.
- auto *token_master_port = dynamic_cast<TokenMasterPort*>(&peer);
- auto *master_port = dynamic_cast<RequestPort*>(&peer);
- if (!token_master_port && !master_port) {
- fatal("Attempt to bind port %s to unsupported slave port %s.",
+ auto *token_request_port = dynamic_cast<TokenRequestPort*>(&peer);
+ auto *request_port = dynamic_cast<RequestPort*>(&peer);
+ if (!token_request_port && !request_port) {
+ fatal("Attempt to bind port %s to unsupported response port %s.",
name(), peer.name());
- } else if (token_master_port) {
- // slave port keeps track of the master port
- tokenMasterPort = token_master_port;
-
- // master port also keeps track of slave port
- tokenMasterPort->bind(*this);
- } else if (master_port) {
- tokenMasterPort = nullptr;
+ } else if (token_request_port) {
+ // response port keeps track of the request port
+ tokenRequestPort = token_request_port;
+
+ // request port also keeps track of response port
+ tokenRequestPort->bind(*this);
+ } else if (request_port) {
+ tokenRequestPort = nullptr;
}
}
void
-TokenSlavePort::unbind()
+TokenResponsePort::unbind()
{
ResponsePort::responderUnbind();
- tokenMasterPort = nullptr;
+ tokenRequestPort = nullptr;
}
void
-TokenSlavePort::recvRespRetry()
+TokenResponsePort::recvRespRetry()
{
- // fallback to QueuedSlavePort-like impl for now
+ // fallback to QueuedResponsePort-like impl for now
panic_if(respQueue.empty(),
"Attempted to retry a response when no retry was queued!\n");
}
bool
-TokenSlavePort::sendTimingResp(PacketPtr pkt)
+TokenResponsePort::sendTimingResp(PacketPtr pkt)
{
bool success = ResponsePort::sendTimingResp(pkt);
#include "sim/clocked_object.hh"
class TokenManager;
-class TokenSlavePort;
+class TokenResponsePort;
-class TokenMasterPort : public RequestPort
+class TokenRequestPort : public RequestPort
{
private:
/* Manager to track tokens between this token port pair. */
TokenManager *tokenManager;
public:
- TokenMasterPort(const std::string& name, SimObject* owner,
+ TokenRequestPort(const std::string& name, SimObject* owner,
PortID id = InvalidPortID) :
RequestPort(name, owner, id), tokenManager(nullptr)
{ }
/**
- * Bind this master port to slave port. Called by the slave port in
+ * Bind this request port to response port. Called by the response port in
* this token implementation.
*/
void bind(Port &peer) override;
/**
- * Unbind port. Handled by slave port in token implementation.
+ * Unbind port. Handled by response port in token implementation.
*/
void unbind() override {}
/**
- * Receive tokens returned by the slave port. This increments the number
+ * Receive tokens returned by the response port. This increments the number
 * of available tokens across the port.
*/
void recvTokens(int num_tokens);
/**
 * Specify a token manager, which will handle tracking of tokens for a
- * TokenMasterPort/SlaveMasterPort pair.
+ * TokenRequestPort/TokenResponsePort pair.
*/
void setTokenManager(TokenManager *_tokenManager);
};
-class TokenSlavePort : public ResponsePort
+class TokenResponsePort : public ResponsePort
{
private:
- TokenMasterPort *tokenMasterPort;
+ TokenRequestPort *tokenRequestPort;
std::deque<PacketPtr> respQueue;
void recvRespRetry() override;
public:
- TokenSlavePort(const std::string& name, ClockedObject *owner,
+ TokenResponsePort(const std::string& name, ClockedObject *owner,
PortID id = InvalidPortID) :
- ResponsePort(name, owner, id), tokenMasterPort(nullptr)
+ ResponsePort(name, owner, id), tokenRequestPort(nullptr)
{ }
- ~TokenSlavePort() { }
+ ~TokenResponsePort() { }
/**
- * Bind this slave port to a master port. This also does the mirror
- * action and bainds the master port to the slave port as well as
+ * Bind this response port to a request port. This also does the mirror
+ * action and binds the request port to the response port as well as
* binding the base class types.
*/
void bind(Port &peer) override;
/**
- * Unbind this slave port and associated master port.
+ * Unbind this response port and associated request port.
*/
void unbind() override;
/**
- * Return num_tokens tokens back to the master port.
+ * Return num_tokens tokens back to the request port.
*/
void sendTokens(int num_tokens);
SimpleTimingPort::SimpleTimingPort(const std::string& _name,
SimObject* _owner) :
- QueuedSlavePort(_name, _owner, queueImpl), queueImpl(*_owner, *this)
+ QueuedResponsePort(_name, _owner, queueImpl), queueImpl(*_owner, *this)
{
}
bool needsResponse = pkt->needsResponse();
Tick latency = recvAtomic(pkt);
- // turn packet around to go back to requester if response expected
+ // turn packet around to go back to requestor if response expected
if (needsResponse) {
// recvAtomic() should already have turned packet into
// atomic response
/**
* The simple timing port uses a queued port to implement
* recvFunctional and recvTimingReq through recvAtomic. It is always a
- * slave port.
+ * response port.
*/
-class SimpleTimingPort : public QueuedSlavePort
+class SimpleTimingPort : public QueuedResponsePort
{
private:
/**
* The packet queue used to store outgoing responses. Note that
* the queue is made private and that we avoid overloading the
- * name used in the QueuedSlavePort. Access is provided through
+ * name used in the QueuedResponsePort. Access is provided through
* the queue reference in the base class.
*/
RespPacketQueue queueImpl;
gen.next())
{
auto req = std::make_shared<Request>(
- gen.addr(), gen.size(), flags, Request::funcMasterId, 0,
+ gen.addr(), gen.size(), flags, Request::funcRequestorId, 0,
_tc->contextId());
if (!tryTLBs(req, BaseTLB::Read))
gen.next())
{
auto req = std::make_shared<Request>(
- gen.addr(), gen.size(), flags, Request::funcMasterId, 0,
+ gen.addr(), gen.size(), flags, Request::funcRequestorId, 0,
_tc->contextId());
if (!tryTLBs(req, BaseTLB::Write))
gen.next())
{
auto req = std::make_shared<Request>(
- gen.addr(), gen.size(), flags, Request::funcMasterId, 0,
+ gen.addr(), gen.size(), flags, Request::funcRequestorId, 0,
_tc->contextId());
if (!tryTLBs(req, BaseTLB::Write))
headerLatency(p->header_latency),
width(p->width),
gotAddrRanges(p->port_default_connection_count +
- p->port_master_connection_count, false),
+ p->port_mem_side_ports_connection_count, false),
gotAllAddrRanges(false), defaultPortID(InvalidPortID),
useDefaultRange(p->use_default_range),
transDist(this, "trans_dist", "Transaction distribution"),
pktCount(this, "pkt_count",
- "Packet count per connected master and slave (bytes)"),
- pktSize(this, "pkt_size",
- "Cumulative packet size per connected master and slave (bytes)")
+ "Packet count per connected requestor and responder (bytes)"),
+ pktSize(this, "pkt_size", "Cumulative packet size per connected "
+ "requestor and responder (bytes)")
{
}
BaseXBar::~BaseXBar()
{
- for (auto m: masterPorts)
- delete m;
+ for (auto port: memSidePorts)
+ delete port;
- for (auto s: slavePorts)
- delete s;
+ for (auto port: cpuSidePorts)
+ delete port;
}
Port &
BaseXBar::getPort(const std::string &if_name, PortID idx)
{
- if (if_name == "master" && idx < masterPorts.size()) {
- // the master port index translates directly to the vector position
- return *masterPorts[idx];
+ if (if_name == "mem_side_ports" && idx < memSidePorts.size()) {
+ // the memory-side ports index translates directly to the vector
+ // position
+ return *memSidePorts[idx];
} else if (if_name == "default") {
- return *masterPorts[defaultPortID];
- } else if (if_name == "slave" && idx < slavePorts.size()) {
- // the slave port index translates directly to the vector position
- return *slavePorts[idx];
+ return *memSidePorts[defaultPortID];
+ } else if (if_name == "cpu_side_ports" && idx < cpuSidePorts.size()) {
+ // the CPU-side ports index translates directly to the vector position
+ return *cpuSidePorts[idx];
} else {
return ClockedObject::getPort(if_name, idx);
}
{
// if we are in the retry state, we will not see anything but the
// retrying port (or in the case of the snoop ports the snoop
- // response port that mirrors the actual slave port) as we leave
+ // response port that mirrors the actual CPU-side port) as we leave
// this state again in zero time if the peer does not immediately
// call the layer when receiving the retry
BaseXBar::findPort(AddrRange addr_range)
{
// we should never see any address lookups before we've got the
- // ranges of all connected slave modules
+ // ranges of all connected CPU-side-port modules
assert(gotAllAddrRanges);
// Check the address map interval tree
/** Function called by the port when the crossbar is receiving a range change.*/
void
-BaseXBar::recvRangeChange(PortID master_port_id)
+BaseXBar::recvRangeChange(PortID mem_side_port_id)
{
- DPRINTF(AddrRanges, "Received range change from slave port %s\n",
- masterPorts[master_port_id]->getPeer());
+ DPRINTF(AddrRanges, "Received range change from cpu_side_ports %s\n",
+ memSidePorts[mem_side_port_id]->getPeer());
- // remember that we got a range from this master port and thus the
- // connected slave module
- gotAddrRanges[master_port_id] = true;
+ // remember that we got a range from this memory-side port and thus the
+ // connected CPU-side-port module
+ gotAddrRanges[mem_side_port_id] = true;
// update the global flag
if (!gotAllAddrRanges) {
gotAllAddrRanges &= *r++;
}
if (gotAllAddrRanges)
- DPRINTF(AddrRanges, "Got address ranges from all slaves\n");
+ DPRINTF(AddrRanges, "Got address ranges from all responders\n");
}
// note that we could get the range from the default port at any
// point in time, and we cannot assume that the default range is
// set before the other ones are, so we do additional checks once
// all ranges are provided
- if (master_port_id == defaultPortID) {
+ if (mem_side_port_id == defaultPortID) {
// only update if we are indeed checking ranges for the
// default port since the port might not have a valid range
// otherwise
if (useDefaultRange) {
- AddrRangeList ranges = masterPorts[master_port_id]->getAddrRanges();
+ AddrRangeList ranges = memSidePorts[mem_side_port_id]->
+ getAddrRanges();
if (ranges.size() != 1)
fatal("Crossbar %s may only have a single default range",
} else {
// the ports are allowed to update their address ranges
// dynamically, so remove any existing entries
- if (gotAddrRanges[master_port_id]) {
+ if (gotAddrRanges[mem_side_port_id]) {
for (auto p = portMap.begin(); p != portMap.end(); ) {
- if (p->second == master_port_id)
+ if (p->second == mem_side_port_id)
// erasing invalidates the iterator, so advance it
// before the deletion takes place
portMap.erase(p++);
}
}
- AddrRangeList ranges = masterPorts[master_port_id]->getAddrRanges();
+ AddrRangeList ranges = memSidePorts[mem_side_port_id]->
+ getAddrRanges();
for (const auto& r: ranges) {
DPRINTF(AddrRanges, "Adding range %s for id %d\n",
- r.to_string(), master_port_id);
- if (portMap.insert(r, master_port_id) == portMap.end()) {
+ r.to_string(), mem_side_port_id);
+ if (portMap.insert(r, mem_side_port_id) == portMap.end()) {
PortID conflict_id = portMap.intersects(r)->second;
fatal("%s has two ports responding within range "
"%s:\n\t%s\n\t%s\n",
name(),
r.to_string(),
- masterPorts[master_port_id]->getPeer(),
- masterPorts[conflict_id]->getPeer());
+ memSidePorts[mem_side_port_id]->getPeer(),
+ memSidePorts[conflict_id]->getPeer());
}
}
}
- // if we have received ranges from all our neighbouring slave
- // modules, go ahead and tell our connected master modules in
+ // if we have received ranges from all our neighbouring CPU-side-port
+ // modules, go ahead and tell our connected memory-side-port modules in
// turn, this effectively assumes a tree structure of the system
if (gotAllAddrRanges) {
DPRINTF(AddrRanges, "Aggregating address ranges\n");
}
}
- // tell all our neighbouring master ports that our address
+ // tell all our neighbouring memory-side ports that our address
// ranges have changed
- for (const auto& s: slavePorts)
- s->sendRangeChange();
+ for (const auto& port: cpuSidePorts)
+ port->sendRangeChange();
}
}
assert(gotAllAddrRanges);
// at the moment, this never happens, as there are no cycles in
- // the range queries and no devices on the master side of a crossbar
+ // the range queries and no devices on the memory side of a crossbar
// (CPU, cache, bridge etc) actually care about the ranges of the
// ports they are connected to
}
pktCount
- .init(slavePorts.size(), masterPorts.size())
+ .init(cpuSidePorts.size(), memSidePorts.size())
.flags(total | nozero | nonan);
pktSize
- .init(slavePorts.size(), masterPorts.size())
+ .init(cpuSidePorts.size(), memSidePorts.size())
.flags(total | nozero | nonan);
// both the packet count and total size are two-dimensional
- // vectors, indexed by slave port id and master port id, thus the
- // neighbouring master and slave, they do not differentiate what
- // came from the master and was forwarded to the slave (requests
- // and snoop responses) and what came from the slave and was
- // forwarded to the master (responses and snoop requests)
- for (int i = 0; i < slavePorts.size(); i++) {
- pktCount.subname(i, slavePorts[i]->getPeer().name());
- pktSize.subname(i, slavePorts[i]->getPeer().name());
- for (int j = 0; j < masterPorts.size(); j++) {
- pktCount.ysubname(j, masterPorts[j]->getPeer().name());
- pktSize.ysubname(j, masterPorts[j]->getPeer().name());
+ // vectors, indexed by CPU-side port id and memory-side port id, thus the
+ // neighbouring memory-side ports and CPU-side ports, they do not
+ // differentiate what came from the memory-side ports and was forwarded to
+ // the CPU-side ports (requests and snoop responses) and what came from
+ // the CPU-side ports and was forwarded to the memory-side ports (responses
+ // and snoop requests)
+ for (int i = 0; i < cpuSidePorts.size(); i++) {
+ pktCount.subname(i, cpuSidePorts[i]->getPeer().name());
+ pktSize.subname(i, cpuSidePorts[i]->getPeer().name());
+ for (int j = 0; j < memSidePorts.size(); j++) {
+ pktCount.ysubname(j, memSidePorts[j]->getPeer().name());
+ pktSize.ysubname(j, memSidePorts[j]->getPeer().name());
}
}
}
* PCIe, etc.
*
* The template parameter, PortClass, indicates the destination
- * port type for the layer. The retry list holds either master
- * ports or slave ports, depending on the direction of the
- * layer. Thus, a request layer has a retry list containing slave
- * ports, whereas a response layer holds master ports.
+ * port type for the layer. The retry list holds either memory-side ports
+ * or CPU-side ports, depending on the direction of the
+ * layer. Thus, a request layer has a retry list containing
+ * CPU-side ports, whereas a response layer holds memory-side ports.
*/
template <typename SrcType, typename DstType>
class Layer : public Drainable, public Stats::Group
* Function called by the port when the crossbar is recieving a
* range change.
*
- * @param master_port_id id of the port that received the change
+ * @param mem_side_port_id id of the port that received the change
*/
- virtual void recvRangeChange(PortID master_port_id);
+ virtual void recvRangeChange(PortID mem_side_port_id);
/**
* Find which port connected to this crossbar (if any) should be
void calcPacketTiming(PacketPtr pkt, Tick header_delay);
/**
- * Remember for each of the master ports of the crossbar if we got
- * an address range from the connected slave. For convenience,
- * also keep track of if we got ranges from all the slave modules
+ * Remember for each of the memory-side ports of the crossbar if we got
+ * an address range from the connected CPU-side ports. For convenience,
+ * also keep track of if we got ranges from all the CPU-side-port modules
* or not.
*/
std::vector<bool> gotAddrRanges;
bool gotAllAddrRanges;
- /** The master and slave ports of the crossbar */
- std::vector<QueuedSlavePort*> slavePorts;
- std::vector<RequestPort*> masterPorts;
+ /** The memory-side ports and CPU-side ports of the crossbar */
+ std::vector<QueuedResponsePort*> cpuSidePorts;
+ std::vector<RequestPort*> memSidePorts;
/** Port that handles requests that don't match any of the interfaces.*/
PortID defaultPortID;
* crossbar. The transaction distribution is globally counting
* different types of commands. The packet count and total packet
* size are two-dimensional vectors that are indexed by the
- * slave port and master port id (thus the neighbouring master and
- * neighbouring slave), summing up both directions (request and
- * response).
+ * CPU-side port and memory-side port id (thus the neighbouring memory-side
+ * ports and neighbouring CPU-side ports), summing up both directions
+ * (request and response).
*/
Stats::Vector transDist;
Stats::Vector2d pktCount;
for port in simobj._ports.values():
is_vector = isinstance(port, m5.params.VectorPort)
- is_master = port.role == 'MASTER'
+ is_requestor = port.role == 'GEM5 REQUESTOR'
code('ports["%s"] = new PortDesc("%s", %s, %s);' %
(port.name, port.name, cxx_bool(is_vector),
- cxx_bool(is_master)))
+ cxx_bool(is_requestor)))
code.dedent()
code('}')
def cxx_decl(self, code):
code('unsigned int port_${{self.name}}_connection_count;')
-Port.compat('GEM5 REQUESTER', 'GEM5 RESPONDER')
+Port.compat('GEM5 REQUESTOR', 'GEM5 RESPONDER')
class RequestPort(Port):
# RequestPort("description")
def __init__(self, desc):
super(RequestPort, self).__init__(
- 'GEM5 REQUESTER', desc, is_source=True)
+ 'GEM5 REQUESTOR', desc, is_source=True)
class ResponsePort(Port):
# ResponsePort("description")
# VectorRequestPort("description")
def __init__(self, desc):
super(VectorRequestPort, self).__init__(
- 'GEM5 REQUESTER', desc, is_source=True)
+ 'GEM5 REQUESTOR', desc, is_source=True)
class VectorResponsePort(VectorPort):
# VectorResponsePort("description")
# view. The output generated by do_dot() is a DOT-based figure (as a
# pdf and an editable svg file) and its source dot code. Nodes are
# components, and edges represent the memory hierarchy: the edges are
-# directed, from a master to slave. Initially all nodes are
+# directed, from a requestor to responder. Initially all nodes are
# generated, and then all edges are added. do_dot should be called
# with the top-most SimObject (namely root but not necessarily), the
# output folder and the output dot source filename. From the given
/* Is this a vector or singleton parameters/SimObject */
const bool isVector;
- /** Is this a master or slave port */
- const bool isMaster;
+ /** Is this a request or response port */
+ const bool isRequestor;
PortDesc(const std::string &name_,
- bool isVector_, bool isMaster_) :
- name(name_), isVector(isVector_), isMaster(isMaster_)
+ bool isVector_, bool isRequestor_) :
+ name(name_), isVector(isVector_), isRequestor(isRequestor_)
{ }
};
void
CxxConfigManager::bindPort(
- SimObject *master_object, const std::string &master_port_name,
- PortID master_port_index,
- SimObject *slave_object, const std::string &slave_port_name,
- PortID slave_port_index)
+ SimObject *requestor_object, const std::string &request_port_name,
+ PortID request_port_index,
+ SimObject *responder_object, const std::string &response_port_name,
+ PortID response_port_index)
{
- /* FIXME, check slave_port_index against connection_count
+ /* FIXME, check response_port_index against connection_count
* defined for port, need getPortConnectionCount and a
* getCxxConfigDirectoryEntry for each object. */
/* It would be nice to be able to catch the errors from these calls. */
- Port &master_port = master_object->getPort(
- master_port_name, master_port_index);
- Port &slave_port = slave_object->getPort(
- slave_port_name, slave_port_index);
-
- if (master_port.isConnected()) {
- throw Exception(master_object->name(), csprintf(
- "Master port: %s[%d] is already connected\n", master_port_name,
- master_port_index));
+ Port &request_port = requestor_object->getPort(
+ request_port_name, request_port_index);
+ Port &response_port = responder_object->getPort(
+ response_port_name, response_port_index);
+
+ if (request_port.isConnected()) {
+ throw Exception(requestor_object->name(), csprintf(
+ "Request port: %s[%d] is already connected\n", request_port_name,
+ request_port_index));
}
- if (slave_port.isConnected()) {
- throw Exception(slave_object->name(), csprintf(
- "Slave port: %s[%d] is already connected\n", slave_port_name,
- slave_port_index));
+ if (response_port.isConnected()) {
+ throw Exception(responder_object->name(), csprintf(
+ "Response port: %s[%d] is already connected\n", response_port_name,
+ response_port_index));
}
DPRINTF(CxxConfig, "Binding port %s.%s[%d]"
" to %s:%s[%d]\n",
- master_object->name(), master_port_name, master_port_index,
- slave_object->name(), slave_port_name, slave_port_index);
+ requestor_object->name(), request_port_name, request_port_index,
+ responder_object->name(), response_port_name, response_port_index);
- master_port.bind(slave_port);
+ request_port.bind(response_port);
}
void
-CxxConfigManager::bindMasterPort(SimObject *object,
+CxxConfigManager::bindRequestPort(SimObject *object,
const CxxConfigDirectoryEntry::PortDesc &port,
const std::vector<std::string> &peers)
{
- unsigned int master_port_index = 0;
+ unsigned int request_port_index = 0;
for (auto peer_i = peers.begin(); peer_i != peers.end();
++peer_i)
{
const std::string &peer = *peer_i;
- std::string slave_object_name;
- std::string slave_port_name;
- unsigned int slave_port_index;
+ std::string response_object_name;
+ std::string response_port_name;
+ unsigned int response_port_index;
- parsePort(peer, slave_object_name, slave_port_name,
- slave_port_index);
+ parsePort(peer, response_object_name, response_port_name,
+ response_port_index);
- std::string slave_instance_name = rename(slave_object_name);
+ std::string response_instance_name = rename(response_object_name);
- if (objectsByName.find(slave_instance_name) == objectsByName.end()) {
+ if (objectsByName.find(response_instance_name)
+ == objectsByName.end()) {
throw Exception(object->name(), csprintf(
- "Can't find slave port object: %s", slave_instance_name));
+ "Can't find response port object: %s",
+ response_instance_name));
}
- SimObject *slave_object = objectsByName[slave_instance_name];
+ SimObject *responder_object = objectsByName[response_instance_name];
- bindPort(object, port.name, master_port_index,
- slave_object, slave_port_name, slave_port_index);
+ bindPort(object, port.name, request_port_index,
+ responder_object, response_port_name, response_port_index);
- master_port_index++;
+ request_port_index++;
}
}
/* Only handle master ports as binding only needs to happen once
* for each observed pair of ports */
- if (port->isMaster) {
+ if (port->isRequestor) {
if (!port->isVector && peers.size() > 1) {
throw Exception(instance_name, csprintf(
"Too many connections to non-vector port %s (%d)\n",
port->name, peers.size()));
}
- bindMasterPort(object, *port, peers);
+ bindRequestPort(object, *port, peers);
}
}
}
std::list<Renaming> renamings;
/** Bind a single connection between two objects' ports */
- void bindPort(SimObject *masterObject, const std::string &masterPort,
- PortID masterPortIndex, SimObject *slaveObject,
- const std::string &slavePort, PortID slavePortIndex);
+ void bindPort(SimObject *requestorObject, const std::string &requestPort,
+ PortID requestPortIndex, SimObject *responderObject,
+ const std::string &responsePort, PortID responsePortIndex);
- /** Bind a single (possibly vectored) master port to peers from the
+ /** Bind a single (possibly vectored) request port to peers from the
* unparsed list peers with elements in the .ini connection format:
* path(.path)*.port[index] */
- void bindMasterPort(SimObject *object,
+ void bindRequestPort(SimObject *object,
const CxxConfigDirectoryEntry::PortDesc &port,
const std::vector<std::string> &peers);
uint32_t size;
Request::FlagsType flags;
Addr pc;
- MasterID master;
+ RequestorID id;
explicit PacketInfo(const PacketPtr& pkt) :
cmd(pkt->cmd),
size(pkt->getSize()),
flags(pkt->req->getFlags()),
pc(pkt->req->hasPC() ? pkt->req->getPC() : 0),
- master(pkt->req->masterId()) { }
+ id(pkt->req->requestorId()) { }
};
/**
_cacheLineSize == 64 || _cacheLineSize == 128))
warn_once("Cache line size is neither 16, 32, 64 nor 128 bytes.\n");
- // Get the generic system master IDs
- MasterID tmp_id M5_VAR_USED;
- tmp_id = getMasterId(this, "writebacks");
- assert(tmp_id == Request::wbMasterId);
- tmp_id = getMasterId(this, "functional");
- assert(tmp_id == Request::funcMasterId);
- tmp_id = getMasterId(this, "interrupt");
- assert(tmp_id == Request::intMasterId);
+ // Get the generic system requestor IDs
+ RequestorID tmp_id M5_VAR_USED;
+ tmp_id = getRequestorId(this, "writebacks");
+ assert(tmp_id == Request::wbRequestorId);
+ tmp_id = getRequestorId(this, "functional");
+ assert(tmp_id == Request::funcRequestorId);
+ tmp_id = getRequestorId(this, "interrupt");
+ assert(tmp_id == Request::intRequestorId);
// increment the number of running systems
numSystemsRunning++;
}
void
-System::addDeviceMemory(MasterID masterId, AbstractMemory *deviceMemory)
+System::addDeviceMemory(RequestorID requestor_id, AbstractMemory *deviceMemory)
{
- if (!deviceMemMap.count(masterId)) {
- deviceMemMap.insert(std::make_pair(masterId, deviceMemory));
+ if (!deviceMemMap.count(requestor_id)) {
+ deviceMemMap.insert(std::make_pair(requestor_id, deviceMemory));
}
}
bool
System::isDeviceMemAddr(PacketPtr pkt) const
{
- const MasterID& mid = pkt->masterId();
+ const RequestorID& id = pkt->requestorId();
- return (deviceMemMap.count(mid) &&
- deviceMemMap.at(mid)->getAddrRange().contains(pkt->getAddr()));
+ return (deviceMemMap.count(id) &&
+ deviceMemMap.at(id)->getAddrRange().contains(pkt->getAddr()));
}
AbstractMemory *
-System::getDeviceMemory(MasterID mid) const
+System::getDeviceMemory(RequestorID id) const
{
- panic_if(!deviceMemMap.count(mid),
- "No device memory found for MasterID %d\n", mid);
- return deviceMemMap.at(mid);
+ panic_if(!deviceMemMap.count(id),
+ "No device memory found for RequestorID %d\n", id);
+ return deviceMemMap.at(id);
}
void
}
std::string
-System::stripSystemName(const std::string& master_name) const
+System::stripSystemName(const std::string& requestor_name) const
{
- if (startswith(master_name, name())) {
- return master_name.substr(name().size());
+ if (startswith(requestor_name, name())) {
+ return requestor_name.substr(name().size());
} else {
- return master_name;
+ return requestor_name;
}
}
-MasterID
-System::lookupMasterId(const SimObject* obj) const
+RequestorID
+System::lookupRequestorId(const SimObject* obj) const
{
- MasterID id = Request::invldMasterId;
+ RequestorID id = Request::invldRequestorId;
// number of occurrences of the SimObject pointer
- // in the master list.
+ // in the requestor list.
auto obj_number = 0;
- for (int i = 0; i < masters.size(); i++) {
- if (masters[i].obj == obj) {
+ for (int i = 0; i < requestors.size(); i++) {
+ if (requestors[i].obj == obj) {
id = i;
obj_number++;
}
}
fatal_if(obj_number > 1,
- "Cannot lookup MasterID by SimObject pointer: "
- "More than one master is sharing the same SimObject\n");
+ "Cannot lookup RequestorID by SimObject pointer: "
+ "More than one requestor is sharing the same SimObject\n");
return id;
}
-MasterID
-System::lookupMasterId(const std::string& master_name) const
+RequestorID
+System::lookupRequestorId(const std::string& requestor_name) const
{
- std::string name = stripSystemName(master_name);
+ std::string name = stripSystemName(requestor_name);
- for (int i = 0; i < masters.size(); i++) {
- if (masters[i].masterName == name) {
+ for (int i = 0; i < requestors.size(); i++) {
+ if (requestors[i].req_name == name) {
return i;
}
}
- return Request::invldMasterId;
+ return Request::invldRequestorId;
}
-MasterID
-System::getGlobalMasterId(const std::string& master_name)
+RequestorID
+System::getGlobalRequestorId(const std::string& requestor_name)
{
- return _getMasterId(nullptr, master_name);
+ return _getRequestorId(nullptr, requestor_name);
}
-MasterID
-System::getMasterId(const SimObject* master, std::string submaster)
+RequestorID
+System::getRequestorId(const SimObject* requestor, std::string subrequestor)
{
- auto master_name = leafMasterName(master, submaster);
- return _getMasterId(master, master_name);
+ auto requestor_name = leafRequestorName(requestor, subrequestor);
+ return _getRequestorId(requestor, requestor_name);
}
-MasterID
-System::_getMasterId(const SimObject* master, const std::string& master_name)
+RequestorID
+System::_getRequestorId(const SimObject* requestor,
+ const std::string& requestor_name)
{
- std::string name = stripSystemName(master_name);
+ std::string name = stripSystemName(requestor_name);
// CPUs in switch_cpus ask for ids again after switching
- for (int i = 0; i < masters.size(); i++) {
- if (masters[i].masterName == name) {
+ for (int i = 0; i < requestors.size(); i++) {
+ if (requestors[i].req_name == name) {
return i;
}
}
// they will be too small
if (Stats::enabled()) {
- fatal("Can't request a masterId after regStats(). "
+ fatal("Can't request a requestorId after regStats(). "
"You must do so in init().\n");
}
- // Generate a new MasterID incrementally
- MasterID master_id = masters.size();
+ // Generate a new RequestorID incrementally
+ RequestorID requestor_id = requestors.size();
- // Append the new Master metadata to the group of system Masters.
- masters.emplace_back(master, name, master_id);
+ // Append the new Requestor metadata to the group of system Requestors.
+ requestors.emplace_back(requestor, name, requestor_id);
- return masters.back().masterId;
+ return requestors.back().id;
}
std::string
-System::leafMasterName(const SimObject* master, const std::string& submaster)
+System::leafRequestorName(const SimObject* requestor,
+ const std::string& subrequestor)
{
- if (submaster.empty()) {
- return master->name();
+ if (subrequestor.empty()) {
+ return requestor->name();
} else {
- // Get the full master name by appending the submaster name to
- // the root SimObject master name
- return master->name() + "." + submaster;
+ // Get the full requestor name by appending the subrequestor name to
+ // the root SimObject requestor name
+ return requestor->name() + "." + subrequestor;
}
}
std::string
-System::getMasterName(MasterID master_id)
+System::getRequestorName(RequestorID requestor_id)
{
- if (master_id >= masters.size())
- fatal("Invalid master_id passed to getMasterName()\n");
+ if (requestor_id >= requestors.size())
+ fatal("Invalid requestor_id passed to getRequestorName()\n");
- const auto& master_info = masters[master_id];
- return master_info.masterName;
+ const auto& requestor_info = requestors[requestor_id];
+ return requestor_info.req_name;
}
System *
#include "cpu/base.hh"
#include "cpu/pc_event.hh"
#include "enums/MemoryMode.hh"
-#include "mem/mem_master.hh"
+#include "mem/mem_requestor.hh"
#include "mem/physical.hh"
#include "mem/port.hh"
#include "mem/port_proxy.hh"
/**
* Private class for the system port which is only used as a
- * master for debug access and for non-structural entities that do
+ * requestor for debug access and for non-structural entities that do
* not have a port of their own.
*/
class SystemPort : public RequestPort
SystemPort _systemPort;
// Map of memory address ranges for devices with their own backing stores
- std::unordered_map<MasterID, AbstractMemory *> deviceMemMap;
+ std::unordered_map<RequestorID, AbstractMemory *> deviceMemMap;
public:
/**
* Add a physical memory range for a device. The ranges added here will
- * be considered a non-PIO memory address if the masterId of the packet
+ * be considered a non-PIO memory address if the requestorId of the packet
* and range match something in the device memory map.
*/
- void addDeviceMemory(MasterID masterID, AbstractMemory *deviceMemory);
+ void addDeviceMemory(RequestorID requestorId,
+ AbstractMemory *deviceMemory);
/**
* Similar to isMemAddr but for devices. Checks if a physical address
* of the packet match an address range of a device corresponding to the
- * MasterId of the request.
+ * RequestorId of the request.
*/
bool isDeviceMemAddr(PacketPtr pkt) const;
/**
* Return a pointer to the device memory.
*/
- AbstractMemory *getDeviceMemory(MasterID masterID) const;
+ AbstractMemory *getDeviceMemory(RequestorID _id) const;
/**
* Get the architecture.
uint32_t numWorkIds;
/** This array is a per-system list of all devices capable of issuing a
- * memory system request and an associated string for each master id.
- * It's used to uniquely id any master in the system by name for things
+ * memory system request and an associated string for each requestor id.
+ * It's used to uniquely id any requestor in the system by name for things
* like cache statistics.
*/
- std::vector<MasterInfo> masters;
+ std::vector<RequestorInfo> requestors;
ThermalModel * thermalModel;
protected:
/**
- * Strips off the system name from a master name
+ * Strips off the system name from a requestor name
*/
- std::string stripSystemName(const std::string& master_name) const;
+ std::string stripSystemName(const std::string& requestor_name) const;
public:
/**
* Request an id used to create a request object in the system. All objects
* that intend to issues requests into the memory system must request an id
- * in the init() phase of startup. All master ids must be fixed by the
+ * in the init() phase of startup. All requestor ids must be fixed by the
* regStats() phase that immediately precedes it. This allows objects in
- * the memory system to understand how many masters may exist and
- * appropriately name the bins of their per-master stats before the stats
- * are finalized.
+ * the memory system to understand how many requestors may exist and
+ * appropriately name the bins of their per-requestor stats before the
+ * stats are finalized.
*
- * Registers a MasterID:
+ * Registers a RequestorID:
* This method takes two parameters, one of which is optional.
- * The first one is the master object, and it is compulsory; in case
- * a object has multiple (sub)masters, a second parameter must be
- * provided and it contains the name of the submaster. The method will
- * create a master's name by concatenating the SimObject name with the
- * eventual submaster string, separated by a dot.
+ * The first one is the requestor object, and it is compulsory; in case
+ * an object has multiple (sub)requestors, a second parameter must be
+ * provided and it contains the name of the subrequestor. The method will
+ * create a requestor's name by concatenating the SimObject name with the
+ * eventual subrequestor string, separated by a dot.
*
* As an example:
- * For a cpu having two masters: a data master and an instruction master,
+ * For a cpu having two requestors: a data requestor and an
+ * instruction requestor,
* the method must be called twice:
*
- * instMasterId = getMasterId(cpu, "inst");
- * dataMasterId = getMasterId(cpu, "data");
+ * instRequestorId = getRequestorId(cpu, "inst");
+ * dataRequestorId = getRequestorId(cpu, "data");
*
- * and the masters' names will be:
+ * and the requestors' names will be:
* - "cpu.inst"
* - "cpu.data"
*
- * @param master SimObject related to the master
- * @param submaster String containing the submaster's name
- * @return the master's ID.
+ * @param requestor SimObject related to the requestor
+ * @param subrequestor String containing the subrequestor's name
+ * @return the requestor's ID.
*/
- MasterID getMasterId(const SimObject* master,
- std::string submaster = std::string());
+ RequestorID getRequestorId(const SimObject* requestor,
+ std::string subrequestor = std::string());
/**
- * Registers a GLOBAL MasterID, which is a MasterID not related
+ * Registers a GLOBAL RequestorID, which is a RequestorID not related
* to any particular SimObject; since no SimObject is passed,
- * the master gets registered by providing the full master name.
+ * the requestor gets registered by providing the full requestor name.
*
- * @param masterName full name of the master
- * @return the master's ID.
+ * @param requestorName full name of the requestor
+ * @return the requestor's ID.
*/
- MasterID getGlobalMasterId(const std::string& master_name);
+ RequestorID getGlobalRequestorId(const std::string& requestor_name);
/**
* Get the name of an object for a given request id.
*/
- std::string getMasterName(MasterID master_id);
+ std::string getRequestorName(RequestorID requestor_id);
/**
- * Looks up the MasterID for a given SimObject
- * returns an invalid MasterID (invldMasterId) if not found.
+ * Looks up the RequestorID for a given SimObject
+ * returns an invalid RequestorID (invldRequestorId) if not found.
*/
- MasterID lookupMasterId(const SimObject* obj) const;
+ RequestorID lookupRequestorId(const SimObject* obj) const;
/**
- * Looks up the MasterID for a given object name string
- * returns an invalid MasterID (invldMasterId) if not found.
+ * Looks up the RequestorID for a given object name string
+ * returns an invalid RequestorID (invldRequestorId) if not found.
*/
- MasterID lookupMasterId(const std::string& name) const;
+ RequestorID lookupRequestorId(const std::string& name) const;
- /** Get the number of masters registered in the system */
- MasterID maxMasters() { return masters.size(); }
+ /** Get the number of requestors registered in the system */
+ RequestorID maxRequestors() { return requestors.size(); }
protected:
- /** helper function for getMasterId */
- MasterID _getMasterId(const SimObject* master,
- const std::string& master_name);
+ /** helper function for getRequestorId */
+ RequestorID _getRequestorId(const SimObject* requestor,
+ const std::string& requestor_name);
/**
- * Helper function for constructing the full (sub)master name
- * by providing the root master and the relative submaster name.
+ * Helper function for constructing the full (sub)requestor name
+ * by providing the root requestor and the relative subrequestor name.
*/
- std::string leafMasterName(const SimObject* master,
- const std::string& submaster);
+ std::string leafRequestorName(const SimObject* requestor,
+ const std::string& subrequestor);
public:
This class is a simple crossbar switch through which an arbitrary number of initiators
may communicate in parallel as long as they do not talk to the same target.
-If two masters address the same target at the same point of time,
+If two requestors address the same target at the same point in time,
the choice who will be allowed to communicate
is done non-deterministically (based on the SystemC process exectution order).
system = Param.System(Parent.any, "system")
- gem5 = ResponsePort('gem5 slave port')
+ gem5 = ResponsePort('gem5 response port')
addr_ranges = VectorParam.AddrRange([],
'Addresses served by this port\'s TLM side')
system = Param.System(Parent.any, "system")
- gem5 = RequestPort('gem5 master port')
+ gem5 = RequestPort('gem5 request port')
class Gem5ToTlmBridge32(Gem5ToTlmBridgeBase):
// Did another request arrive while blocked, schedule a retry.
if (needToSendRequestRetry) {
needToSendRequestRetry = false;
- bsp.sendRetryReq();
+ bridgeResponsePort.sendRetryReq();
}
}
if (phase == tlm::BEGIN_RESP) {
*/
if (extension.isPipeThrough()) {
if (packet->isResponse()) {
- need_retry = !bsp.sendTimingResp(packet);
+ need_retry = !bridgeResponsePort.sendTimingResp(packet);
}
} else if (packet->needsResponse()) {
packet->makeResponse();
- need_retry = !bsp.sendTimingResp(packet);
+ need_retry = !bridgeResponsePort.sendTimingResp(packet);
}
if (need_retry) {
PacketPtr packet =
Gem5SystemC::Gem5Extension::getExtension(trans).getPacket();
- bool need_retry = !bsp.sendTimingResp(packet);
+ bool need_retry = !bridgeResponsePort.sendTimingResp(packet);
sc_assert(!need_retry);
template <unsigned int BITWIDTH>
Gem5ToTlmBridge<BITWIDTH>::Gem5ToTlmBridge(
Params *params, const sc_core::sc_module_name &mn) :
- Gem5ToTlmBridgeBase(mn), bsp(std::string(name()) + ".gem5", *this),
+ Gem5ToTlmBridgeBase(mn),
+ bridgeResponsePort(std::string(name()) + ".gem5", *this),
socket("tlm_socket"),
wrapper(socket, std::string(name()) + ".tlm", InvalidPortID),
system(params->system), blockingRequest(nullptr),
Gem5ToTlmBridge<BITWIDTH>::gem5_getPort(const std::string &if_name, int idx)
{
if (if_name == "gem5")
- return bsp;
+ return bridgeResponsePort;
else if (if_name == "tlm")
return wrapper;
void
Gem5ToTlmBridge<BITWIDTH>::before_end_of_elaboration()
{
- bsp.sendRangeChange();
+ bridgeResponsePort.sendRangeChange();
socket.register_nb_transport_bw(this, &Gem5ToTlmBridge::nb_transport_bw);
socket.register_invalidate_direct_mem_ptr(
class Gem5ToTlmBridge : public Gem5ToTlmBridgeBase
{
private:
- class BridgeSlavePort : public ResponsePort
+ class BridgeResponsePort : public ResponsePort
{
protected:
Gem5ToTlmBridge<BITWIDTH> &bridge;
void recvRespRetry() override { bridge.recvRespRetry(); }
public:
- BridgeSlavePort(const std::string &name_,
+ BridgeResponsePort(const std::string &name_,
Gem5ToTlmBridge<BITWIDTH> &bridge_) :
ResponsePort(name_, nullptr), bridge(bridge_)
{}
};
- BridgeSlavePort bsp;
+ BridgeResponsePort bridgeResponsePort;
tlm_utils::simple_initiator_socket<
Gem5ToTlmBridge<BITWIDTH>, BITWIDTH> socket;
sc_gem5::TlmInitiatorWrapper<BITWIDTH> wrapper;
{
PacketPtr
-payload2packet(MasterID masterId, tlm::tlm_generic_payload &trans)
+payload2packet(RequestorID _id, tlm::tlm_generic_payload &trans)
{
MemCmd cmd;
Request::Flags flags;
auto req = std::make_shared<Request>(
- trans.get_address(), trans.get_data_length(), flags, masterId);
+ trans.get_address(), trans.get_data_length(), flags, _id);
/*
* Allocate a new Packet. The packet will be deleted when it returns from
extension->setPipeThrough();
pkt = extension->getPacket();
} else {
- pkt = payload2packet(masterId, trans);
+ pkt = payload2packet(_id, trans);
}
auto tlmSenderState = new TlmSenderState(trans);
extension->setPipeThrough();
pkt = extension->getPacket();
} else {
- pkt = payload2packet(masterId, trans);
+ pkt = payload2packet(_id, trans);
}
MemBackdoorPtr backdoor = nullptr;
extension->setPipeThrough();
bmp.sendFunctional(extension->getPacket());
} else {
- auto pkt = payload2packet(masterId, trans);
+ auto pkt = payload2packet(_id, trans);
if (pkt) {
bmp.sendFunctional(pkt);
destroyPacket(pkt);
extension->setPipeThrough();
pkt = extension->getPacket();
} else {
- pkt = payload2packet(masterId, trans);
+ pkt = payload2packet(_id, trans);
pkt->req->setFlags(Request::NO_ACCESS);
}
bmp(std::string(name()) + "master", *this), socket("tlm_socket"),
wrapper(socket, std::string(name()) + ".tlm", InvalidPortID),
system(params->system),
- masterId(params->system->getGlobalMasterId(
+ _id(params->system->getGlobalRequestorId(
std::string("[systemc].") + name()))
{
}
TlmSenderState(tlm::tlm_generic_payload &trans) : trans(trans) {}
};
- class BridgeMasterPort : public RequestPort
+ class BridgeRequestPort : public RequestPort
{
protected:
TlmToGem5Bridge<BITWIDTH> &bridge;
void recvRangeChange() override { bridge.recvRangeChange(); }
public:
- BridgeMasterPort(const std::string &name_,
+ BridgeRequestPort(const std::string &name_,
TlmToGem5Bridge<BITWIDTH> &bridge_) :
RequestPort(name_, nullptr), bridge(bridge_)
{}
bool responseInProgress;
- BridgeMasterPort bmp;
+ BridgeRequestPort bmp;
tlm_utils::simple_target_socket<
TlmToGem5Bridge<BITWIDTH>, BITWIDTH> socket;
sc_gem5::TlmTargetWrapper<BITWIDTH> wrapper;
void before_end_of_elaboration() override;
- const MasterID masterId;
+ const RequestorID _id;
};
} // namespace sc_gem5
class CowDisk(IdeDisk):
def __init__(self, filename):
super(CowDisk, self).__init__()
- self.driveID = 'master'
+ self.driveID = 'device0'
self.image = CowDiskImage(child=RawDiskImage(read_only=True),
read_only=False)
self.image.child.image_file = filename