From: Shivani Parekh
Date: Mon, 24 Aug 2020 18:47:44 +0000 (-0700)
Subject: misc: Replaced master/slave terminology
X-Git-Tag: v20.1.0.0~37
X-Git-Url: https://git.libre-soc.org/?a=commitdiff_plain;h=392c1ce;p=gem5.git

misc: Replaced master/slave terminology

Change-Id: I4df2557c71e38cc4e3a485b0e590e85eb45de8b6
Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5/+/33553
Maintainer: Jason Lowe-Power
Reviewed-by: Daniel Carvalho
Reviewed-by: Bobby R. Bruce
Tested-by: kokoro
---
diff --git a/configs/common/FSConfig.py b/configs/common/FSConfig.py
index e15459370..5814a03c0 100644
--- a/configs/common/FSConfig.py
+++ b/configs/common/FSConfig.py
@@ -98,7 +98,7 @@ def fillInCmdline(mdesc, template, **kwargs):
 def makeCowDisks(disk_paths):
     disks = []
     for disk_path in disk_paths:
-        disk = CowIdeDisk(driveID='master')
+        disk = CowIdeDisk(driveID='device0')
         disk.childImage(disk_path);
         disks.append(disk)
     return disks
diff --git a/src/arch/arm/fastmodel/FastModel.py b/src/arch/arm/fastmodel/FastModel.py
index 5be451a88..d35264a42 100644
--- a/src/arch/arm/fastmodel/FastModel.py
+++ b/src/arch/arm/fastmodel/FastModel.py
@@ -35,11 +35,11 @@ def AMBA_TARGET_ROLE(width):
 def AMBA_INITIATOR_ROLE(width):
     return 'AMBA INITIATOR %d' % width

-def SC_MASTER_PORT_ROLE(port_type):
-    return 'SC MASTER PORT for %s' % port_type
+def SC_REQUEST_PORT_ROLE(port_type):
+    return 'SC REQUEST PORT for %s' % port_type

-def SC_SLAVE_PORT_ROLE(port_type):
-    return 'SC SLAVE PORT for %s' % port_type
+def SC_RESPONSE_PORT_ROLE(port_type):
+    return 'SC RESPONSE PORT for %s' % port_type

 class AmbaTargetSocket(Port):
     def __init__(self, width, desc):
@@ -75,21 +75,21 @@ class VectorAmbaInitiatorSocket(VectorPort):
         super(VectorAmbaInitiatorSocket, self).__init__(
             my_role, desc, is_source=True)

-class ScMasterPort(Port):
+class ScRequestPort(Port):
     def __init__(self, desc, port_type):
-        my_role = SC_MASTER_PORT_ROLE(port_type)
-        peer_role = SC_SLAVE_PORT_ROLE(port_type)
+        my_role = SC_REQUEST_PORT_ROLE(port_type)
+        peer_role = SC_RESPONSE_PORT_ROLE(port_type)
         Port.compat(my_role, peer_role)
-        super(ScMasterPort, self).__init__(my_role, desc)
+        super(ScRequestPort, self).__init__(my_role, desc)

-class ScSlavePort(Port):
+class ScResponsePort(Port):
     def __init__(self, desc, port_type):
-        my_role = SC_SLAVE_PORT_ROLE(port_type)
-        peer_role = SC_MASTER_PORT_ROLE(port_type)
+        my_role = SC_RESPONSE_PORT_ROLE(port_type)
+        peer_role = SC_REQUEST_PORT_ROLE(port_type)
         Port.compat(my_role, peer_role)
-        super(ScSlavePort, self).__init__(my_role, desc)
+        super(ScResponsePort, self).__init__(my_role, desc)

 class AmbaToTlmBridge64(SystemC_ScModule):
     type = 'AmbaToTlmBridge64'
diff --git a/src/arch/arm/fastmodel/GIC/FastModelGIC.py b/src/arch/arm/fastmodel/GIC/FastModelGIC.py
index 0980cc4f4..ddcf728f8 100644
--- a/src/arch/arm/fastmodel/GIC/FastModelGIC.py
+++ b/src/arch/arm/fastmodel/GIC/FastModelGIC.py
@@ -443,7 +443,7 @@ class SCFastModelGIC(SystemC_ScModule):
     output_attributes = Param.String("ExtendedID[62:55]=MPAM_PMG, "
         "ExtendedID[54:39]=MPAM_PARTID, ExtendedID[38]=MPAM_NS",
         "User-defined transform to be applied to bus attributes like "
-        "MasterID, ExtendedID or UserFlags. Currently, only works for "
+        "RequestorID, ExtendedID or UserFlags. 
Currently, only works for " "MPAM Attributes encoding into bus attributes.") has_DirtyVLPIOnLoad = Param.Bool(False, "GICR_VPENDBASER.Dirty reflects " "transient loading state when valid=1") diff --git a/src/arch/arm/isa.cc b/src/arch/arm/isa.cc index 3c6a3a23b..9ace2367f 100644 --- a/src/arch/arm/isa.cc +++ b/src/arch/arm/isa.cc @@ -2345,7 +2345,7 @@ ISA::addressTranslation64(TLB::ArmTranslationType tran_type, warn_once("Doing AT (address translation) in functional mode! Fix Me!\n"); auto req = std::make_shared( - val, 0, flags, Request::funcMasterId, + val, 0, flags, Request::funcRequestorId, tc->pcState().pc(), tc->contextId()); Fault fault = getDTBPtr(tc)->translateFunctional( @@ -2396,7 +2396,7 @@ ISA::addressTranslation(TLB::ArmTranslationType tran_type, warn_once("Doing AT (address translation) in functional mode! Fix Me!\n"); auto req = std::make_shared( - val, 0, flags, Request::funcMasterId, + val, 0, flags, Request::funcRequestorId, tc->pcState().pc(), tc->contextId()); Fault fault = getDTBPtr(tc)->translateFunctional( diff --git a/src/arch/arm/stage2_lookup.hh b/src/arch/arm/stage2_lookup.hh index a5a984f9a..66b135930 100644 --- a/src/arch/arm/stage2_lookup.hh +++ b/src/arch/arm/stage2_lookup.hh @@ -82,7 +82,7 @@ class Stage2LookUp : public BaseTLB::Translation { req = std::make_shared(); req->setVirt(s1Te.pAddr(s1Req->getVaddr()), s1Req->getSize(), - s1Req->getFlags(), s1Req->masterId(), 0); + s1Req->getFlags(), s1Req->requestorId(), 0); } Fault getTe(ThreadContext *tc, TlbEntry *destTe); diff --git a/src/arch/arm/stage2_mmu.cc b/src/arch/arm/stage2_mmu.cc index 745bb0fca..090c8c95f 100644 --- a/src/arch/arm/stage2_mmu.cc +++ b/src/arch/arm/stage2_mmu.cc @@ -49,13 +49,13 @@ using namespace ArmISA; Stage2MMU::Stage2MMU(const Params *p) : SimObject(p), _stage1Tlb(p->tlb), _stage2Tlb(p->stage2_tlb), port(_stage1Tlb->getTableWalker(), p->sys), - masterId(p->sys->getMasterId(_stage1Tlb->getTableWalker())) + requestorId(p->sys->getRequestorId(_stage1Tlb->getTableWalker())) { // we use the stage-one table walker as the parent of the port, - // and to get our master id, this is done to keep things + // and to get our requestor id, this is done to keep things // symmetrical with other ISAs in terms of naming and stats - stage1Tlb()->setMMU(this, masterId); - stage2Tlb()->setMMU(this, masterId); + stage1Tlb()->setMMU(this, requestorId); + stage2Tlb()->setMMU(this, requestorId); } Fault @@ -66,7 +66,8 @@ Stage2MMU::readDataUntimed(ThreadContext *tc, Addr oVAddr, Addr descAddr, // translate to physical address using the second stage MMU auto req = std::make_shared(); - req->setVirt(descAddr, numBytes, flags | Request::PT_WALK, masterId, 0); + req->setVirt(descAddr, numBytes, flags | Request::PT_WALK, + requestorId, 0); if (isFunctional) { fault = stage2Tlb()->translateFunctional(req, tc, BaseTLB::Read); } else { @@ -102,7 +103,7 @@ Stage2MMU::readDataTimed(ThreadContext *tc, Addr descAddr, { // translate to physical address using the second stage MMU translation->setVirt( - descAddr, numBytes, flags | Request::PT_WALK, masterId); + descAddr, numBytes, flags | Request::PT_WALK, requestorId); translation->translateTiming(tc); } diff --git a/src/arch/arm/stage2_mmu.hh b/src/arch/arm/stage2_mmu.hh index 0ac7abec6..ed9f59e1a 100644 --- a/src/arch/arm/stage2_mmu.hh +++ b/src/arch/arm/stage2_mmu.hh @@ -60,7 +60,7 @@ class Stage2MMU : public SimObject DmaPort port; /** Request id for requests generated by this MMU */ - MasterID masterId; + RequestorID requestorId; public: /** This translation 
class is used to trigger the data fetch once a timing @@ -88,10 +88,11 @@ class Stage2MMU : public SimObject finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc, BaseTLB::Mode mode); - void setVirt(Addr vaddr, int size, Request::Flags flags, int masterId) + void setVirt(Addr vaddr, int size, Request::Flags flags, + int requestorId) { numBytes = size; - req->setVirt(vaddr, size, flags, masterId, 0); + req->setVirt(vaddr, size, flags, requestorId, 0); } void translateTiming(ThreadContext *tc) diff --git a/src/arch/arm/table_walker.cc b/src/arch/arm/table_walker.cc index 1c89c2276..9462e2743 100644 --- a/src/arch/arm/table_walker.cc +++ b/src/arch/arm/table_walker.cc @@ -55,7 +55,7 @@ using namespace ArmISA; TableWalker::TableWalker(const Params *p) : ClockedObject(p), - stage2Mmu(NULL), port(NULL), masterId(Request::invldMasterId), + stage2Mmu(NULL), port(NULL), requestorId(Request::invldRequestorId), isStage2(p->is_stage2), tlb(NULL), currState(NULL), pending(false), numSquashable(p->num_squash_per_cycle), @@ -97,11 +97,11 @@ TableWalker::~TableWalker() } void -TableWalker::setMMU(Stage2MMU *m, MasterID master_id) +TableWalker::setMMU(Stage2MMU *m, RequestorID requestor_id) { stage2Mmu = m; port = &m->getDMAPort(); - masterId = master_id; + requestorId = requestor_id; } void @@ -2122,7 +2122,7 @@ TableWalker::fetchDescriptor(Addr descAddr, uint8_t *data, int numBytes, (this->*doDescriptor)(); } else { RequestPtr req = std::make_shared( - descAddr, numBytes, flags, masterId); + descAddr, numBytes, flags, requestorId); req->taskId(ContextSwitchTaskId::DMA); PacketPtr pkt = new Packet(req, MemCmd::ReadReq); diff --git a/src/arch/arm/table_walker.hh b/src/arch/arm/table_walker.hh index 6f0414927..8f4aaefd3 100644 --- a/src/arch/arm/table_walker.hh +++ b/src/arch/arm/table_walker.hh @@ -829,8 +829,8 @@ class TableWalker : public ClockedObject /** Port shared by the two table walkers. */ DmaPort* port; - /** Master id assigned by the MMU. */ - MasterID masterId; + /** Requestor id assigned by the MMU. */ + RequestorID requestorId; /** Indicates whether this table walker is part of the stage 2 mmu */ const bool isStage2; @@ -912,7 +912,7 @@ class TableWalker : public ClockedObject void setTlb(TLB *_tlb) { tlb = _tlb; } TLB* getTlb() { return tlb; } - void setMMU(Stage2MMU *m, MasterID master_id); + void setMMU(Stage2MMU *m, RequestorID requestor_id); void memAttrs(ThreadContext *tc, TlbEntry &te, SCTLR sctlr, uint8_t texcb, bool s); void memAttrsLPAE(ThreadContext *tc, TlbEntry &te, diff --git a/src/arch/arm/tlb.cc b/src/arch/arm/tlb.cc index 1d43a0d7c..413a13e78 100644 --- a/src/arch/arm/tlb.cc +++ b/src/arch/arm/tlb.cc @@ -107,10 +107,10 @@ TLB::init() } void -TLB::setMMU(Stage2MMU *m, MasterID master_id) +TLB::setMMU(Stage2MMU *m, RequestorID requestor_id) { stage2Mmu = m; - tableWalker->setMMU(m, master_id); + tableWalker->setMMU(m, requestor_id); } bool diff --git a/src/arch/arm/tlb.hh b/src/arch/arm/tlb.hh index 004ce0b1f..63928cb81 100644 --- a/src/arch/arm/tlb.hh +++ b/src/arch/arm/tlb.hh @@ -224,7 +224,7 @@ class TLB : public BaseTLB TableWalker *getTableWalker() { return tableWalker; } - void setMMU(Stage2MMU *m, MasterID master_id); + void setMMU(Stage2MMU *m, RequestorID requestor_id); int getsize() const { return size; } @@ -399,7 +399,7 @@ class TLB : public BaseTLB * reference. For ARM this method will always return a valid port * pointer. 
* - * @return A pointer to the walker master port + * @return A pointer to the walker request port */ Port *getTableWalkerPort() override; diff --git a/src/arch/arm/tracers/tarmac_parser.cc b/src/arch/arm/tracers/tarmac_parser.cc index 9ed5bf40e..c7bf97703 100644 --- a/src/arch/arm/tracers/tarmac_parser.cc +++ b/src/arch/arm/tracers/tarmac_parser.cc @@ -1287,7 +1287,7 @@ TarmacParserRecord::readMemNoEffect(Addr addr, uint8_t *data, unsigned size, ArmISA::TLB* dtb = static_cast(thread->getDTBPtr()); req->setVirt(addr, size, flags, thread->pcState().instAddr(), - Request::funcMasterId); + Request::funcRequestorId); // Translate to physical address Fault fault = dtb->translateAtomic(req, thread, BaseTLB::Read); diff --git a/src/arch/gcn3/gpu_mem_helpers.hh b/src/arch/gcn3/gpu_mem_helpers.hh index 562158d40..9846f41b5 100644 --- a/src/arch/gcn3/gpu_mem_helpers.hh +++ b/src/arch/gcn3/gpu_mem_helpers.hh @@ -87,14 +87,14 @@ initMemReqHelper(GPUDynInstPtr gpuDynInst, MemCmd mem_req_type, assert(!misaligned_acc); req = std::make_shared(vaddr, sizeof(T), 0, - gpuDynInst->computeUnit()->masterId(), 0, + gpuDynInst->computeUnit()->requestorId(), 0, gpuDynInst->wfDynId, gpuDynInst->makeAtomicOpFunctor( &(reinterpret_cast(gpuDynInst->a_data))[lane], &(reinterpret_cast(gpuDynInst->x_data))[lane])); } else { req = std::make_shared(vaddr, req_size, 0, - gpuDynInst->computeUnit()->masterId(), 0, + gpuDynInst->computeUnit()->requestorId(), 0, gpuDynInst->wfDynId); } @@ -158,7 +158,7 @@ initMemReqScalarHelper(GPUDynInstPtr gpuDynInst, MemCmd mem_req_type) bool misaligned_acc = split_addr > vaddr; RequestPtr req = std::make_shared(vaddr, req_size, 0, - gpuDynInst->computeUnit()->masterId(), 0, + gpuDynInst->computeUnit()->requestorId(), 0, gpuDynInst->wfDynId); if (misaligned_acc) { diff --git a/src/arch/gcn3/insts/op_encodings.hh b/src/arch/gcn3/insts/op_encodings.hh index b35fb3d68..e9dcac7c5 100644 --- a/src/arch/gcn3/insts/op_encodings.hh +++ b/src/arch/gcn3/insts/op_encodings.hh @@ -584,7 +584,7 @@ namespace Gcn3ISA gpuDynInst->setStatusVector(0, 1); RequestPtr req = std::make_shared(0, 0, 0, gpuDynInst->computeUnit()-> - masterId(), 0, + requestorId(), 0, gpuDynInst->wfDynId); gpuDynInst->setRequestFlags(req); gpuDynInst->computeUnit()-> diff --git a/src/arch/generic/BaseTLB.py b/src/arch/generic/BaseTLB.py index cca7da6a5..03fb68b48 100644 --- a/src/arch/generic/BaseTLB.py +++ b/src/arch/generic/BaseTLB.py @@ -33,5 +33,9 @@ class BaseTLB(SimObject): abstract = True cxx_header = "arch/generic/tlb.hh" # Ports to connect with other TLB levels - slave = VectorSlavePort("Port closer to the CPU side") - master = RequestPort("Port closer to memory side") + cpu_side_ports = VectorResponsePort("Ports closer to the CPU side") + slave = DeprecatedParam(cpu_side_ports, + '`slave` is now called `cpu_side_ports`') + mem_side_port = RequestPort("Port closer to memory side") + master = DeprecatedParam(mem_side_port, + '`master` is now called `mem_side_port`') diff --git a/src/arch/isa_parser.py b/src/arch/isa_parser.py index 8eb90eff6..7d8bffd3d 100755 --- a/src/arch/isa_parser.py +++ b/src/arch/isa_parser.py @@ -1294,7 +1294,7 @@ class OperandList(object): class SubOperandList(OperandList): '''Find all the operands in the given code block. 
Returns an operand descriptor list (instance of class OperandList).''' - def __init__(self, parser, code, master_list): + def __init__(self, parser, code, requestor_list): self.items = [] self.bases = {} # delete strings and comments so we don't match on operands inside @@ -1315,17 +1315,17 @@ class SubOperandList(OperandList): if op_base in parser.elemToVector: elem_op = op_base op_base = parser.elemToVector[elem_op] - # find this op in the master list - op_desc = master_list.find_base(op_base) + # find this op in the requestor list + op_desc = requestor_list.find_base(op_base) if not op_desc: - error('Found operand %s which is not in the master list!' + error('Found operand %s which is not in the requestor list!' % op_base) else: # See if we've already found this operand op_desc = self.find_base(op_base) if not op_desc: # if not, add a reference to it to this sub list - self.append(master_list.bases[op_base]) + self.append(requestor_list.bases[op_base]) # start next search after end of current match next_pos = match.end() diff --git a/src/arch/riscv/pagetable_walker.cc b/src/arch/riscv/pagetable_walker.cc index 786cb8193..3832ece46 100644 --- a/src/arch/riscv/pagetable_walker.cc +++ b/src/arch/riscv/pagetable_walker.cc @@ -396,7 +396,7 @@ Walker::WalkerState::stepWalk(PacketPtr &write) else { //If we didn't return, we're setting up another read. RequestPtr request = std::make_shared( - nextRead, oldRead->getSize(), flags, walker->masterId); + nextRead, oldRead->getSize(), flags, walker->requestorId); read = new Packet(request, MemCmd::ReadReq); read->allocate(); @@ -435,7 +435,7 @@ Walker::WalkerState::setupWalk(Addr vaddr) Request::Flags flags = Request::PHYSICAL; RequestPtr request = std::make_shared( - topAddr, sizeof(PTESv39), flags, walker->masterId); + topAddr, sizeof(PTESv39), flags, walker->requestorId); read = new Packet(request, MemCmd::ReadReq); read->allocate(); diff --git a/src/arch/riscv/pagetable_walker.hh b/src/arch/riscv/pagetable_walker.hh index d9ab569c7..de4d6353d 100644 --- a/src/arch/riscv/pagetable_walker.hh +++ b/src/arch/riscv/pagetable_walker.hh @@ -166,7 +166,7 @@ namespace RiscvISA // The TLB we're supposed to load. TLB * tlb; System * sys; - MasterID masterId; + RequestorID requestorId; // The number of outstanding walks that can be squashed per cycle. 
unsigned numSquashable; @@ -202,7 +202,7 @@ namespace RiscvISA Walker(const Params *params) : ClockedObject(params), port(name() + ".port", this), funcState(this, NULL, NULL, true), tlb(NULL), sys(params->system), - masterId(sys->getMasterId(this)), + requestorId(sys->getRequestorId(this)), numSquashable(params->num_squash_per_cycle), startWalkWrapperEvent([this]{ startWalkWrapper(); }, name()) { diff --git a/src/arch/x86/X86LocalApic.py b/src/arch/x86/X86LocalApic.py index 442a5e17e..39004d228 100644 --- a/src/arch/x86/X86LocalApic.py +++ b/src/arch/x86/X86LocalApic.py @@ -48,8 +48,15 @@ class X86LocalApic(BaseInterrupts): type = 'X86LocalApic' cxx_class = 'X86ISA::Interrupts' cxx_header = 'arch/x86/interrupts.hh' - int_master = RequestPort("Port for sending interrupt messages") - int_slave = ResponsePort("Port for receiving interrupt messages") + + int_requestor = RequestPort("Port for sending interrupt messages") + int_master = DeprecatedParam(int_requestor, + '`int_master` is now called `int_requestor`') + + int_responder = ResponsePort("Port for receiving interrupt messages") + int_slave = DeprecatedParam(int_responder, + '`int_slave` is now called `int_responder`') + int_latency = Param.Latency('1ns', \ "Latency for an interrupt to propagate through this device.") pio = ResponsePort("Programmed I/O port") diff --git a/src/arch/x86/interrupts.cc b/src/arch/x86/interrupts.cc index 2be0746b1..7767c8034 100644 --- a/src/arch/x86/interrupts.cc +++ b/src/arch/x86/interrupts.cc @@ -288,12 +288,12 @@ X86ISA::Interrupts::setThreadContext(ThreadContext *_tc) void X86ISA::Interrupts::init() { - panic_if(!intMasterPort.isConnected(), + panic_if(!intRequestPort.isConnected(), "Int port not connected to anything!"); panic_if(!pioPort.isConnected(), "Pio port of %s not connected to anything!", name()); - intSlavePort.sendRangeChange(); + intResponsePort.sendRangeChange(); pioPort.sendRangeChange(); } @@ -541,7 +541,7 @@ X86ISA::Interrupts::setReg(ApicRegIndex reg, uint32_t val) regs[APIC_INTERRUPT_COMMAND_LOW] = low; for (auto id: apics) { PacketPtr pkt = buildIntTriggerPacket(id, message); - intMasterPort.sendMessage(pkt, sys->isTimingMode(), + intRequestPort.sendMessage(pkt, sys->isTimingMode(), [this](PacketPtr pkt) { completeIPI(pkt); }); } newVal = regs[APIC_INTERRUPT_COMMAND_LOW]; @@ -603,8 +603,8 @@ X86ISA::Interrupts::Interrupts(Params *p) pendingStartup(false), startupVector(0), startedUp(false), pendingUnmaskableInt(false), pendingIPIs(0), - intSlavePort(name() + ".int_slave", this, this), - intMasterPort(name() + ".int_master", this, this, p->int_latency), + intResponsePort(name() + ".int_responder", this, this), + intRequestPort(name() + ".int_requestor", this, this, p->int_latency), pioPort(this), pioDelay(p->pio_latency) { memset(regs, 0, sizeof(regs)); diff --git a/src/arch/x86/interrupts.hh b/src/arch/x86/interrupts.hh index c1b256559..f078d421a 100644 --- a/src/arch/x86/interrupts.hh +++ b/src/arch/x86/interrupts.hh @@ -174,8 +174,8 @@ class Interrupts : public BaseInterrupts int initialApicId; // Ports for interrupts. - IntSlavePort intSlavePort; - IntMasterPort intMasterPort; + IntResponsePort intResponsePort; + IntRequestPort intRequestPort; // Port for memory mapped register accesses. 
PioPort pioPort; @@ -228,10 +228,10 @@ class Interrupts : public BaseInterrupts Port &getPort(const std::string &if_name, PortID idx=InvalidPortID) override { - if (if_name == "int_master") { - return intMasterPort; - } else if (if_name == "int_slave") { - return intSlavePort; + if (if_name == "int_requestor") { + return intRequestPort; + } else if (if_name == "int_responder") { + return intResponsePort; } else if (if_name == "pio") { return pioPort; } diff --git a/src/arch/x86/pagetable_walker.cc b/src/arch/x86/pagetable_walker.cc index b540be346..f5b552133 100644 --- a/src/arch/x86/pagetable_walker.cc +++ b/src/arch/x86/pagetable_walker.cc @@ -519,7 +519,7 @@ Walker::WalkerState::stepWalk(PacketPtr &write) Request::Flags flags = oldRead->req->getFlags(); flags.set(Request::UNCACHEABLE, uncacheable); RequestPtr request = std::make_shared( - nextRead, oldRead->getSize(), flags, walker->masterId); + nextRead, oldRead->getSize(), flags, walker->requestorId); read = new Packet(request, MemCmd::ReadReq); read->allocate(); // If we need to write, adjust the read packet to write the modified @@ -588,7 +588,7 @@ Walker::WalkerState::setupWalk(Addr vaddr) flags.set(Request::UNCACHEABLE); RequestPtr request = std::make_shared( - topAddr, dataSize, flags, walker->masterId); + topAddr, dataSize, flags, walker->requestorId); read = new Packet(request, MemCmd::ReadReq); read->allocate(); diff --git a/src/arch/x86/pagetable_walker.hh b/src/arch/x86/pagetable_walker.hh index 55bb09873..dba76c110 100644 --- a/src/arch/x86/pagetable_walker.hh +++ b/src/arch/x86/pagetable_walker.hh @@ -168,7 +168,7 @@ namespace X86ISA // The TLB we're supposed to load. TLB * tlb; System * sys; - MasterID masterId; + RequestorID requestorId; // The number of outstanding walks that can be squashed per cycle. 
unsigned numSquashable; @@ -204,7 +204,7 @@ namespace X86ISA Walker(const Params *params) : ClockedObject(params), port(name() + ".port", this), funcState(this, NULL, NULL, true), tlb(NULL), sys(params->system), - masterId(sys->getMasterId(this)), + requestorId(sys->getRequestorId(this)), numSquashable(params->num_squash_per_cycle), startWalkWrapperEvent([this]{ startWalkWrapper(); }, name()) { diff --git a/src/cpu/BaseCPU.py b/src/cpu/BaseCPU.py index ee6c6461d..c9e8ae6a6 100644 --- a/src/cpu/BaseCPU.py +++ b/src/cpu/BaseCPU.py @@ -182,25 +182,25 @@ class BaseCPU(ClockedObject): if buildEnv['TARGET_ISA'] in ['x86', 'arm', 'riscv']: _cached_ports += ["itb.walker.port", "dtb.walker.port"] - _uncached_slave_ports = [] - _uncached_master_ports = [] + _uncached_interrupt_response_ports = [] + _uncached_interrupt_request_ports = [] if buildEnv['TARGET_ISA'] == 'x86': - _uncached_slave_ports += ["interrupts[0].pio", - "interrupts[0].int_slave"] - _uncached_master_ports += ["interrupts[0].int_master"] + _uncached_interrupt_response_ports += ["interrupts[0].pio", + "interrupts[0].int_responder"] + _uncached_interrupt_request_ports += ["interrupts[0].int_requestor"] def createInterruptController(self): self.interrupts = [ArchInterrupts() for i in range(self.numThreads)] def connectCachedPorts(self, bus): for p in self._cached_ports: - exec('self.%s = bus.slave' % p) + exec('self.%s = bus.cpu_side_ports' % p) def connectUncachedPorts(self, bus): - for p in self._uncached_slave_ports: - exec('self.%s = bus.master' % p) - for p in self._uncached_master_ports: - exec('self.%s = bus.slave' % p) + for p in self._uncached_interrupt_response_ports: + exec('self.%s = bus.mem_side_ports' % p) + for p in self._uncached_interrupt_request_ports: + exec('self.%s = bus.cpu_side_ports' % p) def connectAllPorts(self, cached_bus, uncached_bus = None): self.connectCachedPorts(cached_bus) @@ -237,7 +237,7 @@ class BaseCPU(ClockedObject): self.toL2Bus = xbar if xbar else L2XBar() self.connectCachedPorts(self.toL2Bus) self.l2cache = l2c - self.toL2Bus.master = self.l2cache.cpu_side + self.toL2Bus.mem_side_ports = self.l2cache.cpu_side self._cached_ports = ['l2cache.mem_side'] def createThreads(self): diff --git a/src/cpu/base.cc b/src/cpu/base.cc index fb99712e8..9ba1b315b 100644 --- a/src/cpu/base.cc +++ b/src/cpu/base.cc @@ -122,8 +122,8 @@ CPUProgressEvent::description() const BaseCPU::BaseCPU(Params *p, bool is_checker) : ClockedObject(p), instCnt(0), _cpuId(p->cpu_id), _socketId(p->socket_id), - _instMasterId(p->system->getMasterId(this, "inst")), - _dataMasterId(p->system->getMasterId(this, "data")), + _instRequestorId(p->system->getRequestorId(this, "inst")), + _dataRequestorId(p->system->getRequestorId(this, "data")), _taskId(ContextSwitchTaskId::Unknown), _pid(invldPid), _switchedOut(p->switched_out), _cacheLineSize(p->system->cacheLineSize()), interrupts(p->interrupts), numThreads(p->numThreads), system(p->system), @@ -250,7 +250,7 @@ BaseCPU::mwaitAtomic(ThreadID tid, ThreadContext *tc, BaseTLB *dtb) if (secondAddr > addr) size = secondAddr - addr; - req->setVirt(addr, size, 0x0, dataMasterId(), tc->instAddr()); + req->setVirt(addr, size, 0x0, dataRequestorId(), tc->instAddr()); // translate to physical address Fault fault = dtb->translateAtomic(req, tc, BaseTLB::Read); diff --git a/src/cpu/base.hh b/src/cpu/base.hh index 3ef551949..53204929c 100644 --- a/src/cpu/base.hh +++ b/src/cpu/base.hh @@ -123,10 +123,10 @@ class BaseCPU : public ClockedObject const uint32_t _socketId; /** instruction side request id 
that must be placed in all requests */ - MasterID _instMasterId; + RequestorID _instRequestorId; /** data side request id that must be placed in all requests */ - MasterID _dataMasterId; + RequestorID _dataRequestorId; /** An intrenal representation of a task identifier within gem5. This is * used so the CPU can add which taskId (which is an internal representation @@ -181,9 +181,9 @@ class BaseCPU : public ClockedObject uint32_t socketId() const { return _socketId; } /** Reads this CPU's unique data requestor ID */ - MasterID dataMasterId() const { return _dataMasterId; } + RequestorID dataRequestorId() const { return _dataRequestorId; } /** Reads this CPU's unique instruction requestor ID */ - MasterID instMasterId() const { return _instMasterId; } + RequestorID instRequestorId() const { return _instRequestorId; } /** * Get a port on this CPU. All CPUs have a data and diff --git a/src/cpu/base_dyn_inst.hh b/src/cpu/base_dyn_inst.hh index 56b9114d2..a6c08cc1f 100644 --- a/src/cpu/base_dyn_inst.hh +++ b/src/cpu/base_dyn_inst.hh @@ -469,7 +469,7 @@ class BaseDynInst : public ExecContext, public RefCounted uint32_t socketId() const { return cpu->socketId(); } /** Read this CPU's data requestor ID */ - MasterID masterId() const { return cpu->dataMasterId(); } + RequestorID requestorId() const { return cpu->dataRequestorId(); } /** Read this context's system-wide ID **/ ContextID contextId() const { return thread->contextId(); } diff --git a/src/cpu/checker/cpu.cc b/src/cpu/checker/cpu.cc index b016938c9..fe0300e74 100644 --- a/src/cpu/checker/cpu.cc +++ b/src/cpu/checker/cpu.cc @@ -58,7 +58,7 @@ using namespace TheISA; void CheckerCPU::init() { - masterId = systemPtr->getMasterId(this); + requestorId = systemPtr->getRequestorId(this); } CheckerCPU::CheckerCPU(Params *p) @@ -154,13 +154,13 @@ CheckerCPU::genMemFragmentRequest(Addr frag_addr, int size, auto it_end = byte_enable.cbegin() + (size - size_left); if (isAnyActiveElement(it_start, it_end)) { mem_req = std::make_shared(frag_addr, frag_size, - flags, masterId, thread->pcState().instAddr(), + flags, requestorId, thread->pcState().instAddr(), tc->contextId()); mem_req->setByteEnable(std::vector(it_start, it_end)); } } else { mem_req = std::make_shared(frag_addr, frag_size, - flags, masterId, thread->pcState().instAddr(), + flags, requestorId, thread->pcState().instAddr(), tc->contextId()); } diff --git a/src/cpu/checker/cpu.hh b/src/cpu/checker/cpu.hh index 4530d4c01..f2395d7b7 100644 --- a/src/cpu/checker/cpu.hh +++ b/src/cpu/checker/cpu.hh @@ -89,7 +89,7 @@ class CheckerCPU : public BaseCPU, public ExecContext using VecRegContainer = TheISA::VecRegContainer; /** id attached to all issued requests */ - MasterID masterId; + RequestorID requestorId; public: void init() override; diff --git a/src/cpu/checker/cpu_impl.hh b/src/cpu/checker/cpu_impl.hh index c6d2cf86a..4fab37513 100644 --- a/src/cpu/checker/cpu_impl.hh +++ b/src/cpu/checker/cpu_impl.hh @@ -237,11 +237,11 @@ Checker::verify(const DynInstPtr &completed_inst) if (!curMacroStaticInst) { // set up memory request for instruction fetch auto mem_req = std::make_shared( - fetch_PC, sizeof(MachInst), 0, masterId, fetch_PC, + fetch_PC, sizeof(MachInst), 0, requestorId, fetch_PC, thread->contextId()); mem_req->setVirt(fetch_PC, sizeof(MachInst), - Request::INST_FETCH, masterId, + Request::INST_FETCH, requestorId, thread->instAddr()); fault = itb->translateFunctional( diff --git a/src/cpu/kvm/base.cc b/src/cpu/kvm/base.cc index 5e3ffd723..83992cd3f 100644 --- a/src/cpu/kvm/base.cc +++ 
b/src/cpu/kvm/base.cc @@ -1073,7 +1073,7 @@ BaseKvmCPU::doMMIOAccess(Addr paddr, void *data, int size, bool write) syncThreadContext(); RequestPtr mmio_req = std::make_shared( - paddr, size, Request::UNCACHEABLE, dataMasterId()); + paddr, size, Request::UNCACHEABLE, dataRequestorId()); mmio_req->setContext(tc->contextId()); // Some architectures do need to massage physical addresses a bit diff --git a/src/cpu/kvm/x86_cpu.cc b/src/cpu/kvm/x86_cpu.cc index 6c44af0d1..5a667d427 100644 --- a/src/cpu/kvm/x86_cpu.cc +++ b/src/cpu/kvm/x86_cpu.cc @@ -1351,7 +1351,7 @@ X86KvmCPU::handleKvmExitIO() for (int i = 0; i < count; ++i) { RequestPtr io_req = std::make_shared( pAddr, kvm_run.io.size, - Request::UNCACHEABLE, dataMasterId()); + Request::UNCACHEABLE, dataRequestorId()); io_req->setContext(tc->contextId()); diff --git a/src/cpu/minor/fetch1.cc b/src/cpu/minor/fetch1.cc index e49140e04..4977e3de4 100644 --- a/src/cpu/minor/fetch1.cc +++ b/src/cpu/minor/fetch1.cc @@ -168,7 +168,7 @@ Fetch1::fetchLine(ThreadID tid) request->request->setContext(cpu.threads[tid]->getTC()->contextId()); request->request->setVirt( - aligned_pc, request_size, Request::INST_FETCH, cpu.instMasterId(), + aligned_pc, request_size, Request::INST_FETCH, cpu.instRequestorId(), /* I've no idea why we need the PC, but give it */ thread.pc.instAddr()); diff --git a/src/cpu/minor/lsq.cc b/src/cpu/minor/lsq.cc index e4a9dc024..106b51b6e 100644 --- a/src/cpu/minor/lsq.cc +++ b/src/cpu/minor/lsq.cc @@ -498,7 +498,7 @@ LSQ::SplitDataRequest::makeFragmentRequests() if (byte_enable.empty()) { fragment->setVirt( fragment_addr, fragment_size, request->getFlags(), - request->masterId(), request->getPC()); + request->requestorId(), request->getPC()); } else { // Set up byte-enable mask for the current fragment auto it_start = byte_enable.begin() + @@ -508,7 +508,7 @@ LSQ::SplitDataRequest::makeFragmentRequests() if (isAnyActiveElement(it_start, it_end)) { fragment->setVirt( fragment_addr, fragment_size, request->getFlags(), - request->masterId(), request->getPC()); + request->requestorId(), request->getPC()); fragment->setByteEnable(std::vector(it_start, it_end)); } else { disabled_fragment = true; @@ -1645,7 +1645,7 @@ LSQ::pushRequest(MinorDynInstPtr inst, bool isLoad, uint8_t *data, int cid = cpu.threads[inst->id.threadId]->getTC()->contextId(); request->request->setContext(cid); request->request->setVirt( - addr, size, flags, cpu.dataMasterId(), + addr, size, flags, cpu.dataRequestorId(), /* I've no idea why we need the PC, but give it */ inst->pc.instAddr(), std::move(amo_op)); request->request->setByteEnable(byte_enable); diff --git a/src/cpu/o3/commit_impl.hh b/src/cpu/o3/commit_impl.hh index fd9146b97..75d065fd5 100644 --- a/src/cpu/o3/commit_impl.hh +++ b/src/cpu/o3/commit_impl.hh @@ -736,7 +736,7 @@ DefaultCommit::handleInterrupt() { // Verify that we still have an interrupt to handle if (!cpu->checkInterrupts(0)) { - DPRINTF(Commit, "Pending interrupt is cleared by master before " + DPRINTF(Commit, "Pending interrupt is cleared by requestor before " "it got handled. 
Restart fetching from the orig path.\n"); toIEW->commitInfo[0].clearInterrupt = true; interrupt = NoFault; diff --git a/src/cpu/o3/cpu.cc b/src/cpu/o3/cpu.cc index 414913d00..01938f141 100644 --- a/src/cpu/o3/cpu.cc +++ b/src/cpu/o3/cpu.cc @@ -1843,7 +1843,7 @@ FullO3CPU::htmSendAbortSignal(ThreadID tid, uint64_t htm_uid, // notify l1 d-cache (ruby) that core has aborted transaction RequestPtr req = - std::make_shared(addr, size, flags, _dataMasterId); + std::make_shared(addr, size, flags, _dataRequestorId); req->taskId(taskId()); req->setContext(this->thread[tid]->contextId()); diff --git a/src/cpu/o3/fetch_impl.hh b/src/cpu/o3/fetch_impl.hh index f449cac47..d38420b7e 100644 --- a/src/cpu/o3/fetch_impl.hh +++ b/src/cpu/o3/fetch_impl.hh @@ -599,7 +599,7 @@ DefaultFetch::fetchCacheLine(Addr vaddr, ThreadID tid, Addr pc) // Build request here. RequestPtr mem_req = std::make_shared( fetchBufferBlockPC, fetchBufferSize, - Request::INST_FETCH, cpu->instMasterId(), pc, + Request::INST_FETCH, cpu->instRequestorId(), pc, cpu->thread[tid]->contextId()); mem_req->taskId(cpu->taskId()); diff --git a/src/cpu/o3/lsq.hh b/src/cpu/o3/lsq.hh index bc5e154f0..6e7d8d7e5 100644 --- a/src/cpu/o3/lsq.hh +++ b/src/cpu/o3/lsq.hh @@ -409,7 +409,7 @@ class LSQ if (byte_enable.empty() || isAnyActiveElement(byte_enable.begin(), byte_enable.end())) { auto request = std::make_shared( - addr, size, _flags, _inst->masterId(), + addr, size, _flags, _inst->requestorId(), _inst->instAddr(), _inst->contextId(), std::move(_amo_op)); if (!byte_enable.empty()) { @@ -456,9 +456,9 @@ class LSQ */ void setVirt(Addr vaddr, unsigned size, Request::Flags flags_, - MasterID mid, Addr pc) + RequestorID requestor_id, Addr pc) { - request()->setVirt(vaddr, size, flags_, mid, pc); + request()->setVirt(vaddr, size, flags_, requestor_id, pc); } void diff --git a/src/cpu/o3/lsq_impl.hh b/src/cpu/o3/lsq_impl.hh index 7657b23e9..c4cb45ea0 100644 --- a/src/cpu/o3/lsq_impl.hh +++ b/src/cpu/o3/lsq_impl.hh @@ -892,7 +892,7 @@ LSQ::SplitDataRequest::initiateTranslation() uint32_t size_so_far = 0; mainReq = std::make_shared(base_addr, - _size, _flags, _inst->masterId(), + _size, _flags, _inst->requestorId(), _inst->instAddr(), _inst->contextId()); if (!_byteEnable.empty()) { mainReq->setByteEnable(_byteEnable); diff --git a/src/cpu/simple/atomic.cc b/src/cpu/simple/atomic.cc index 34be352d9..20c6e1cb6 100644 --- a/src/cpu/simple/atomic.cc +++ b/src/cpu/simple/atomic.cc @@ -350,14 +350,14 @@ AtomicSimpleCPU::genMemFragmentRequest(const RequestPtr& req, Addr frag_addr, auto it_start = byte_enable.begin() + (size - (frag_size + size_left)); auto it_end = byte_enable.begin() + (size - size_left); if (isAnyActiveElement(it_start, it_end)) { - req->setVirt(frag_addr, frag_size, flags, dataMasterId(), + req->setVirt(frag_addr, frag_size, flags, dataRequestorId(), inst_addr); req->setByteEnable(std::vector(it_start, it_end)); } else { predicate = false; } } else { - req->setVirt(frag_addr, frag_size, flags, dataMasterId(), + req->setVirt(frag_addr, frag_size, flags, dataRequestorId(), inst_addr); req->setByteEnable(std::vector()); } @@ -592,7 +592,7 @@ AtomicSimpleCPU::amoMem(Addr addr, uint8_t* data, unsigned size, dcache_latency = 0; req->taskId(taskId()); - req->setVirt(addr, size, flags, dataMasterId(), + req->setVirt(addr, size, flags, dataRequestorId(), thread->pcState().instAddr(), std::move(amo_op)); // translate to physical address diff --git a/src/cpu/simple/base.cc b/src/cpu/simple/base.cc index bf940ba98..132d9196a 100644 --- 
a/src/cpu/simple/base.cc +++ b/src/cpu/simple/base.cc @@ -487,7 +487,7 @@ BaseSimpleCPU::setupFetchRequest(const RequestPtr &req) DPRINTF(Fetch, "Fetch: Inst PC:%08p, Fetch PC:%08p\n", instAddr, fetchPC); req->setVirt(fetchPC, sizeof(MachInst), Request::INST_FETCH, - instMasterId(), instAddr); + instRequestorId(), instAddr); } diff --git a/src/cpu/simple/probes/simpoint.cc b/src/cpu/simple/probes/simpoint.cc index e72e4ca48..10f310547 100644 --- a/src/cpu/simple/probes/simpoint.cc +++ b/src/cpu/simple/probes/simpoint.cc @@ -93,7 +93,8 @@ SimPoint::profile(const std::pair& p) auto map_itr = bbMap.find(currentBBV); if (map_itr == bbMap.end()){ // If a new (previously unseen) basic block is found, - // add a new unique id, record num of insts and insert into bbMap. + // add a new unique id, record num of insts and insert + // into bbMap. BBInfo info; info.id = bbMap.size() + 1; info.insts = currentBBVInstCount; diff --git a/src/cpu/simple/timing.cc b/src/cpu/simple/timing.cc index 820bede1b..c898d79b1 100644 --- a/src/cpu/simple/timing.cc +++ b/src/cpu/simple/timing.cc @@ -466,7 +466,7 @@ TimingSimpleCPU::initiateMemRead(Addr addr, unsigned size, traceData->setMem(addr, size, flags); RequestPtr req = std::make_shared( - addr, size, flags, dataMasterId(), pc, thread->contextId()); + addr, size, flags, dataRequestorId(), pc, thread->contextId()); if (!byte_enable.empty()) { req->setByteEnable(byte_enable); } @@ -550,7 +550,7 @@ TimingSimpleCPU::writeMem(uint8_t *data, unsigned size, traceData->setMem(addr, size, flags); RequestPtr req = std::make_shared( - addr, size, flags, dataMasterId(), pc, thread->contextId()); + addr, size, flags, dataRequestorId(), pc, thread->contextId()); if (!byte_enable.empty()) { req->setByteEnable(byte_enable); } @@ -608,7 +608,7 @@ TimingSimpleCPU::initiateMemAMO(Addr addr, unsigned size, traceData->setMem(addr, size, flags); RequestPtr req = make_shared(addr, size, flags, - dataMasterId(), pc, thread->contextId(), + dataRequestorId(), pc, thread->contextId(), std::move(amo_op)); assert(req->hasAtomicOpFunctor()); @@ -1228,7 +1228,7 @@ TimingSimpleCPU::initiateHtmCmd(Request::Flags flags) traceData->setMem(addr, size, flags); RequestPtr req = std::make_shared( - addr, size, flags, dataMasterId()); + addr, size, flags, dataRequestorId()); req->setPC(pc); req->setContext(thread->contextId()); @@ -1277,7 +1277,7 @@ TimingSimpleCPU::htmSendAbortSignal(HtmFailureFaultCause cause) // notify l1 d-cache (ruby) that core has aborted transaction RequestPtr req = std::make_shared( - addr, size, flags, dataMasterId()); + addr, size, flags, dataRequestorId()); req->setPC(pc); req->setContext(thread->contextId()); diff --git a/src/cpu/testers/directedtest/DirectedGenerator.cc b/src/cpu/testers/directedtest/DirectedGenerator.cc index 2d76b8618..44f364049 100644 --- a/src/cpu/testers/directedtest/DirectedGenerator.cc +++ b/src/cpu/testers/directedtest/DirectedGenerator.cc @@ -33,7 +33,7 @@ DirectedGenerator::DirectedGenerator(const Params *p) : SimObject(p), - masterId(p->system->getMasterId(this)) + requestorId(p->system->getRequestorId(this)) { m_num_cpus = p->num_cpus; m_directed_tester = NULL; diff --git a/src/cpu/testers/directedtest/DirectedGenerator.hh b/src/cpu/testers/directedtest/DirectedGenerator.hh index 2d033723d..f53ff079e 100644 --- a/src/cpu/testers/directedtest/DirectedGenerator.hh +++ b/src/cpu/testers/directedtest/DirectedGenerator.hh @@ -49,7 +49,7 @@ class DirectedGenerator : public SimObject protected: int m_num_cpus; - MasterID masterId; + RequestorID 
requestorId; RubyDirectedTester* m_directed_tester; }; diff --git a/src/cpu/testers/directedtest/InvalidateGenerator.cc b/src/cpu/testers/directedtest/InvalidateGenerator.cc index 5640163fd..a35c87e82 100644 --- a/src/cpu/testers/directedtest/InvalidateGenerator.cc +++ b/src/cpu/testers/directedtest/InvalidateGenerator.cc @@ -60,7 +60,8 @@ InvalidateGenerator::initiate() Packet::Command cmd; // For simplicity, requests are assumed to be 1 byte-sized - RequestPtr req = std::make_shared(m_address, 1, flags, masterId); + RequestPtr req = std::make_shared(m_address, 1, flags, + requestorId); // // Based on the current state, issue a load or a store diff --git a/src/cpu/testers/directedtest/RubyDirectedTester.py b/src/cpu/testers/directedtest/RubyDirectedTester.py index 25a5228dd..0bbcb3427 100644 --- a/src/cpu/testers/directedtest/RubyDirectedTester.py +++ b/src/cpu/testers/directedtest/RubyDirectedTester.py @@ -53,6 +53,6 @@ class InvalidateGenerator(DirectedGenerator): class RubyDirectedTester(ClockedObject): type = 'RubyDirectedTester' cxx_header = "cpu/testers/directedtest/RubyDirectedTester.hh" - cpuPort = VectorMasterPort("the cpu ports") + cpuPort = VectorRequestPort("the cpu ports") requests_to_complete = Param.Int("checks to complete") generator = Param.DirectedGenerator("the request generator") diff --git a/src/cpu/testers/directedtest/SeriesRequestGenerator.cc b/src/cpu/testers/directedtest/SeriesRequestGenerator.cc index 562b7d590..a404ee9f0 100644 --- a/src/cpu/testers/directedtest/SeriesRequestGenerator.cc +++ b/src/cpu/testers/directedtest/SeriesRequestGenerator.cc @@ -60,7 +60,8 @@ SeriesRequestGenerator::initiate() Request::Flags flags; // For simplicity, requests are assumed to be 1 byte-sized - RequestPtr req = std::make_shared(m_address, 1, flags, masterId); + RequestPtr req = std::make_shared(m_address, 1, flags, + requestorId); Packet::Command cmd; bool do_write = (random_mt.random(0, 100) < m_percent_writes); diff --git a/src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.cc b/src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.cc index 87e940ce5..dc920553e 100644 --- a/src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.cc +++ b/src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.cc @@ -90,7 +90,7 @@ GarnetSyntheticTraffic::GarnetSyntheticTraffic(const Params *p) injVnet(p->inj_vnet), precision(p->precision), responseLimit(p->response_limit), - masterId(p->system->getMasterId(this)) + requestorId(p->system->getRequestorId(this)) { // set up counters noResponseCycles = 0; @@ -290,18 +290,20 @@ GarnetSyntheticTraffic::generatePkt() if (injReqType == 0) { // generate packet for virtual network 0 requestType = MemCmd::ReadReq; - req = std::make_shared(paddr, access_size, flags, masterId); + req = std::make_shared(paddr, access_size, flags, + requestorId); } else if (injReqType == 1) { // generate packet for virtual network 1 requestType = MemCmd::ReadReq; flags.set(Request::INST_FETCH); req = std::make_shared( - 0x0, access_size, flags, masterId, 0x0, 0); + 0x0, access_size, flags, requestorId, 0x0, 0); req->setPaddr(paddr); } else { // if (injReqType == 2) // generate packet for virtual network 2 requestType = MemCmd::WriteReq; - req = std::make_shared(paddr, access_size, flags, masterId); + req = std::make_shared(paddr, access_size, flags, + requestorId); } req->setContext(id); diff --git a/src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.hh b/src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.hh 
index 524a960cb..2864ccfb5 100644 --- a/src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.hh +++ b/src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.hh @@ -130,7 +130,7 @@ class GarnetSyntheticTraffic : public ClockedObject const Cycles responseLimit; - MasterID masterId; + RequestorID requestorId; void completeRequest(PacketPtr pkt); diff --git a/src/cpu/testers/memtest/memtest.cc b/src/cpu/testers/memtest/memtest.cc index 026a3258e..134f0f671 100644 --- a/src/cpu/testers/memtest/memtest.cc +++ b/src/cpu/testers/memtest/memtest.cc @@ -91,7 +91,7 @@ MemTest::MemTest(const Params *p) percentReads(p->percent_reads), percentFunctional(p->percent_functional), percentUncacheable(p->percent_uncacheable), - masterId(p->system->getMasterId(this)), + requestorId(p->system->getRequestorId(this)), blockSize(p->system->cacheLineSize()), blockAddrMask(blockSize - 1), progressInterval(p->progress_interval), @@ -230,7 +230,7 @@ MemTest::tick() bool do_functional = (random_mt.random(0, 100) < percentFunctional) && !uncacheable; - RequestPtr req = std::make_shared(paddr, 1, flags, masterId); + RequestPtr req = std::make_shared(paddr, 1, flags, requestorId); req->setContext(id); outstandingAddrs.insert(paddr); diff --git a/src/cpu/testers/memtest/memtest.hh b/src/cpu/testers/memtest/memtest.hh index 5eb4e3575..fc61b7526 100644 --- a/src/cpu/testers/memtest/memtest.hh +++ b/src/cpu/testers/memtest/memtest.hh @@ -126,7 +126,7 @@ class MemTest : public ClockedObject const unsigned percentUncacheable; /** Request id for all generated traffic */ - MasterID masterId; + RequestorID requestorId; unsigned int id; diff --git a/src/cpu/testers/rubytest/Check.cc b/src/cpu/testers/rubytest/Check.cc index e3732bf5e..cf60097ab 100644 --- a/src/cpu/testers/rubytest/Check.cc +++ b/src/cpu/testers/rubytest/Check.cc @@ -108,7 +108,7 @@ Check::initiatePrefetch() // Prefetches are assumed to be 0 sized RequestPtr req = std::make_shared( - m_address, 0, flags, m_tester_ptr->masterId()); + m_address, 0, flags, m_tester_ptr->requestorId()); req->setPC(m_pc); req->setContext(index); @@ -147,7 +147,7 @@ Check::initiateFlush() Request::Flags flags; RequestPtr req = std::make_shared( - m_address, CHECK_SIZE, flags, m_tester_ptr->masterId()); + m_address, CHECK_SIZE, flags, m_tester_ptr->requestorId()); req->setPC(m_pc); Packet::Command cmd; @@ -181,7 +181,7 @@ Check::initiateAction() // Stores are assumed to be 1 byte-sized RequestPtr req = std::make_shared( - writeAddr, 1, flags, m_tester_ptr->masterId()); + writeAddr, 1, flags, m_tester_ptr->requestorId()); req->setPC(m_pc); req->setContext(index); @@ -246,7 +246,7 @@ Check::initiateCheck() // Checks are sized depending on the number of bytes written RequestPtr req = std::make_shared( - m_address, CHECK_SIZE, flags, m_tester_ptr->masterId()); + m_address, CHECK_SIZE, flags, m_tester_ptr->requestorId()); req->setPC(m_pc); req->setContext(index); diff --git a/src/cpu/testers/rubytest/RubyTester.cc b/src/cpu/testers/rubytest/RubyTester.cc index 8dfe99479..a64a965b9 100644 --- a/src/cpu/testers/rubytest/RubyTester.cc +++ b/src/cpu/testers/rubytest/RubyTester.cc @@ -53,7 +53,7 @@ RubyTester::RubyTester(const Params *p) : ClockedObject(p), checkStartEvent([this]{ wakeup(); }, "RubyTester tick", false, Event::CPU_Tick_Pri), - _masterId(p->system->getMasterId(this)), + _requestorId(p->system->getRequestorId(this)), m_checkTable_ptr(nullptr), m_num_cpus(p->num_cpus), m_checks_to_complete(p->checks_to_complete), diff --git a/src/cpu/testers/rubytest/RubyTester.hh 
b/src/cpu/testers/rubytest/RubyTester.hh index e63729ab5..64c33b87f 100644 --- a/src/cpu/testers/rubytest/RubyTester.hh +++ b/src/cpu/testers/rubytest/RubyTester.hh @@ -117,11 +117,11 @@ class RubyTester : public ClockedObject void print(std::ostream& out) const; bool getCheckFlush() { return m_check_flush; } - MasterID masterId() { return _masterId; } + RequestorID requestorId() { return _requestorId; } protected: EventFunctionWrapper checkStartEvent; - MasterID _masterId; + RequestorID _requestorId; private: void hitCallback(NodeID proc, SubBlock* data); diff --git a/src/cpu/testers/rubytest/RubyTester.py b/src/cpu/testers/rubytest/RubyTester.py index ecf52b668..9bcbcd1b3 100644 --- a/src/cpu/testers/rubytest/RubyTester.py +++ b/src/cpu/testers/rubytest/RubyTester.py @@ -34,9 +34,10 @@ class RubyTester(ClockedObject): type = 'RubyTester' cxx_header = "cpu/testers/rubytest/RubyTester.hh" num_cpus = Param.Int("number of cpus / RubyPorts") - cpuInstDataPort = VectorMasterPort("cpu combo ports to inst & data caches") - cpuInstPort = VectorMasterPort("cpu ports to only inst caches") - cpuDataPort = VectorMasterPort("cpu ports to only data caches") + cpuInstDataPort = VectorRequestPort("cpu combo ports to inst & " + "data caches") + cpuInstPort = VectorRequestPort("cpu ports to only inst caches") + cpuDataPort = VectorRequestPort("cpu ports to only data caches") checks_to_complete = Param.Int(100, "checks to complete") deadlock_threshold = Param.Int(50000, "how often to check for deadlock") wakeup_frequency = Param.Int(10, "number of cycles between wakeups") diff --git a/src/cpu/testers/traffic_gen/BaseTrafficGen.py b/src/cpu/testers/traffic_gen/BaseTrafficGen.py index ff50a19ba..3055348fb 100644 --- a/src/cpu/testers/traffic_gen/BaseTrafficGen.py +++ b/src/cpu/testers/traffic_gen/BaseTrafficGen.py @@ -44,7 +44,7 @@ from m5.objects.ClockedObject import ClockedObject # generated (Random, Linear, Trace etc) class StreamGenType(ScopedEnum): vals = [ 'none', 'fixed', 'random' ] -# The traffic generator is a master module that generates stimuli for +# The traffic generator is a requestor module that generates stimuli for # the memory system, based on a collection of simple behaviours that # are either probabilistic or based on traces. 
It can be used stand # alone for creating test cases for interconnect and memory @@ -57,7 +57,7 @@ class BaseTrafficGen(ClockedObject): cxx_header = "cpu/testers/traffic_gen/traffic_gen.hh" # Port used for sending requests and receiving responses - port = RequestPort("Master port") + port = RequestPort("This port sends requests and receives responses") # System used to determine the mode of the memory system system = Param.System(Parent.any, "System this generator is part of") @@ -110,9 +110,9 @@ class BaseTrafficGen(ClockedObject): def connectCachedPorts(self, bus): if hasattr(self, '_cached_ports') and (len(self._cached_ports) > 0): for p in self._cached_ports: - exec('self.%s = bus.slave' % p) + exec('self.%s = bus.cpu_side_ports' % p) else: - self.port = bus.slave + self.port = bus.cpu_side_ports def connectAllPorts(self, cached_bus, uncached_bus = None): self.connectCachedPorts(cached_bus) diff --git a/src/cpu/testers/traffic_gen/base.cc b/src/cpu/testers/traffic_gen/base.cc index bc8b60150..dcc410bf8 100644 --- a/src/cpu/testers/traffic_gen/base.cc +++ b/src/cpu/testers/traffic_gen/base.cc @@ -80,7 +80,7 @@ BaseTrafficGen::BaseTrafficGen(const BaseTrafficGenParams* p) retryPktTick(0), blockedWaitingResp(false), updateEvent([this]{ update(); }, name()), stats(this), - masterID(system->getMasterId(this)), + requestorId(system->getRequestorId(this)), streamGenerator(StreamGen::create(p)) { } @@ -358,13 +358,15 @@ BaseTrafficGen::StatGroup::StatGroup(Stats::Group *parent) std::shared_ptr BaseTrafficGen::createIdle(Tick duration) { - return std::shared_ptr(new IdleGen(*this, masterID, duration)); + return std::shared_ptr(new IdleGen(*this, requestorId, + duration)); } std::shared_ptr BaseTrafficGen::createExit(Tick duration) { - return std::shared_ptr(new ExitGen(*this, masterID, duration)); + return std::shared_ptr(new ExitGen(*this, requestorId, + duration)); } std::shared_ptr @@ -373,7 +375,7 @@ BaseTrafficGen::createLinear(Tick duration, Tick min_period, Tick max_period, uint8_t read_percent, Addr data_limit) { - return std::shared_ptr(new LinearGen(*this, masterID, + return std::shared_ptr(new LinearGen(*this, requestorId, duration, start_addr, end_addr, blocksize, system->cacheLineSize(), @@ -387,7 +389,7 @@ BaseTrafficGen::createRandom(Tick duration, Tick min_period, Tick max_period, uint8_t read_percent, Addr data_limit) { - return std::shared_ptr(new RandomGen(*this, masterID, + return std::shared_ptr(new RandomGen(*this, requestorId, duration, start_addr, end_addr, blocksize, system->cacheLineSize(), @@ -406,7 +408,7 @@ BaseTrafficGen::createDram(Tick duration, Enums::AddrMap addr_mapping, unsigned int nbr_of_ranks) { - return std::shared_ptr(new DramGen(*this, masterID, + return std::shared_ptr(new DramGen(*this, requestorId, duration, start_addr, end_addr, blocksize, system->cacheLineSize(), @@ -432,7 +434,7 @@ BaseTrafficGen::createDramRot(Tick duration, unsigned int nbr_of_ranks, unsigned int max_seq_count_per_rank) { - return std::shared_ptr(new DramRotGen(*this, masterID, + return std::shared_ptr(new DramRotGen(*this, requestorId, duration, start_addr, end_addr, blocksize, system->cacheLineSize(), @@ -467,7 +469,7 @@ BaseTrafficGen::createHybrid(Tick duration, unsigned int nbr_of_ranks_nvm, uint8_t nvm_percent) { - return std::shared_ptr(new HybridGen(*this, masterID, + return std::shared_ptr(new HybridGen(*this, requestorId, duration, start_addr_dram, end_addr_dram, blocksize_dram, start_addr_nvm, @@ -500,7 +502,7 @@ BaseTrafficGen::createNvm(Tick duration, Enums::AddrMap 
addr_mapping, unsigned int nbr_of_ranks) { - return std::shared_ptr(new NvmGen(*this, masterID, + return std::shared_ptr(new NvmGen(*this, requestorId, duration, start_addr, end_addr, blocksize, system->cacheLineSize(), @@ -519,7 +521,7 @@ BaseTrafficGen::createTrace(Tick duration, { #if HAVE_PROTOBUF return std::shared_ptr( - new TraceGen(*this, masterID, duration, trace_file, addr_offset)); + new TraceGen(*this, requestorId, duration, trace_file, addr_offset)); #else panic("Can't instantiate trace generation without Protobuf support!\n"); #endif diff --git a/src/cpu/testers/traffic_gen/base.hh b/src/cpu/testers/traffic_gen/base.hh index 6f419e88d..7c3386e72 100644 --- a/src/cpu/testers/traffic_gen/base.hh +++ b/src/cpu/testers/traffic_gen/base.hh @@ -53,7 +53,7 @@ class System; struct BaseTrafficGenParams; /** - * The traffic generator is a master module that generates stimuli for + * The traffic generator is a module that generates stimuli for * the memory system, based on a collection of simple generator * behaviours that are either probabilistic or based on traces. It can * be used stand alone for creating test cases for interconnect and @@ -123,7 +123,7 @@ class BaseTrafficGen : public ClockedObject const int maxOutstandingReqs; - /** Master port specialisation for the traffic generator */ + /** Request port specialisation for the traffic generator */ class TrafficGenPort : public RequestPort { public: @@ -157,7 +157,7 @@ class BaseTrafficGen : public ClockedObject */ void update(); - /** The instance of master port used by the traffic generator. */ + /** The instance of request port used by the traffic generator. */ TrafficGenPort port; /** Packet waiting to be sent. */ @@ -324,9 +324,9 @@ class BaseTrafficGen : public ClockedObject virtual std::shared_ptr nextGenerator() = 0; /** - * MasterID used in generated requests. + * RequestorID used in generated requests. 
*/ - const MasterID masterID; + const RequestorID requestorId; /** Currently active generator */ std::shared_ptr activeGenerator; diff --git a/src/cpu/testers/traffic_gen/base_gen.cc b/src/cpu/testers/traffic_gen/base_gen.cc index d5cdf716f..d8ce00132 100644 --- a/src/cpu/testers/traffic_gen/base_gen.cc +++ b/src/cpu/testers/traffic_gen/base_gen.cc @@ -46,8 +46,8 @@ #include "debug/TrafficGen.hh" #include "sim/system.hh" -BaseGen::BaseGen(SimObject &obj, MasterID master_id, Tick _duration) - : _name(obj.name()), masterID(master_id), +BaseGen::BaseGen(SimObject &obj, RequestorID requestor_id, Tick _duration) + : _name(obj.name()), requestorId(requestor_id), duration(_duration) { } @@ -57,10 +57,11 @@ BaseGen::getPacket(Addr addr, unsigned size, const MemCmd& cmd, Request::FlagsType flags) { // Create new request - RequestPtr req = std::make_shared(addr, size, flags, masterID); + RequestPtr req = std::make_shared(addr, size, flags, + requestorId); // Dummy PC to have PC-based prefetchers latch on; get entropy into higher // bits - req->setPC(((Addr)masterID) << 2); + req->setPC(((Addr)requestorId) << 2); // Embed it in a packet PacketPtr pkt = new Packet(req, cmd); @@ -69,19 +70,19 @@ BaseGen::getPacket(Addr addr, unsigned size, const MemCmd& cmd, pkt->dataDynamic(pkt_data); if (cmd.isWrite()) { - std::fill_n(pkt_data, req->getSize(), (uint8_t)masterID); + std::fill_n(pkt_data, req->getSize(), (uint8_t)requestorId); } return pkt; } StochasticGen::StochasticGen(SimObject &obj, - MasterID master_id, Tick _duration, + RequestorID requestor_id, Tick _duration, Addr start_addr, Addr end_addr, Addr _blocksize, Addr cacheline_size, Tick min_period, Tick max_period, uint8_t read_percent, Addr data_limit) - : BaseGen(obj, master_id, _duration), + : BaseGen(obj, requestor_id, _duration), startAddr(start_addr), endAddr(end_addr), blocksize(_blocksize), cacheLineSize(cacheline_size), minPeriod(min_period), maxPeriod(max_period), diff --git a/src/cpu/testers/traffic_gen/base_gen.hh b/src/cpu/testers/traffic_gen/base_gen.hh index 0f51b6c76..ab9d3856e 100644 --- a/src/cpu/testers/traffic_gen/base_gen.hh +++ b/src/cpu/testers/traffic_gen/base_gen.hh @@ -62,8 +62,8 @@ class BaseGen /** Name to use for status and debug printing */ const std::string _name; - /** The MasterID used for generating requests */ - const MasterID masterID; + /** The RequestorID used for generating requests */ + const RequestorID requestorId; /** * Generate a new request and associated packet @@ -85,10 +85,10 @@ class BaseGen * Create a base generator. 
* * @param obj simobject owning the generator - * @param master_id MasterID set on each request + * @param requestor_id RequestorID set on each request * @param _duration duration of this state before transitioning */ - BaseGen(SimObject &obj, MasterID master_id, Tick _duration); + BaseGen(SimObject &obj, RequestorID requestor_id, Tick _duration); virtual ~BaseGen() { } @@ -133,7 +133,7 @@ class StochasticGen : public BaseGen { public: StochasticGen(SimObject &obj, - MasterID master_id, Tick _duration, + RequestorID requestor_id, Tick _duration, Addr start_addr, Addr end_addr, Addr _blocksize, Addr cacheline_size, Tick min_period, Tick max_period, diff --git a/src/cpu/testers/traffic_gen/dram_gen.cc b/src/cpu/testers/traffic_gen/dram_gen.cc index 0534a8a65..e29f6d147 100644 --- a/src/cpu/testers/traffic_gen/dram_gen.cc +++ b/src/cpu/testers/traffic_gen/dram_gen.cc @@ -45,7 +45,7 @@ #include "enums/AddrMap.hh" DramGen::DramGen(SimObject &obj, - MasterID master_id, Tick _duration, + RequestorID requestor_id, Tick _duration, Addr start_addr, Addr end_addr, Addr _blocksize, Addr cacheline_size, Tick min_period, Tick max_period, @@ -55,7 +55,7 @@ DramGen::DramGen(SimObject &obj, unsigned int nbr_of_banks_util, Enums::AddrMap addr_mapping, unsigned int nbr_of_ranks) - : RandomGen(obj, master_id, _duration, start_addr, end_addr, + : RandomGen(obj, requestor_id, _duration, start_addr, end_addr, _blocksize, cacheline_size, min_period, max_period, read_percent, data_limit), numSeqPkts(num_seq_pkts), countNumSeqPkts(0), addr(0), diff --git a/src/cpu/testers/traffic_gen/dram_gen.hh b/src/cpu/testers/traffic_gen/dram_gen.hh index 081ca2425..b09081bd2 100644 --- a/src/cpu/testers/traffic_gen/dram_gen.hh +++ b/src/cpu/testers/traffic_gen/dram_gen.hh @@ -64,7 +64,7 @@ class DramGen : public RandomGen * Create a DRAM address sequence generator. 
* * @param obj SimObject owning this sequence generator - * @param master_id MasterID related to the memory requests + * @param requestor_id RequestorID related to the memory requests * @param _duration duration of this state before transitioning * @param start_addr Start address * @param end_addr End address @@ -83,7 +83,7 @@ class DramGen : public RandomGen * assumes single channel system */ DramGen(SimObject &obj, - MasterID master_id, Tick _duration, + RequestorID requestor_id, Tick _duration, Addr start_addr, Addr end_addr, Addr _blocksize, Addr cacheline_size, Tick min_period, Tick max_period, diff --git a/src/cpu/testers/traffic_gen/dram_rot_gen.hh b/src/cpu/testers/traffic_gen/dram_rot_gen.hh index bb53d49ac..34140ac88 100644 --- a/src/cpu/testers/traffic_gen/dram_rot_gen.hh +++ b/src/cpu/testers/traffic_gen/dram_rot_gen.hh @@ -63,7 +63,7 @@ class DramRotGen : public DramGen * 3) Ranks per channel * * @param obj SimObject owning this sequence generator - * @param master_id MasterID related to the memory requests + * @param requestor_id RequestorID related to the memory requests * @param _duration duration of this state before transitioning * @param start_addr Start address * @param end_addr End address @@ -82,7 +82,7 @@ class DramRotGen : public DramGen * @param addr_mapping Address mapping to be used, * assumes single channel system */ - DramRotGen(SimObject &obj, MasterID master_id, Tick _duration, + DramRotGen(SimObject &obj, RequestorID requestor_id, Tick _duration, Addr start_addr, Addr end_addr, Addr _blocksize, Addr cacheline_size, Tick min_period, Tick max_period, @@ -92,7 +92,7 @@ class DramRotGen : public DramGen Enums::AddrMap addr_mapping, unsigned int nbr_of_ranks, unsigned int max_seq_count_per_rank) - : DramGen(obj, master_id, _duration, start_addr, end_addr, + : DramGen(obj, requestor_id, _duration, start_addr, end_addr, _blocksize, cacheline_size, min_period, max_period, read_percent, data_limit, num_seq_pkts, page_size, nbr_of_banks_DRAM, diff --git a/src/cpu/testers/traffic_gen/exit_gen.hh b/src/cpu/testers/traffic_gen/exit_gen.hh index ec863f702..65939a6a8 100644 --- a/src/cpu/testers/traffic_gen/exit_gen.hh +++ b/src/cpu/testers/traffic_gen/exit_gen.hh @@ -54,8 +54,8 @@ class ExitGen : public BaseGen public: - ExitGen(SimObject &obj, MasterID master_id, Tick _duration) - : BaseGen(obj, master_id, _duration) + ExitGen(SimObject &obj, RequestorID requestor_id, Tick _duration) + : BaseGen(obj, requestor_id, _duration) { } void enter(); diff --git a/src/cpu/testers/traffic_gen/hybrid_gen.cc b/src/cpu/testers/traffic_gen/hybrid_gen.cc index 303884a60..638d7a327 100644 --- a/src/cpu/testers/traffic_gen/hybrid_gen.cc +++ b/src/cpu/testers/traffic_gen/hybrid_gen.cc @@ -49,7 +49,7 @@ using namespace std; HybridGen::HybridGen(SimObject &obj, - MasterID master_id, Tick _duration, + RequestorID requestor_id, Tick _duration, Addr start_addr_dram, Addr end_addr_dram, Addr blocksize_dram, Addr start_addr_nvm, Addr end_addr_nvm, @@ -67,7 +67,7 @@ HybridGen::HybridGen(SimObject &obj, unsigned int nbr_of_ranks_dram, unsigned int nbr_of_ranks_nvm, uint8_t nvm_percent) - : BaseGen(obj, master_id, _duration), + : BaseGen(obj, requestor_id, _duration), startAddrDram(start_addr_dram), endAddrDram(end_addr_dram), blocksizeDram(blocksize_dram), diff --git a/src/cpu/testers/traffic_gen/hybrid_gen.hh b/src/cpu/testers/traffic_gen/hybrid_gen.hh index 795826c14..59ac87fdb 100644 --- a/src/cpu/testers/traffic_gen/hybrid_gen.hh +++ b/src/cpu/testers/traffic_gen/hybrid_gen.hh @@ -66,7 +66,7 
@@ class HybridGen : public BaseGen * Create a hybrid DRAM + NVM address sequence generator. * * @param obj SimObject owning this sequence generator - * @param master_id MasterID related to the memory requests + * @param requestor_id RequestorID related to the memory requests * @param _duration duration of this state before transitioning * @param start_addr_dram Start address for DRAM range * @param end_addr_dram End address for DRAM range @@ -96,7 +96,7 @@ class HybridGen : public BaseGen * @param nvm_percent Percentage of traffic going to NVM */ HybridGen(SimObject &obj, - MasterID master_id, Tick _duration, + RequestorID requestor_id, Tick _duration, Addr start_addr_dram, Addr end_addr_dram, Addr blocksize_dram, Addr start_addr_nvm, Addr end_addr_nvm, diff --git a/src/cpu/testers/traffic_gen/idle_gen.hh b/src/cpu/testers/traffic_gen/idle_gen.hh index 761b717ac..40e98b8be 100644 --- a/src/cpu/testers/traffic_gen/idle_gen.hh +++ b/src/cpu/testers/traffic_gen/idle_gen.hh @@ -56,8 +56,8 @@ class IdleGen : public BaseGen public: - IdleGen(SimObject &obj, MasterID master_id, Tick _duration) - : BaseGen(obj, master_id, _duration) + IdleGen(SimObject &obj, RequestorID requestor_id, Tick _duration) + : BaseGen(obj, requestor_id, _duration) { } void enter(); diff --git a/src/cpu/testers/traffic_gen/linear_gen.hh b/src/cpu/testers/traffic_gen/linear_gen.hh index b6f428258..fbd3d8f9f 100644 --- a/src/cpu/testers/traffic_gen/linear_gen.hh +++ b/src/cpu/testers/traffic_gen/linear_gen.hh @@ -67,7 +67,7 @@ class LinearGen : public StochasticGen * time. * * @param obj SimObject owning this sequence generator - * @param master_id MasterID related to the memory requests + * @param requestor_id RequestorID related to the memory requests * @param _duration duration of this state before transitioning * @param start_addr Start address * @param end_addr End address @@ -79,12 +79,12 @@ class LinearGen : public StochasticGen * @param data_limit Upper limit on how much data to read/write */ LinearGen(SimObject &obj, - MasterID master_id, Tick _duration, + RequestorID requestor_id, Tick _duration, Addr start_addr, Addr end_addr, Addr _blocksize, Addr cacheline_size, Tick min_period, Tick max_period, uint8_t read_percent, Addr data_limit) - : StochasticGen(obj, master_id, _duration, start_addr, end_addr, + : StochasticGen(obj, requestor_id, _duration, start_addr, end_addr, _blocksize, cacheline_size, min_period, max_period, read_percent, data_limit), nextAddr(0), diff --git a/src/cpu/testers/traffic_gen/nvm_gen.cc b/src/cpu/testers/traffic_gen/nvm_gen.cc index fa3efd3af..2191b4e69 100644 --- a/src/cpu/testers/traffic_gen/nvm_gen.cc +++ b/src/cpu/testers/traffic_gen/nvm_gen.cc @@ -47,7 +47,7 @@ #include "enums/AddrMap.hh" NvmGen::NvmGen(SimObject &obj, - MasterID master_id, Tick _duration, + RequestorID requestor_id, Tick _duration, Addr start_addr, Addr end_addr, Addr _blocksize, Addr cacheline_size, Tick min_period, Tick max_period, @@ -57,7 +57,7 @@ NvmGen::NvmGen(SimObject &obj, unsigned int nbr_of_banks_util, Enums::AddrMap addr_mapping, unsigned int nbr_of_ranks) - : RandomGen(obj, master_id, _duration, start_addr, end_addr, + : RandomGen(obj, requestor_id, _duration, start_addr, end_addr, _blocksize, cacheline_size, min_period, max_period, read_percent, data_limit), numSeqPkts(num_seq_pkts), countNumSeqPkts(0), addr(0), diff --git a/src/cpu/testers/traffic_gen/nvm_gen.hh b/src/cpu/testers/traffic_gen/nvm_gen.hh index c5a89ee97..9251898af 100644 --- a/src/cpu/testers/traffic_gen/nvm_gen.hh +++ 
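For the simple generators (ExitGen and IdleGen above) the entire change is the constructor signature: the renamed ID is forwarded to BaseGen untouched. Shown before and after for one of them, as a reminder that no behaviour changes:

    // Before the rename:
    //   ExitGen(SimObject &obj, MasterID master_id, Tick _duration)
    //       : BaseGen(obj, master_id, _duration) {}
    //
    // After the rename (as in the hunk above):
    ExitGen(SimObject &obj, RequestorID requestor_id, Tick _duration)
        : BaseGen(obj, requestor_id, _duration)
    {}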
b/src/cpu/testers/traffic_gen/nvm_gen.hh @@ -66,7 +66,7 @@ class NvmGen : public RandomGen * Create a NVM address sequence generator. * * @param obj SimObject owning this sequence generator - * @param master_id MasterID related to the memory requests + * @param requestor_id RequestorID related to the memory requests * @param _duration duration of this state before transitioning * @param start_addr Start address * @param end_addr End address @@ -85,7 +85,7 @@ class NvmGen : public RandomGen * assumes single channel system */ NvmGen(SimObject &obj, - MasterID master_id, Tick _duration, + RequestorID requestor_id, Tick _duration, Addr start_addr, Addr end_addr, Addr _blocksize, Addr cacheline_size, Tick min_period, Tick max_period, diff --git a/src/cpu/testers/traffic_gen/random_gen.hh b/src/cpu/testers/traffic_gen/random_gen.hh index ba398a977..3bf1a862d 100644 --- a/src/cpu/testers/traffic_gen/random_gen.hh +++ b/src/cpu/testers/traffic_gen/random_gen.hh @@ -66,21 +66,23 @@ class RandomGen : public StochasticGen * * @param gen Traffic generator owning this sequence generator * @param _duration duration of this state before transitioning + * @param requestor_id RequestorID related to the memory requests * @param start_addr Start address * @param end_addr End address * @param _blocksize Size used for transactions injected + * @param cacheline_size cache line size in the system * @param min_period Lower limit of random inter-transaction time * @param max_period Upper limit of random inter-transaction time * @param read_percent Percent of transactions that are reads * @param data_limit Upper limit on how much data to read/write */ RandomGen(SimObject &obj, - MasterID master_id, Tick _duration, + RequestorID requestor_id, Tick _duration, Addr start_addr, Addr end_addr, Addr _blocksize, Addr cacheline_size, Tick min_period, Tick max_period, uint8_t read_percent, Addr data_limit) - : StochasticGen(obj, master_id, _duration, start_addr, end_addr, + : StochasticGen(obj, requestor_id, _duration, start_addr, end_addr, _blocksize, cacheline_size, min_period, max_period, read_percent, data_limit), dataManipulated(0) diff --git a/src/cpu/testers/traffic_gen/trace_gen.hh b/src/cpu/testers/traffic_gen/trace_gen.hh index fe386d826..a31868e13 100644 --- a/src/cpu/testers/traffic_gen/trace_gen.hh +++ b/src/cpu/testers/traffic_gen/trace_gen.hh @@ -148,14 +148,14 @@ class TraceGen : public BaseGen * Create a trace generator. 
* * @param obj SimObject owning this sequence generator - * @param master_id MasterID related to the memory requests + * @param requestor_id RequestorID related to the memory requests * @param _duration duration of this state before transitioning * @param trace_file File to read the transactions from * @param addr_offset Positive offset to add to trace address */ - TraceGen(SimObject &obj, MasterID master_id, Tick _duration, + TraceGen(SimObject &obj, RequestorID requestor_id, Tick _duration, const std::string& trace_file, Addr addr_offset) - : BaseGen(obj, master_id, _duration), + : BaseGen(obj, requestor_id, _duration), trace(trace_file), tickOffset(0), addrOffset(addr_offset), diff --git a/src/cpu/testers/traffic_gen/traffic_gen.hh b/src/cpu/testers/traffic_gen/traffic_gen.hh index 3f1c2ab51..d90df6492 100644 --- a/src/cpu/testers/traffic_gen/traffic_gen.hh +++ b/src/cpu/testers/traffic_gen/traffic_gen.hh @@ -45,7 +45,7 @@ struct TrafficGenParams; /** - * The traffic generator is a master module that generates stimuli for + * The traffic generator is a module that generates stimuli for * the memory system, based on a collection of simple behaviours that * are either probabilistic or based on traces. It can be used stand * alone for creating test cases for interconnect and memory @@ -53,7 +53,7 @@ struct TrafficGenParams; * components that are not yet modelled in detail, e.g. a video engine * or baseband subsystem in an SoC. * - * The traffic generator has a single master port that is used to send + * The traffic generator has a single request port that is used to send * requests, independent of the specific behaviour. The behaviour of * the traffic generator is specified in a configuration file, and this * file describes a state transition graph where each state is a diff --git a/src/cpu/trace/trace_cpu.cc b/src/cpu/trace/trace_cpu.cc index 80db94c4c..3ac3207be 100644 --- a/src/cpu/trace/trace_cpu.cc +++ b/src/cpu/trace/trace_cpu.cc @@ -46,12 +46,12 @@ TraceCPU::TraceCPU(TraceCPUParams *params) : BaseCPU(params), icachePort(this), dcachePort(this), - instMasterID(params->system->getMasterId(this, "inst")), - dataMasterID(params->system->getMasterId(this, "data")), + instRequestorID(params->system->getRequestorId(this, "inst")), + dataRequestorID(params->system->getRequestorId(this, "data")), instTraceFile(params->instTraceFile), dataTraceFile(params->dataTraceFile), - icacheGen(*this, "iside", icachePort, instMasterID, instTraceFile), - dcacheGen(*this, "dside", dcachePort, dataMasterID, dataTraceFile, + icacheGen(*this, ".iside", icachePort, instRequestorID, instTraceFile), + dcacheGen(*this, ".dside", dcachePort, dataRequestorID, dataTraceFile, params), icacheNextEvent([this]{ schedIcacheNext(); }, name()), dcacheNextEvent([this]{ schedDcacheNext(); }, name()), @@ -593,7 +593,7 @@ TraceCPU::ElasticDataGen::executeMemReq(GraphNode* node_ptr) // Create a request and the packet containing request auto req = std::make_shared( - node_ptr->physAddr, node_ptr->size, node_ptr->flags, masterID); + node_ptr->physAddr, node_ptr->size, node_ptr->flags, requestorId); req->setReqInstSeqNum(node_ptr->seqNum); // If this is not done it triggers assert in L1 cache for invalid contextId @@ -604,7 +604,7 @@ TraceCPU::ElasticDataGen::executeMemReq(GraphNode* node_ptr) // of the request. 
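The TraceCPU constructor above shows the renamed System hook for obtaining IDs: getRequestorId() replaces getMasterId(), optionally qualified by a sub-requestor name. A short sketch of the lookup as it would appear inside a SimObject constructor (the local variable names are illustrative):

    // Two streams of traffic, each with its own RequestorID, as in the
    // TraceCPU hunk above.
    const RequestorID instId = params->system->getRequestorId(this, "inst");
    const RequestorID dataId = params->system->getRequestorId(this, "data");

    // Devices with a single stream use the unqualified form, as Gicv3Its
    // and SMMUv3 do later in this patch.
    const RequestorID devId = params->system->getRequestorId(this);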
if (node_ptr->virtAddr != 0) { req->setVirt(node_ptr->virtAddr, node_ptr->size, - node_ptr->flags, masterID, node_ptr->pc); + node_ptr->flags, requestorId, node_ptr->pc); req->setPaddr(node_ptr->physAddr); req->setReqInstSeqNum(node_ptr->seqNum); } @@ -619,7 +619,7 @@ TraceCPU::ElasticDataGen::executeMemReq(GraphNode* node_ptr) } pkt->dataDynamic(pkt_data); - // Call MasterPort method to send a timing request for this packet + // Call RequestPort method to send a timing request for this packet bool success = port.sendTimingReq(pkt); ++elasticStats.numSendAttempted; @@ -1073,7 +1073,7 @@ TraceCPU::FixedRetryGen::send(Addr addr, unsigned size, const MemCmd& cmd, { // Create new request - auto req = std::make_shared(addr, size, flags, masterID); + auto req = std::make_shared(addr, size, flags, requestorId); req->setPC(pc); // If this is not done it triggers assert in L1 cache for invalid contextId @@ -1089,7 +1089,7 @@ TraceCPU::FixedRetryGen::send(Addr addr, unsigned size, const MemCmd& cmd, memset(pkt_data, 0xA, req->getSize()); } - // Call MasterPort method to send a timing request for this packet + // Call RequestPort method to send a timing request for this packet bool success = port.sendTimingReq(pkt); if (!success) { // If it fails, save the packet to retry when a retry is signalled by diff --git a/src/cpu/trace/trace_cpu.hh b/src/cpu/trace/trace_cpu.hh index 8754bfdf4..ba1c5e641 100644 --- a/src/cpu/trace/trace_cpu.hh +++ b/src/cpu/trace/trace_cpu.hh @@ -65,7 +65,7 @@ * same trace is used for playback on different memory sub-systems. * * The TraceCPU inherits from BaseCPU so some virtual methods need to be - * defined. It has two port subclasses inherited from MasterPort for + * defined. It has two port subclasses inherited from RequestPort for * instruction and data ports. It issues the memory requests deducing the * timing from the trace and without performing real execution of micro-ops. As * soon as the last dependency for an instruction is complete, its @@ -321,11 +321,11 @@ class TraceCPU : public BaseCPU /** Port to connect to L1 data cache. */ DcachePort dcachePort; - /** Master id for instruction read requests. */ - const MasterID instMasterID; + /** Requestor id for instruction read requests. */ + const RequestorID instRequestorID; - /** Master id for data read and write requests. */ - const MasterID dataMasterID; + /** Requestor id for data read and write requests. */ + const RequestorID dataRequestorID; /** File names for input instruction and data traces. */ std::string instTraceFile, dataTraceFile; @@ -423,11 +423,11 @@ class TraceCPU : public BaseCPU public: /* Constructor */ FixedRetryGen(TraceCPU& _owner, const std::string& _name, - RequestPort& _port, MasterID master_id, + RequestPort& _port, RequestorID requestor_id, const std::string& trace_file) : owner(_owner), port(_port), - masterID(master_id), + requestorId(requestor_id), trace(trace_file), genName(owner.name() + ".fixedretry." + _name), retryPkt(nullptr), @@ -502,8 +502,8 @@ class TraceCPU : public BaseCPU /** Reference of the port to be used to issue memory requests. */ RequestPort& port; - /** MasterID used for the requests being sent. */ - const MasterID masterID; + /** RequestorID used for the requests being sent. */ + const RequestorID requestorId; /** Input stream used for reading the input trace file. 
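FixedRetryGen::send() above also exercises the renamed port-level idiom: a RequestPort (formerly MasterPort) may refuse a timing request, in which case the packet is parked until the port's recvReqRetry() callback fires. A reduced, hedged sketch of that handshake (the free-function shape and names trySend/onReqRetry are illustrative; the real code lives in the generator and its port):

    // Attempt to send; on refusal, park the packet for a later retry.
    void
    trySend(RequestPort &port, PacketPtr pkt, PacketPtr &retryPkt)
    {
        if (!port.sendTimingReq(pkt))
            retryPkt = pkt;
    }

    // Invoked when the peer signals it can accept requests again.
    void
    onReqRetry(RequestPort &port, PacketPtr &retryPkt)
    {
        if (retryPkt && port.sendTimingReq(retryPkt))
            retryPkt = nullptr;
    }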
*/ InputStream trace; @@ -852,11 +852,11 @@ class TraceCPU : public BaseCPU public: /* Constructor */ ElasticDataGen(TraceCPU& _owner, const std::string& _name, - RequestPort& _port, MasterID master_id, + RequestPort& _port, RequestorID requestor_id, const std::string& trace_file, TraceCPUParams *params) : owner(_owner), port(_port), - masterID(master_id), + requestorId(requestor_id), trace(trace_file, 1.0 / params->freqMultiplier), genName(owner.name() + ".elastic." + _name), retryPkt(nullptr), @@ -990,8 +990,8 @@ class TraceCPU : public BaseCPU /** Reference of the port to be used to issue memory requests. */ RequestPort& port; - /** MasterID used for the requests being sent. */ - const MasterID masterID; + /** RequestorID used for the requests being sent. */ + const RequestorID requestorId; /** Input stream used for reading the input trace file. */ InputStream trace; diff --git a/src/dev/arm/RealView.py b/src/dev/arm/RealView.py index 684567fa0..9ab04725d 100644 --- a/src/dev/arm/RealView.py +++ b/src/dev/arm/RealView.py @@ -93,8 +93,9 @@ class AmbaDmaDevice(DmaDevice): type = 'AmbaDmaDevice' abstract = True cxx_header = "dev/arm/amba_device.hh" - pio_addr = Param.Addr("Address for AMBA slave interface") - pio_latency = Param.Latency("10ns", "Time between action and write/read result by AMBA DMA Device") + pio_addr = Param.Addr("Address for AMBA responder interface") + pio_latency = Param.Latency("10ns", "Time between action and write/read" + "result by AMBA DMA Device") interrupt = Param.ArmInterruptPin("Interrupt that connects to GIC") amba_id = Param.UInt32("ID of AMBA device for kernel detection") @@ -576,16 +577,16 @@ class RealView(Platform): def _attach_memory(self, mem, bus, mem_ports=None): if hasattr(mem, "port"): if mem_ports is None: - mem.port = bus.master + mem.port = bus.mem_side_ports else: mem_ports.append(mem.port) def _attach_device(self, device, bus, dma_ports=None): if hasattr(device, "pio"): - device.pio = bus.master + device.pio = bus.mem_side_ports if hasattr(device, "dma"): if dma_ports is None: - device.dma = bus.slave + device.dma = bus.cpu_side_ports else: dma_ports.append(device.dma) @@ -1092,15 +1093,15 @@ Interrupts: """ Instantiate a single SMMU and attach a group of client devices to it. The devices' dma port is wired to the SMMU and the SMMU's dma port - (master) is attached to the bus. In order to make it work, the list - of clients shouldn't contain any device part of the _off_chip_devices - or _on_chip_devices. + is attached to the bus. In order to make it work, the list of clients + shouldn't contain any device part of the _off_chip_devices or + _on_chip_devices. This method should be called only once. Parameters: devices (list): List of devices which will be using the SMMU - bus (Bus): The bus downstream of the SMMU. Its slave port will - receive memory requests from the SMMU, and its master + bus (Bus): The bus downstream of the SMMU. 
Its response port will + receive memory requests from the SMMU, and its request port will forward accesses to the memory mapped devices """ if hasattr(self, 'smmu'): @@ -1108,8 +1109,8 @@ Interrupts: self.smmu = SMMUv3(reg_map=AddrRange(0x2b400000, size=0x00020000)) - self.smmu.master = bus.slave - self.smmu.control = bus.master + self.smmu.request = bus.cpu_side_ports + self.smmu.control = bus.mem_side_ports dma_ports = [] for dev in devices: diff --git a/src/dev/arm/SConscript b/src/dev/arm/SConscript index 7041bd979..46ff259ae 100644 --- a/src/dev/arm/SConscript +++ b/src/dev/arm/SConscript @@ -77,7 +77,7 @@ if env['TARGET_ISA'] == 'arm': Source('smmu_v3_ports.cc'); Source('smmu_v3_proc.cc'); Source('smmu_v3_ptops.cc'); - Source('smmu_v3_slaveifc.cc'); + Source('smmu_v3_deviceifc.cc'); Source('smmu_v3_transl.cc'); Source('timer_sp804.cc') Source('watchdog_sp805.cc') diff --git a/src/dev/arm/SMMUv3.py b/src/dev/arm/SMMUv3.py index 0b9ab2128..29c15682b 100644 --- a/src/dev/arm/SMMUv3.py +++ b/src/dev/arm/SMMUv3.py @@ -39,13 +39,21 @@ from m5.util.fdthelper import * from m5.SimObject import * from m5.objects.ClockedObject import ClockedObject -class SMMUv3SlaveInterface(ClockedObject): - type = 'SMMUv3SlaveInterface' - cxx_header = 'dev/arm/smmu_v3_slaveifc.hh' - - slave = ResponsePort('Device port') - ats_master = RequestPort('ATS master port') - ats_slave = ResponsePort('ATS slave port') +class SMMUv3DeviceInterface(ClockedObject): + type = 'SMMUv3DeviceInterface' + cxx_header = 'dev/arm/smmu_v3_deviceifc.hh' + + device_port = ResponsePort('Device port') + slave = DeprecatedParam(device_port, + '`slave` is now called `device_port`') + ats_mem_side_port = RequestPort('ATS mem side port,' + 'sends requests and receives responses') + ats_master = DeprecatedParam(ats_mem_side_port, + '`ats_master` is now called `ats_mem_side_port`') + ats_dev_side_port = ResponsePort('ATS dev_side_port,' + 'sends responses and receives requests') + ats_slave = DeprecatedParam(ats_dev_side_port, + '`ats_slave` is now called `ats_dev_side_port`') port_width = Param.Unsigned(16, 'Port width in bytes (= 1 beat)') wrbuf_slots = Param.Unsigned(16, 'Write buffer size (in beats)') @@ -74,18 +82,19 @@ class SMMUv3(ClockedObject): type = 'SMMUv3' cxx_header = 'dev/arm/smmu_v3.hh' - master = RequestPort('Master port') - master_walker = RequestPort( - 'Master port for SMMU initiated HWTW requests (optional)') + request = RequestPort('Request port') + walker = RequestPort( + 'Request port for SMMU initiated HWTW requests (optional)') control = ResponsePort( 'Control port for accessing memory-mapped registers') sample_period = Param.Clock('10us', 'Stats sample period') reg_map = Param.AddrRange('Address range for control registers') system = Param.System(Parent.any, "System this device is part of") - slave_interfaces = VectorParam.SMMUv3SlaveInterface([], "Slave interfaces") + device_interfaces = VectorParam.SMMUv3DeviceInterface([], + "Responder interfaces") - # SLAVE INTERFACE<->SMMU link parameters + # RESPONDER INTERFACE<->SMMU link parameters ifc_smmu_lat = Param.Cycles(8, 'IFC to SMMU communication latency') smmu_ifc_lat = Param.Cycles(8, 'SMMU to IFC communication latency') @@ -93,8 +102,8 @@ class SMMUv3(ClockedObject): xlate_slots = Param.Unsigned(64, 'SMMU translation slots') ptw_slots = Param.Unsigned(16, 'SMMU page table walk slots') - master_port_width = Param.Unsigned(16, - 'Master port width in bytes (= 1 beat)') + request_port_width = Param.Unsigned(16, + 'Request port width in bytes (= 1 beat)') 
tlb_entries = Param.Unsigned(2048, 'TLB size (entries)') tlb_assoc = Param.Unsigned(4, 'TLB associativity (0=full)') @@ -185,23 +194,23 @@ class SMMUv3(ClockedObject): def connect(self, device): """ - Helper method used to connect the SMMU. The master could + Helper method used to connect the SMMU. The requestor could be either a dma port (if the SMMU is attached directly to a - dma device), or to a master port (this is the case where the SMMU + dma device), or to a request port (this is the case where the SMMU is attached to a bridge). """ - slave_interface = SMMUv3SlaveInterface() + device_interface = SMMUv3DeviceInterface() - if hasattr(device, "master"): - slave_interface.slave = device.master + if hasattr(device, "request_port"): + device_interface.device_port = device.request_port elif hasattr(device, "dma"): - slave_interface.slave = device.dma + device_interface.device_port = device.dma else: print("Unable to attach SMMUv3\n") sys.exit(1) - self.slave_interfaces.append(slave_interface) + self.device_interfaces.append(device_interface) # Storing a reference to the smmu to be used when generating # the binding in the device DTB. diff --git a/src/dev/arm/UFSHostDevice.py b/src/dev/arm/UFSHostDevice.py index a444a9a48..879503a3f 100644 --- a/src/dev/arm/UFSHostDevice.py +++ b/src/dev/arm/UFSHostDevice.py @@ -42,7 +42,7 @@ from m5.objects.AbstractNVM import * class UFSHostDevice(DmaDevice): type = 'UFSHostDevice' cxx_header = "dev/arm/ufs_device.hh" - pio_addr = Param.Addr("Address for SCSI configuration slave interface") + pio_addr = Param.Addr("Address for SCSI configuration responder interface") pio_latency = Param.Latency("10ns", "Time between action and write/read \ result by AMBA DMA Device") gic = Param.BaseGic(Parent.any, "Gic to use for interrupting") diff --git a/src/dev/arm/amba.hh b/src/dev/arm/amba.hh index 4bfba345c..cfc3f56b0 100644 --- a/src/dev/arm/amba.hh +++ b/src/dev/arm/amba.hh @@ -43,12 +43,12 @@ namespace AMBA { -typedef MasterID OrderID; +typedef RequestorID OrderID; static OrderID orderId(PacketPtr pkt) { - return pkt->req->masterId(); + return pkt->req->requestorId(); } } // namespace AMBA diff --git a/src/dev/arm/gic_v3_its.cc b/src/dev/arm/gic_v3_its.cc index 4ea721958..a3bb6726d 100644 --- a/src/dev/arm/gic_v3_its.cc +++ b/src/dev/arm/gic_v3_its.cc @@ -89,7 +89,7 @@ ItsProcess::doRead(Yield &yield, Addr addr, void *ptr, size_t size) a.type = ItsActionType::SEND_REQ; RequestPtr req = std::make_shared( - addr, size, 0, its.masterId); + addr, size, 0, its.requestorId); req->taskId(ContextSwitchTaskId::DMA); @@ -113,7 +113,7 @@ ItsProcess::doWrite(Yield &yield, Addr addr, void *ptr, size_t size) a.type = ItsActionType::SEND_REQ; RequestPtr req = std::make_shared( - addr, size, 0, its.masterId); + addr, size, 0, its.requestorId); req->taskId(ContextSwitchTaskId::DMA); @@ -779,7 +779,7 @@ Gicv3Its::Gicv3Its(const Gicv3ItsParams *params) gitsCbaser(0), gitsCreadr(0), gitsCwriter(0), gitsIidr(0), tableBases(NUM_BASER_REGS, 0), - masterId(params->system->getMasterId(this)), + requestorId(params->system->getRequestorId(this)), gic(nullptr), commandEvent([this] { checkCommandQueue(); }, name()), pendingCommands(false), diff --git a/src/dev/arm/gic_v3_its.hh b/src/dev/arm/gic_v3_its.hh index 54beb3ef6..e3b8734b0 100644 --- a/src/dev/arm/gic_v3_its.hh +++ b/src/dev/arm/gic_v3_its.hh @@ -319,7 +319,7 @@ class Gicv3Its : public BasicPioDevice private: std::queue packetsToRetry; - uint32_t masterId; + uint32_t requestorId; Gicv3 *gic; EventFunctionWrapper commandEvent; diff 
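The amba.hh hunk is the accessor-level summary of the rename: code that read pkt->req->masterId() now reads requestorId(), and types layered on top of MasterID (such as AMBA::OrderID) simply alias RequestorID. A small hedged sketch of a consumer under the new name (sameRequestor is an illustrative helper, not part of the patch):

    #include "mem/packet.hh"
    #include "mem/request.hh"

    // Group or order packets by their originating requestor.
    bool
    sameRequestor(const PacketPtr a, const PacketPtr b)
    {
        // requestorId() replaces the old masterId() accessor.
        return a->req->requestorId() == b->req->requestorId();
    }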
--git a/src/dev/arm/smmu_v3.cc b/src/dev/arm/smmu_v3.cc index f9d99da11..f9bdc277c 100644 --- a/src/dev/arm/smmu_v3.cc +++ b/src/dev/arm/smmu_v3.cc @@ -54,9 +54,9 @@ SMMUv3::SMMUv3(SMMUv3Params *params) : ClockedObject(params), system(*params->system), - masterId(params->system->getMasterId(this)), - masterPort(name() + ".master", *this), - masterTableWalkPort(name() + ".master_walker", *this), + requestorId(params->system->getRequestorId(this)), + requestPort(name() + ".request", *this), + tableWalkPort(name() + ".walker", *this), controlPort(name() + ".control", *this, params->reg_map), tlb(params->tlb_entries, params->tlb_assoc, params->tlb_policy), configCache(params->cfg_entries, params->cfg_assoc, params->cfg_policy), @@ -74,14 +74,14 @@ SMMUv3::SMMUv3(SMMUv3Params *params) : walkCacheNonfinalEnable(params->wc_nonfinal_enable), walkCacheS1Levels(params->wc_s1_levels), walkCacheS2Levels(params->wc_s2_levels), - masterPortWidth(params->master_port_width), + requestPortWidth(params->request_port_width), tlbSem(params->tlb_slots), ifcSmmuSem(1), smmuIfcSem(1), configSem(params->cfg_slots), ipaSem(params->ipa_slots), walkSem(params->walk_slots), - masterPortSem(1), + requestPortSem(1), transSem(params->xlate_slots), ptwSem(params->ptw_slots), cycleSem(1), @@ -91,7 +91,7 @@ SMMUv3::SMMUv3(SMMUv3Params *params) : configLat(params->cfg_lat), ipaLat(params->ipa_lat), walkLat(params->walk_lat), - slaveInterfaces(params->slave_interfaces), + deviceInterfaces(params->device_interfaces), commandExecutor(name() + ".cmd_exec", *this), regsMap(params->reg_map), processCommandsEvent(this) @@ -119,14 +119,14 @@ SMMUv3::SMMUv3(SMMUv3Params *params) : // store an unallowed values or if the are configuration conflicts. warn("SMMUv3 IDx register values unchecked\n"); - for (auto ifc : slaveInterfaces) + for (auto ifc : deviceInterfaces) ifc->setSMMU(this); } bool -SMMUv3::masterRecvTimingResp(PacketPtr pkt) +SMMUv3::recvTimingResp(PacketPtr pkt) { - DPRINTF(SMMUv3, "[t] master resp addr=%#x size=%#x\n", + DPRINTF(SMMUv3, "[t] requestor resp addr=%#x size=%#x\n", pkt->getAddr(), pkt->getSize()); // @todo: We need to pay for this and not just zero it out @@ -141,7 +141,7 @@ SMMUv3::masterRecvTimingResp(PacketPtr pkt) } void -SMMUv3::masterRecvReqRetry() +SMMUv3::recvReqRetry() { assert(!packetsToRetry.empty()); @@ -150,29 +150,29 @@ SMMUv3::masterRecvReqRetry() assert(a.type==ACTION_SEND_REQ || a.type==ACTION_SEND_REQ_FINAL); - DPRINTF(SMMUv3, "[t] master retr addr=%#x size=%#x\n", + DPRINTF(SMMUv3, "[t] requestor retr addr=%#x size=%#x\n", a.pkt->getAddr(), a.pkt->getSize()); - if (!masterPort.sendTimingReq(a.pkt)) + if (!requestPort.sendTimingReq(a.pkt)) break; packetsToRetry.pop(); /* * ACTION_SEND_REQ_FINAL means that we have just forwarded the packet - * on the master interface; this means that we no longer hold on to + * on the requestor interface; this means that we no longer hold on to * that transaction and therefore can accept a new one. - * If the slave port was stalled then unstall it (send retry). + * If the response port was stalled then unstall it (send retry). 
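SMMUv3::recvReqRetry() above keeps the queue-based variant of the retry handshake: parked requests are replayed in order, and once the final request of a transaction goes out the stalled device-side (response) ports are woken up again. Condensed from the hunk, the loop is roughly:

    // Drain the retry queue, stopping at the first packet the downstream
    // request port still refuses.
    while (!packetsToRetry.empty()) {
        SMMUAction a = packetsToRetry.front();
        if (!requestPort.sendTimingReq(a.pkt))
            break;
        packetsToRetry.pop();
        // Forwarding the final request of a transaction frees a slot, so
        // any stalled device interface can now be offered a retry.
        if (a.type == ACTION_SEND_REQ_FINAL)
            scheduleDeviceRetries();
    }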
*/ if (a.type == ACTION_SEND_REQ_FINAL) - scheduleSlaveRetries(); + scheduleDeviceRetries(); } } bool -SMMUv3::masterTableWalkRecvTimingResp(PacketPtr pkt) +SMMUv3::tableWalkRecvTimingResp(PacketPtr pkt) { - DPRINTF(SMMUv3, "[t] master HWTW resp addr=%#x size=%#x\n", + DPRINTF(SMMUv3, "[t] requestor HWTW resp addr=%#x size=%#x\n", pkt->getAddr(), pkt->getSize()); // @todo: We need to pay for this and not just zero it out @@ -187,7 +187,7 @@ SMMUv3::masterTableWalkRecvTimingResp(PacketPtr pkt) } void -SMMUv3::masterTableWalkRecvReqRetry() +SMMUv3::tableWalkRecvReqRetry() { assert(tableWalkPortEnable); assert(!packetsTableWalkToRetry.empty()); @@ -197,10 +197,10 @@ SMMUv3::masterTableWalkRecvReqRetry() assert(a.type==ACTION_SEND_REQ); - DPRINTF(SMMUv3, "[t] master HWTW retr addr=%#x size=%#x\n", + DPRINTF(SMMUv3, "[t] requestor HWTW retr addr=%#x size=%#x\n", a.pkt->getAddr(), a.pkt->getSize()); - if (!masterTableWalkPort.sendTimingReq(a.pkt)) + if (!tableWalkPort.sendTimingReq(a.pkt)) break; packetsTableWalkToRetry.pop(); @@ -208,9 +208,9 @@ SMMUv3::masterTableWalkRecvReqRetry() } void -SMMUv3::scheduleSlaveRetries() +SMMUv3::scheduleDeviceRetries() { - for (auto ifc : slaveInterfaces) { + for (auto ifc : deviceInterfaces) { ifc->scheduleDeviceRetry(); } } @@ -239,17 +239,17 @@ SMMUv3::runProcessAtomic(SMMUProcess *proc, PacketPtr pkt) switch (action.type) { case ACTION_SEND_REQ: - // Send an MMU initiated request on the table walk port if it is - // enabled. Otherwise, fall through and handle same as the final - // ACTION_SEND_REQ_FINAL request. + // Send an MMU initiated request on the table walk port if + // it is enabled. Otherwise, fall through and handle same + // as the final ACTION_SEND_REQ_FINAL request. if (tableWalkPortEnable) { - delay += masterTableWalkPort.sendAtomic(action.pkt); + delay += tableWalkPort.sendAtomic(action.pkt); pkt = action.pkt; break; } M5_FALLTHROUGH; case ACTION_SEND_REQ_FINAL: - delay += masterPort.sendAtomic(action.pkt); + delay += requestPort.sendAtomic(action.pkt); pkt = action.pkt; break; @@ -289,14 +289,14 @@ SMMUv3::runProcessTiming(SMMUProcess *proc, PacketPtr pkt) if (tableWalkPortEnable) { action.pkt->pushSenderState(proc); - DPRINTF(SMMUv3, "[t] master HWTW req addr=%#x size=%#x\n", + DPRINTF(SMMUv3, "[t] requestor HWTW req addr=%#x size=%#x\n", action.pkt->getAddr(), action.pkt->getSize()); if (packetsTableWalkToRetry.empty() - && masterTableWalkPort.sendTimingReq(action.pkt)) { - scheduleSlaveRetries(); + && tableWalkPort.sendTimingReq(action.pkt)) { + scheduleDeviceRetries(); } else { - DPRINTF(SMMUv3, "[t] master HWTW req needs retry," + DPRINTF(SMMUv3, "[t] requestor HWTW req needs retry," " qlen=%d\n", packetsTableWalkToRetry.size()); packetsTableWalkToRetry.push(action); } @@ -307,13 +307,14 @@ SMMUv3::runProcessTiming(SMMUProcess *proc, PacketPtr pkt) case ACTION_SEND_REQ_FINAL: action.pkt->pushSenderState(proc); - DPRINTF(SMMUv3, "[t] master req addr=%#x size=%#x\n", + DPRINTF(SMMUv3, "[t] requestor req addr=%#x size=%#x\n", action.pkt->getAddr(), action.pkt->getSize()); - if (packetsToRetry.empty() && masterPort.sendTimingReq(action.pkt)) { - scheduleSlaveRetries(); + if (packetsToRetry.empty() && + requestPort.sendTimingReq(action.pkt)) { + scheduleDeviceRetries(); } else { - DPRINTF(SMMUv3, "[t] master req needs retry, qlen=%d\n", + DPRINTF(SMMUv3, "[t] requestor req needs retry, qlen=%d\n", packetsToRetry.size()); packetsToRetry.push(action); } @@ -324,7 +325,7 @@ SMMUv3::runProcessTiming(SMMUProcess *proc, PacketPtr pkt) // @todo: 
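runProcessAtomic() above also encodes the split between the two renamed memory-side ports: MMU-initiated table walks use the optional walker port when it is connected, and everything else (including walks when no walker port exists) goes out of the main request port. Reduced to the dispatch itself:

    // Sketch of the atomic-mode dispatch between the renamed ports
    // (formerly "master" and "master_walker").
    switch (action.type) {
      case ACTION_SEND_REQ:
        if (tableWalkPortEnable) {
            delay += tableWalkPort.sendAtomic(action.pkt);
            break;
        }
        M5_FALLTHROUGH;  // no walker port: fall back to the request port
      case ACTION_SEND_REQ_FINAL:
        delay += requestPort.sendAtomic(action.pkt);
        break;
      default:
        break;  // response/terminate actions omitted in this sketch
    }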
We need to pay for this and not just zero it out action.pkt->headerDelay = action.pkt->payloadDelay = 0; - DPRINTF(SMMUv3, "[t] slave resp addr=%#x size=%#x\n", + DPRINTF(SMMUv3, "[t] responder resp addr=%#x size=%#x\n", action.pkt->getAddr(), action.pkt->getSize()); @@ -338,7 +339,7 @@ SMMUv3::runProcessTiming(SMMUProcess *proc, PacketPtr pkt) // @todo: We need to pay for this and not just zero it out action.pkt->headerDelay = action.pkt->payloadDelay = 0; - DPRINTF(SMMUv3, "[t] ATS slave resp addr=%#x size=%#x\n", + DPRINTF(SMMUv3, "[t] ATS responder resp addr=%#x size=%#x\n", action.pkt->getAddr(), action.pkt->getSize()); assert(action.ifc); @@ -394,9 +395,9 @@ SMMUv3::processCommand(const SMMUCommand &cmd) DPRINTF(SMMUv3, "CMD_CFGI_STE sid=%#x\n", cmd.dw0.sid); configCache.invalidateSID(cmd.dw0.sid); - for (auto slave_interface : slaveInterfaces) { - slave_interface->microTLB->invalidateSID(cmd.dw0.sid); - slave_interface->mainTLB->invalidateSID(cmd.dw0.sid); + for (auto dev_interface : deviceInterfaces) { + dev_interface->microTLB->invalidateSID(cmd.dw0.sid); + dev_interface->mainTLB->invalidateSID(cmd.dw0.sid); } break; } @@ -409,9 +410,9 @@ SMMUv3::processCommand(const SMMUCommand &cmd) DPRINTF(SMMUv3, "CMD_CFGI_ALL\n"); configCache.invalidateAll(); - for (auto slave_interface : slaveInterfaces) { - slave_interface->microTLB->invalidateAll(); - slave_interface->mainTLB->invalidateAll(); + for (auto dev_interface : deviceInterfaces) { + dev_interface->microTLB->invalidateAll(); + dev_interface->mainTLB->invalidateAll(); } } else { DPRINTF(SMMUv3, "CMD_CFGI_STE_RANGE\n"); @@ -420,9 +421,9 @@ SMMUv3::processCommand(const SMMUCommand &cmd) for (auto sid = start_sid; sid <= end_sid; sid++) { configCache.invalidateSID(sid); - for (auto slave_interface : slaveInterfaces) { - slave_interface->microTLB->invalidateSID(sid); - slave_interface->mainTLB->invalidateSID(sid); + for (auto dev_interface : deviceInterfaces) { + dev_interface->microTLB->invalidateSID(sid); + dev_interface->mainTLB->invalidateSID(sid); } } } @@ -434,10 +435,10 @@ SMMUv3::processCommand(const SMMUCommand &cmd) cmd.dw0.sid, cmd.dw0.ssid); configCache.invalidateSSID(cmd.dw0.sid, cmd.dw0.ssid); - for (auto slave_interface : slaveInterfaces) { - slave_interface->microTLB->invalidateSSID( + for (auto dev_interface : deviceInterfaces) { + dev_interface->microTLB->invalidateSSID( cmd.dw0.sid, cmd.dw0.ssid); - slave_interface->mainTLB->invalidateSSID( + dev_interface->mainTLB->invalidateSSID( cmd.dw0.sid, cmd.dw0.ssid); } break; @@ -447,18 +448,18 @@ SMMUv3::processCommand(const SMMUCommand &cmd) DPRINTF(SMMUv3, "CMD_CFGI_CD_ALL sid=%#x\n", cmd.dw0.sid); configCache.invalidateSID(cmd.dw0.sid); - for (auto slave_interface : slaveInterfaces) { - slave_interface->microTLB->invalidateSID(cmd.dw0.sid); - slave_interface->mainTLB->invalidateSID(cmd.dw0.sid); + for (auto dev_interface : deviceInterfaces) { + dev_interface->microTLB->invalidateSID(cmd.dw0.sid); + dev_interface->mainTLB->invalidateSID(cmd.dw0.sid); } break; } case CMD_TLBI_NH_ALL: { DPRINTF(SMMUv3, "CMD_TLBI_NH_ALL vmid=%#x\n", cmd.dw0.vmid); - for (auto slave_interface : slaveInterfaces) { - slave_interface->microTLB->invalidateVMID(cmd.dw0.vmid); - slave_interface->mainTLB->invalidateVMID(cmd.dw0.vmid); + for (auto dev_interface : deviceInterfaces) { + dev_interface->microTLB->invalidateVMID(cmd.dw0.vmid); + dev_interface->mainTLB->invalidateVMID(cmd.dw0.vmid); } tlb.invalidateVMID(cmd.dw0.vmid); walkCache.invalidateVMID(cmd.dw0.vmid); @@ -468,10 +469,10 @@ 
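Each invalidation command in processCommand() now fans out over the renamed deviceInterfaces vector before touching the shared structures. The pattern repeated across the hunks above and below reduces to (shown for the by-VMID case):

    // Invalidate the per-device micro and main TLBs first, then the
    // shared SMMU TLB and walk cache.
    for (auto dev_interface : deviceInterfaces) {
        dev_interface->microTLB->invalidateVMID(cmd.dw0.vmid);
        dev_interface->mainTLB->invalidateVMID(cmd.dw0.vmid);
    }
    tlb.invalidateVMID(cmd.dw0.vmid);
    walkCache.invalidateVMID(cmd.dw0.vmid);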
SMMUv3::processCommand(const SMMUCommand &cmd) case CMD_TLBI_NH_ASID: { DPRINTF(SMMUv3, "CMD_TLBI_NH_ASID asid=%#x vmid=%#x\n", cmd.dw0.asid, cmd.dw0.vmid); - for (auto slave_interface : slaveInterfaces) { - slave_interface->microTLB->invalidateASID( + for (auto dev_interface : deviceInterfaces) { + dev_interface->microTLB->invalidateASID( cmd.dw0.asid, cmd.dw0.vmid); - slave_interface->mainTLB->invalidateASID( + dev_interface->mainTLB->invalidateASID( cmd.dw0.asid, cmd.dw0.vmid); } tlb.invalidateASID(cmd.dw0.asid, cmd.dw0.vmid); @@ -483,10 +484,10 @@ SMMUv3::processCommand(const SMMUCommand &cmd) const Addr addr = cmd.addr(); DPRINTF(SMMUv3, "CMD_TLBI_NH_VAA va=%#08x vmid=%#x\n", addr, cmd.dw0.vmid); - for (auto slave_interface : slaveInterfaces) { - slave_interface->microTLB->invalidateVAA( + for (auto dev_interface : deviceInterfaces) { + dev_interface->microTLB->invalidateVAA( addr, cmd.dw0.vmid); - slave_interface->mainTLB->invalidateVAA( + dev_interface->mainTLB->invalidateVAA( addr, cmd.dw0.vmid); } tlb.invalidateVAA(addr, cmd.dw0.vmid); @@ -499,10 +500,10 @@ SMMUv3::processCommand(const SMMUCommand &cmd) const Addr addr = cmd.addr(); DPRINTF(SMMUv3, "CMD_TLBI_NH_VA va=%#08x asid=%#x vmid=%#x\n", addr, cmd.dw0.asid, cmd.dw0.vmid); - for (auto slave_interface : slaveInterfaces) { - slave_interface->microTLB->invalidateVA( + for (auto dev_interface : deviceInterfaces) { + dev_interface->microTLB->invalidateVA( addr, cmd.dw0.asid, cmd.dw0.vmid); - slave_interface->mainTLB->invalidateVA( + dev_interface->mainTLB->invalidateVA( addr, cmd.dw0.asid, cmd.dw0.vmid); } tlb.invalidateVA(addr, cmd.dw0.asid, cmd.dw0.vmid); @@ -527,9 +528,9 @@ SMMUv3::processCommand(const SMMUCommand &cmd) case CMD_TLBI_S12_VMALL: { DPRINTF(SMMUv3, "CMD_TLBI_S12_VMALL vmid=%#x\n", cmd.dw0.vmid); - for (auto slave_interface : slaveInterfaces) { - slave_interface->microTLB->invalidateVMID(cmd.dw0.vmid); - slave_interface->mainTLB->invalidateVMID(cmd.dw0.vmid); + for (auto dev_interface : deviceInterfaces) { + dev_interface->microTLB->invalidateVMID(cmd.dw0.vmid); + dev_interface->mainTLB->invalidateVMID(cmd.dw0.vmid); } tlb.invalidateVMID(cmd.dw0.vmid); ipaCache.invalidateVMID(cmd.dw0.vmid); @@ -539,9 +540,9 @@ SMMUv3::processCommand(const SMMUCommand &cmd) case CMD_TLBI_NSNH_ALL: { DPRINTF(SMMUv3, "CMD_TLBI_NSNH_ALL\n"); - for (auto slave_interface : slaveInterfaces) { - slave_interface->microTLB->invalidateAll(); - slave_interface->mainTLB->invalidateAll(); + for (auto dev_interface : deviceInterfaces) { + dev_interface->microTLB->invalidateAll(); + dev_interface->mainTLB->invalidateAll(); } tlb.invalidateAll(); ipaCache.invalidateAll(); @@ -717,16 +718,16 @@ void SMMUv3::init() { // make sure both sides are connected and have the same block size - if (!masterPort.isConnected()) - fatal("Master port is not connected.\n"); + if (!requestPort.isConnected()) + fatal("Request port is not connected.\n"); - // If the second master port is connected for the table walks, enable + // If the second request port is connected for the table walks, enable // the mode to send table walks through this port instead - if (masterTableWalkPort.isConnected()) + if (tableWalkPort.isConnected()) tableWalkPortEnable = true; - // notify the master side of our address ranges - for (auto ifc : slaveInterfaces) { + // notify the request side of our address ranges + for (auto ifc : deviceInterfaces) { ifc->sendRange(); } @@ -741,10 +742,10 @@ SMMUv3::regStats() using namespace Stats; - for (size_t i = 0; i < slaveInterfaces.size(); i++) { - 
slaveInterfaces[i]->microTLB->regStats( + for (size_t i = 0; i < deviceInterfaces.size(); i++) { + deviceInterfaces[i]->microTLB->regStats( csprintf("%s.utlb%d", name(), i)); - slaveInterfaces[i]->mainTLB->regStats( + deviceInterfaces[i]->mainTLB->regStats( csprintf("%s.maintlb%d", name(), i)); } @@ -815,10 +816,10 @@ SMMUv3::unserialize(CheckpointIn &cp) Port& SMMUv3::getPort(const std::string &name, PortID id) { - if (name == "master") { - return masterPort; - } else if (name == "master_walker") { - return masterTableWalkPort; + if (name == "request") { + return requestPort; + } else if (name == "walker") { + return tableWalkPort; } else if (name == "control") { return controlPort; } else { diff --git a/src/dev/arm/smmu_v3.hh b/src/dev/arm/smmu_v3.hh index 8f35cdff5..6b3f3982b 100644 --- a/src/dev/arm/smmu_v3.hh +++ b/src/dev/arm/smmu_v3.hh @@ -48,11 +48,11 @@ #include "dev/arm/smmu_v3_caches.hh" #include "dev/arm/smmu_v3_cmdexec.hh" #include "dev/arm/smmu_v3_defs.hh" +#include "dev/arm/smmu_v3_deviceifc.hh" #include "dev/arm/smmu_v3_events.hh" #include "dev/arm/smmu_v3_ports.hh" #include "dev/arm/smmu_v3_proc.hh" #include "dev/arm/smmu_v3_ptops.hh" -#include "dev/arm/smmu_v3_slaveifc.hh" #include "mem/packet.hh" #include "params/SMMUv3.hh" #include "sim/clocked_object.hh" @@ -85,13 +85,13 @@ class SMMUv3 : public ClockedObject friend class SMMUProcess; friend class SMMUTranslationProcess; friend class SMMUCommandExecProcess; - friend class SMMUv3SlaveInterface; + friend class SMMUv3DeviceInterface; const System &system; - const MasterID masterId; + const RequestorID requestorId; - SMMUMasterPort masterPort; - SMMUMasterTableWalkPort masterTableWalkPort; + SMMURequestPort requestPort; + SMMUTableWalkPort tableWalkPort; SMMUControlPort controlPort; ARMArchTLB tlb; @@ -108,7 +108,7 @@ class SMMUv3 : public ClockedObject const bool walkCacheNonfinalEnable; const unsigned walkCacheS1Levels; const unsigned walkCacheS2Levels; - const unsigned masterPortWidth; // in bytes + const unsigned requestPortWidth; // in bytes SMMUSemaphore tlbSem; SMMUSemaphore ifcSmmuSem; @@ -116,7 +116,7 @@ class SMMUv3 : public ClockedObject SMMUSemaphore configSem; SMMUSemaphore ipaSem; SMMUSemaphore walkSem; - SMMUSemaphore masterPortSem; + SMMUSemaphore requestPortSem; SMMUSemaphore transSem; // max N transactions in SMMU SMMUSemaphore ptwSem; // max N concurrent PTWs @@ -138,7 +138,7 @@ class SMMUv3 : public ClockedObject Stats::Distribution translationTimeDist; Stats::Distribution ptwTimeDist; - std::vector slaveInterfaces; + std::vector deviceInterfaces; SMMUCommandExecProcess commandExecutor; @@ -151,7 +151,7 @@ class SMMUv3 : public ClockedObject std::queue packetsTableWalkToRetry; - void scheduleSlaveRetries(); + void scheduleDeviceRetries(); SMMUAction runProcess(SMMUProcess *proc, PacketPtr pkt); SMMUAction runProcessAtomic(SMMUProcess *proc, PacketPtr pkt); @@ -171,13 +171,13 @@ class SMMUv3 : public ClockedObject virtual void init() override; virtual void regStats() override; - Tick slaveRecvAtomic(PacketPtr pkt, PortID id); - bool slaveRecvTimingReq(PacketPtr pkt, PortID id); - bool masterRecvTimingResp(PacketPtr pkt); - void masterRecvReqRetry(); + Tick recvAtomic(PacketPtr pkt, PortID id); + bool recvTimingReq(PacketPtr pkt, PortID id); + bool recvTimingResp(PacketPtr pkt); + void recvReqRetry(); - bool masterTableWalkRecvTimingResp(PacketPtr pkt); - void masterTableWalkRecvReqRetry(); + bool tableWalkRecvTimingResp(PacketPtr pkt); + void tableWalkRecvReqRetry(); Tick readControl(PacketPtr pkt); Tick 
writeControl(PacketPtr pkt); diff --git a/src/dev/arm/smmu_v3_deviceifc.cc b/src/dev/arm/smmu_v3_deviceifc.cc new file mode 100644 index 000000000..751605556 --- /dev/null +++ b/src/dev/arm/smmu_v3_deviceifc.cc @@ -0,0 +1,267 @@ +/* + * Copyright (c) 2019 ARM Limited + * All rights reserved + * + * The license below extends only to copyright in the software and shall + * not be construed as granting a license to any other intellectual + * property including but not limited to intellectual property relating + * to a hardware implementation of the functionality of the software + * licensed hereunder. You may use the software subject to the license + * terms below provided that you ensure that this notice is replicated + * unmodified and in its entirety in all distributions of the software, + * modified or unmodified, in source code or in binary form. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "dev/arm/smmu_v3_deviceifc.hh" + +#include "base/trace.hh" +#include "debug/SMMUv3.hh" +#include "dev/arm/smmu_v3.hh" +#include "dev/arm/smmu_v3_transl.hh" + +SMMUv3DeviceInterface::SMMUv3DeviceInterface( + const SMMUv3DeviceInterfaceParams *p) : + ClockedObject(p), + smmu(nullptr), + microTLB(new SMMUTLB(p->utlb_entries, + p->utlb_assoc, + p->utlb_policy)), + mainTLB(new SMMUTLB(p->tlb_entries, + p->tlb_assoc, + p->tlb_policy)), + microTLBEnable(p->utlb_enable), + mainTLBEnable(p->tlb_enable), + devicePortSem(1), + microTLBSem(p->utlb_slots), + mainTLBSem(p->tlb_slots), + microTLBLat(p->utlb_lat), + mainTLBLat(p->tlb_lat), + devicePort(new SMMUDevicePort(csprintf("%s.device_port", + name()), *this)), + atsDevicePort(name() + ".atsDevicePort", *this), + atsMemPort(name() + ".atsMemPort", *this), + portWidth(p->port_width), + wrBufSlotsRemaining(p->wrbuf_slots), + xlateSlotsRemaining(p->xlate_slots), + pendingMemAccesses(0), + prefetchEnable(p->prefetch_enable), + prefetchReserveLastWay( + p->prefetch_reserve_last_way), + deviceNeedsRetry(false), + atsDeviceNeedsRetry(false), + sendDeviceRetryEvent(*this), + atsSendDeviceRetryEvent(this) +{} + +void +SMMUv3DeviceInterface::sendRange() +{ + if (devicePort->isConnected()) { + inform("Device port is connected to %s\n", devicePort->getPeer()); + + devicePort->sendRangeChange(); + } else { + fatal("Device port is not connected.\n"); + } +} + +Port& +SMMUv3DeviceInterface::getPort(const std::string &name, PortID id) +{ + if (name == "ats_mem_side_port") { + return atsMemPort; + } else if (name == "device_port") { + return *devicePort; + } else if (name == "ats_dev_side_port") { + return atsDevicePort; + } else { + return ClockedObject::getPort(name, id); + } +} + +void +SMMUv3DeviceInterface::schedTimingResp(PacketPtr pkt) +{ + devicePort->schedTimingResp(pkt, nextCycle()); +} + +void +SMMUv3DeviceInterface::schedAtsTimingResp(PacketPtr pkt) +{ + atsDevicePort.schedTimingResp(pkt, nextCycle()); + + if (atsDeviceNeedsRetry) { + atsDeviceNeedsRetry = false; + schedule(atsSendDeviceRetryEvent, nextCycle()); + } +} + +Tick +SMMUv3DeviceInterface::recvAtomic(PacketPtr pkt) +{ + DPRINTF(SMMUv3, "[a] req from %s addr=%#x size=%#x\n", + devicePort->getPeer(), pkt->getAddr(), pkt->getSize()); + + std::string proc_name = csprintf("%s.port", name()); + SMMUTranslationProcess proc(proc_name, *smmu, *this); + proc.beginTransaction(SMMUTranslRequest::fromPacket(pkt)); + + SMMUAction a = smmu->runProcessAtomic(&proc, pkt); + assert(a.type == ACTION_SEND_RESP); + + return a.delay; +} + +bool +SMMUv3DeviceInterface::recvTimingReq(PacketPtr pkt) +{ + DPRINTF(SMMUv3, "[t] req from %s addr=%#x size=%#x\n", + devicePort->getPeer(), pkt->getAddr(), pkt->getSize()); + + // @todo: We need to pay for this and not just zero it out + pkt->headerDelay = pkt->payloadDelay = 0; + + unsigned nbeats = + (pkt->getSize() + (portWidth-1)) / portWidth; + + if (xlateSlotsRemaining==0 || + (pkt->isWrite() && wrBufSlotsRemaining < nbeats)) + { + deviceNeedsRetry = true; + return false; + } + + if (pkt->isWrite()) + wrBufSlotsRemaining -= nbeats; + + std::string proc_name = csprintf("%s.port", name()); + SMMUTranslationProcess *proc = + new SMMUTranslationProcess(proc_name, *smmu, *this); + proc->beginTransaction(SMMUTranslRequest::fromPacket(pkt)); + + smmu->runProcessTiming(proc, pkt); + + return true; +} + +Tick +SMMUv3DeviceInterface::atsRecvAtomic(PacketPtr pkt) +{ + DPRINTF(SMMUv3, "[a] ATS responder req addr=%#x size=%#x\n", + pkt->getAddr(), pkt->getSize()); + 
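SMMUv3DeviceInterface::recvTimingReq() above is also where the renamed interface applies back-pressure: a request is refused (and a device retry armed) when no translation slots remain, or when a write would not fit in the write buffer. The check is beat-based on the port width; a self-contained sketch of the same arithmetic, with assumed values matching the defaults in SMMUv3.py (acceptWrite is an illustrative name, not part of the patch):

    #include <cstdio>

    // Beat/slot admission check, mirroring recvTimingReq(). portWidth is
    // the device-interface width in bytes, i.e. one beat.
    bool
    acceptWrite(unsigned size, unsigned portWidth,
                unsigned xlateSlotsRemaining, unsigned &wrBufSlotsRemaining)
    {
        const unsigned nbeats = (size + (portWidth - 1)) / portWidth;
        if (xlateSlotsRemaining == 0 || wrBufSlotsRemaining < nbeats)
            return false;               // caller sets deviceNeedsRetry
        wrBufSlotsRemaining -= nbeats;  // reserve write-buffer space
        return true;
    }

    int
    main()
    {
        unsigned wrbuf = 16;  // default wrbuf_slots
        // A 64-byte write on a 16-byte port needs 4 beats.
        std::printf("%d\n", acceptWrite(64, 16, 8, wrbuf));  // prints 1
        std::printf("%u\n", wrbuf);                          // prints 12
        return 0;
    }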
+ std::string proc_name = csprintf("%s.atsport", name()); + const bool ats_request = true; + SMMUTranslationProcess proc( + proc_name, *smmu, *this); + proc.beginTransaction(SMMUTranslRequest::fromPacket(pkt, ats_request)); + + SMMUAction a = smmu->runProcessAtomic(&proc, pkt); + assert(a.type == ACTION_SEND_RESP_ATS); + + return a.delay; +} + +bool +SMMUv3DeviceInterface::atsRecvTimingReq(PacketPtr pkt) +{ + DPRINTF(SMMUv3, "[t] ATS responder req addr=%#x size=%#x\n", + pkt->getAddr(), pkt->getSize()); + + // @todo: We need to pay for this and not just zero it out + pkt->headerDelay = pkt->payloadDelay = 0; + + if (xlateSlotsRemaining == 0) { + deviceNeedsRetry = true; + return false; + } + + std::string proc_name = csprintf("%s.atsport", name()); + const bool ats_request = true; + SMMUTranslationProcess *proc = + new SMMUTranslationProcess(proc_name, *smmu, *this); + proc->beginTransaction(SMMUTranslRequest::fromPacket(pkt, ats_request)); + + smmu->runProcessTiming(proc, pkt); + + return true; +} + +bool +SMMUv3DeviceInterface::atsRecvTimingResp(PacketPtr pkt) +{ + DPRINTF(SMMUv3, "[t] ATS requestor resp addr=%#x size=%#x\n", + pkt->getAddr(), pkt->getSize()); + + // @todo: We need to pay for this and not just zero it out + pkt->headerDelay = pkt->payloadDelay = 0; + + SMMUProcess *proc = + safe_cast(pkt->popSenderState()); + + smmu->runProcessTiming(proc, pkt); + + return true; +} + +void +SMMUv3DeviceInterface::sendDeviceRetry() +{ + devicePort->sendRetryReq(); +} + +void +SMMUv3DeviceInterface::atsSendDeviceRetry() +{ + DPRINTF(SMMUv3, "ATS retry\n"); + atsDevicePort.sendRetryReq(); +} + +void +SMMUv3DeviceInterface::scheduleDeviceRetry() +{ + if (deviceNeedsRetry && !sendDeviceRetryEvent.scheduled()) { + DPRINTF(SMMUv3, "sched responder retry\n"); + deviceNeedsRetry = false; + schedule(sendDeviceRetryEvent, nextCycle()); + } +} + +DrainState +SMMUv3DeviceInterface::drain() +{ + // Wait until all SMMU translations are completed + if (xlateSlotsRemaining < params()->xlate_slots) { + return DrainState::Draining; + } + return DrainState::Drained; +} + +SMMUv3DeviceInterface* +SMMUv3DeviceInterfaceParams::create() +{ + return new SMMUv3DeviceInterface(this); +} diff --git a/src/dev/arm/smmu_v3_deviceifc.hh b/src/dev/arm/smmu_v3_deviceifc.hh new file mode 100644 index 000000000..64dcc5798 --- /dev/null +++ b/src/dev/arm/smmu_v3_deviceifc.hh @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2013, 2018-2019 ARM Limited + * All rights reserved + * + * The license below extends only to copyright in the software and shall + * not be construed as granting a license to any other intellectual + * property including but not limited to intellectual property relating + * to a hardware implementation of the functionality of the software + * licensed hereunder. You may use the software subject to the license + * terms below provided that you ensure that this notice is replicated + * unmodified and in its entirety in all distributions of the software, + * modified or unmodified, in source code or in binary form. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __DEV_ARM_SMMU_V3_DEVICEIFC_HH__ +#define __DEV_ARM_SMMU_V3_DEVICEIFC_HH__ + +#include + +#include "dev/arm/smmu_v3_caches.hh" +#include "dev/arm/smmu_v3_defs.hh" +#include "dev/arm/smmu_v3_events.hh" +#include "dev/arm/smmu_v3_ports.hh" +#include "dev/arm/smmu_v3_proc.hh" +#include "params/SMMUv3DeviceInterface.hh" +#include "sim/clocked_object.hh" + +class SMMUTranslationProcess; +class SMMUv3; +class SMMUDevicePort; + +class SMMUv3DeviceInterface : public ClockedObject +{ + protected: + friend class SMMUTranslationProcess; + + public: + SMMUv3 *smmu; + SMMUTLB* microTLB; + SMMUTLB* mainTLB; + + const bool microTLBEnable; + const bool mainTLBEnable; + + SMMUSemaphore devicePortSem; + SMMUSemaphore microTLBSem; + SMMUSemaphore mainTLBSem; + + const Cycles microTLBLat; + const Cycles mainTLBLat; + + SMMUDevicePort *devicePort; + SMMUATSDevicePort atsDevicePort; + SMMUATSMemoryPort atsMemPort; + + // in bytes + const unsigned portWidth; + + unsigned wrBufSlotsRemaining; + unsigned xlateSlotsRemaining; + unsigned pendingMemAccesses; + + const bool prefetchEnable; + const bool prefetchReserveLastWay; + + std::list duplicateReqs; + SMMUSignal duplicateReqRemoved; + + std::list dependentReads[SMMU_MAX_TRANS_ID]; + std::list dependentWrites[SMMU_MAX_TRANS_ID]; + SMMUSignal dependentReqRemoved; + + // Receiving translation requests from the requestor device + Tick recvAtomic(PacketPtr pkt); + bool recvTimingReq(PacketPtr pkt); + void schedTimingResp(PacketPtr pkt); + + Tick atsRecvAtomic(PacketPtr pkt); + bool atsRecvTimingReq(PacketPtr pkt); + bool atsRecvTimingResp(PacketPtr pkt); + void schedAtsTimingResp(PacketPtr pkt); + + void scheduleDeviceRetry(); + void sendDeviceRetry(); + void atsSendDeviceRetry(); + + bool deviceNeedsRetry; + bool atsDeviceNeedsRetry; + + SMMUDeviceRetryEvent sendDeviceRetryEvent; + EventWrapper< + SMMUv3DeviceInterface, + &SMMUv3DeviceInterface::atsSendDeviceRetry> atsSendDeviceRetryEvent; + + Port& getPort(const std::string &name, PortID id) override; + + public: + 
SMMUv3DeviceInterface(const SMMUv3DeviceInterfaceParams *p); + + ~SMMUv3DeviceInterface() + { + delete microTLB; + delete mainTLB; + } + + const SMMUv3DeviceInterfaceParams * + params() const + { + return static_cast(_params); + } + + DrainState drain() override; + + void setSMMU(SMMUv3 *_smmu) { smmu = _smmu; } + void sendRange(); +}; + +#endif /* __DEV_ARM_SMMU_V3_DEVICEIFC_HH__ */ diff --git a/src/dev/arm/smmu_v3_events.cc b/src/dev/arm/smmu_v3_events.cc index be0f2df81..774f2da14 100644 --- a/src/dev/arm/smmu_v3_events.cc +++ b/src/dev/arm/smmu_v3_events.cc @@ -37,7 +37,7 @@ #include "dev/arm/smmu_v3_events.hh" -#include "dev/arm/smmu_v3_slaveifc.hh" +#include "dev/arm/smmu_v3_deviceifc.hh" void SMMUDeviceRetryEvent::process() diff --git a/src/dev/arm/smmu_v3_events.hh b/src/dev/arm/smmu_v3_events.hh index e8b4975d2..cfc980e37 100644 --- a/src/dev/arm/smmu_v3_events.hh +++ b/src/dev/arm/smmu_v3_events.hh @@ -41,15 +41,15 @@ #include #include -class SMMUv3SlaveInterface; +class SMMUv3DeviceInterface; class SMMUDeviceRetryEvent : public Event { private: - SMMUv3SlaveInterface &smmuIfc; + SMMUv3DeviceInterface &smmuIfc; public: - SMMUDeviceRetryEvent(SMMUv3SlaveInterface &ifc) + SMMUDeviceRetryEvent(SMMUv3DeviceInterface &ifc) : smmuIfc(ifc) {} @@ -58,7 +58,7 @@ class SMMUDeviceRetryEvent : public Event const std::string name() const; const char *description() const - { return "SlaveRetryEvent"; } + { return "DeviceRetryEvent"; } }; #endif /* __DEV_ARM_SMMU_V3_EVENTS_HH__ */ diff --git a/src/dev/arm/smmu_v3_ports.cc b/src/dev/arm/smmu_v3_ports.cc index 3f5425021..5d5e78761 100644 --- a/src/dev/arm/smmu_v3_ports.cc +++ b/src/dev/arm/smmu_v3_ports.cc @@ -39,67 +39,67 @@ #include "base/logging.hh" #include "dev/arm/smmu_v3.hh" -#include "dev/arm/smmu_v3_slaveifc.hh" +#include "dev/arm/smmu_v3_deviceifc.hh" -SMMUMasterPort::SMMUMasterPort(const std::string &_name, SMMUv3 &_smmu) : +SMMURequestPort::SMMURequestPort(const std::string &_name, SMMUv3 &_smmu) : RequestPort(_name, &_smmu), smmu(_smmu) {} bool -SMMUMasterPort::recvTimingResp(PacketPtr pkt) +SMMURequestPort::recvTimingResp(PacketPtr pkt) { - return smmu.masterRecvTimingResp(pkt); + return smmu.recvTimingResp(pkt); } void -SMMUMasterPort::recvReqRetry() +SMMURequestPort::recvReqRetry() { - return smmu.masterRecvReqRetry(); + return smmu.recvReqRetry(); } -SMMUMasterTableWalkPort::SMMUMasterTableWalkPort(const std::string &_name, +SMMUTableWalkPort::SMMUTableWalkPort(const std::string &_name, SMMUv3 &_smmu) : RequestPort(_name, &_smmu), smmu(_smmu) {} bool -SMMUMasterTableWalkPort::recvTimingResp(PacketPtr pkt) +SMMUTableWalkPort::recvTimingResp(PacketPtr pkt) { - return smmu.masterTableWalkRecvTimingResp(pkt); + return smmu.tableWalkRecvTimingResp(pkt); } void -SMMUMasterTableWalkPort::recvReqRetry() +SMMUTableWalkPort::recvReqRetry() { - return smmu.masterTableWalkRecvReqRetry(); + return smmu.tableWalkRecvReqRetry(); } -SMMUSlavePort::SMMUSlavePort(const std::string &_name, - SMMUv3SlaveInterface &_ifc, +SMMUDevicePort::SMMUDevicePort(const std::string &_name, + SMMUv3DeviceInterface &_ifc, PortID _id) : - QueuedSlavePort(_name, &_ifc, respQueue, _id), + QueuedResponsePort(_name, &_ifc, respQueue, _id), ifc(_ifc), respQueue(_ifc, *this) {} void -SMMUSlavePort::recvFunctional(PacketPtr pkt) +SMMUDevicePort::recvFunctional(PacketPtr pkt) { if (!respQueue.trySatisfyFunctional(pkt)) recvAtomic(pkt); } Tick -SMMUSlavePort::recvAtomic(PacketPtr pkt) +SMMUDevicePort::recvAtomic(PacketPtr pkt) { return ifc.recvAtomic(pkt); } bool 
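The renamed port classes in smmu_v3_ports.cc stay deliberately thin: each protocol callback forwards to its owner under the new method names (SMMUv3 for the memory-side ports, SMMUv3DeviceInterface for the device-side ones). Condensed from the hunks around this point:

    // Memory-side request port: delegate to the SMMU itself.
    bool
    SMMURequestPort::recvTimingResp(PacketPtr pkt)
    {
        return smmu.recvTimingResp(pkt);  // was smmu.masterRecvTimingResp
    }

    // Device-side response port: delegate to the device interface, whose
    // recvTimingReq() keeps its name across the rename.
    bool
    SMMUDevicePort::recvTimingReq(PacketPtr pkt)
    {
        return ifc.recvTimingReq(pkt);
    }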
-SMMUSlavePort::recvTimingReq(PacketPtr pkt) +SMMUDevicePort::recvTimingReq(PacketPtr pkt) { return ifc.recvTimingReq(pkt); } @@ -136,41 +136,41 @@ SMMUControlPort::getAddrRanges() const return list; } -SMMUATSMasterPort::SMMUATSMasterPort(const std::string &_name, - SMMUv3SlaveInterface &_ifc) : - QueuedMasterPort(_name, &_ifc, reqQueue, snoopRespQueue), +SMMUATSMemoryPort::SMMUATSMemoryPort(const std::string &_name, + SMMUv3DeviceInterface &_ifc) : + QueuedRequestPort(_name, &_ifc, reqQueue, snoopRespQueue), ifc(_ifc), reqQueue(_ifc, *this), snoopRespQueue(_ifc, *this) {} bool -SMMUATSMasterPort::recvTimingResp(PacketPtr pkt) +SMMUATSMemoryPort::recvTimingResp(PacketPtr pkt) { - return ifc.atsMasterRecvTimingResp(pkt); + return ifc.atsRecvTimingResp(pkt); } -SMMUATSSlavePort::SMMUATSSlavePort(const std::string &_name, - SMMUv3SlaveInterface &_ifc) : - QueuedSlavePort(_name, &_ifc, respQueue), +SMMUATSDevicePort::SMMUATSDevicePort(const std::string &_name, + SMMUv3DeviceInterface &_ifc) : + QueuedResponsePort(_name, &_ifc, respQueue), ifc(_ifc), respQueue(_ifc, *this) {} void -SMMUATSSlavePort::recvFunctional(PacketPtr pkt) +SMMUATSDevicePort::recvFunctional(PacketPtr pkt) { panic("Functional access on ATS port!"); } Tick -SMMUATSSlavePort::recvAtomic(PacketPtr pkt) +SMMUATSDevicePort::recvAtomic(PacketPtr pkt) { - return ifc.atsSlaveRecvAtomic(pkt); + return ifc.atsRecvAtomic(pkt); } bool -SMMUATSSlavePort::recvTimingReq(PacketPtr pkt) +SMMUATSDevicePort::recvTimingReq(PacketPtr pkt) { - return ifc.atsSlaveRecvTimingReq(pkt); + return ifc.atsRecvTimingReq(pkt); } diff --git a/src/dev/arm/smmu_v3_ports.hh b/src/dev/arm/smmu_v3_ports.hh index ee68bbb87..9d567fa87 100644 --- a/src/dev/arm/smmu_v3_ports.hh +++ b/src/dev/arm/smmu_v3_ports.hh @@ -42,9 +42,9 @@ #include "mem/tport.hh" class SMMUv3; -class SMMUv3SlaveInterface; +class SMMUv3DeviceInterface; -class SMMUMasterPort : public RequestPort +class SMMURequestPort : public RequestPort { protected: SMMUv3 &smmu; @@ -53,12 +53,12 @@ class SMMUMasterPort : public RequestPort virtual void recvReqRetry(); public: - SMMUMasterPort(const std::string &_name, SMMUv3 &_smmu); - virtual ~SMMUMasterPort() {} + SMMURequestPort(const std::string &_name, SMMUv3 &_smmu); + virtual ~SMMURequestPort() {} }; -// Separate master port to send MMU initiated requests on -class SMMUMasterTableWalkPort : public RequestPort +// Separate request port to send MMU initiated requests on +class SMMUTableWalkPort : public RequestPort { protected: SMMUv3 &smmu; @@ -67,14 +67,14 @@ class SMMUMasterTableWalkPort : public RequestPort virtual void recvReqRetry(); public: - SMMUMasterTableWalkPort(const std::string &_name, SMMUv3 &_smmu); - virtual ~SMMUMasterTableWalkPort() {} + SMMUTableWalkPort(const std::string &_name, SMMUv3 &_smmu); + virtual ~SMMUTableWalkPort() {} }; -class SMMUSlavePort : public QueuedSlavePort +class SMMUDevicePort : public QueuedResponsePort { protected: - SMMUv3SlaveInterface &ifc; + SMMUv3DeviceInterface &ifc; RespPacketQueue respQueue; virtual void recvFunctional(PacketPtr pkt); @@ -82,10 +82,10 @@ class SMMUSlavePort : public QueuedSlavePort virtual bool recvTimingReq(PacketPtr pkt); public: - SMMUSlavePort(const std::string &_name, - SMMUv3SlaveInterface &_ifc, + SMMUDevicePort(const std::string &_name, + SMMUv3DeviceInterface &_ifc, PortID _id = InvalidPortID); - virtual ~SMMUSlavePort() {} + virtual ~SMMUDevicePort() {} virtual AddrRangeList getAddrRanges() const { return AddrRangeList { AddrRange(0, UINT64_MAX) }; } @@ -106,24 +106,24 @@ 
class SMMUControlPort : public SimpleTimingPort virtual ~SMMUControlPort() {} }; -class SMMUATSMasterPort : public QueuedMasterPort +class SMMUATSMemoryPort : public QueuedRequestPort { protected: - SMMUv3SlaveInterface &ifc; + SMMUv3DeviceInterface &ifc; ReqPacketQueue reqQueue; SnoopRespPacketQueue snoopRespQueue; virtual bool recvTimingResp(PacketPtr pkt); public: - SMMUATSMasterPort(const std::string &_name, SMMUv3SlaveInterface &_ifc); - virtual ~SMMUATSMasterPort() {} + SMMUATSMemoryPort(const std::string &_name, SMMUv3DeviceInterface &_ifc); + virtual ~SMMUATSMemoryPort() {} }; -class SMMUATSSlavePort : public QueuedSlavePort +class SMMUATSDevicePort : public QueuedResponsePort { protected: - SMMUv3SlaveInterface &ifc; + SMMUv3DeviceInterface &ifc; RespPacketQueue respQueue; virtual void recvFunctional(PacketPtr pkt); @@ -134,8 +134,8 @@ class SMMUATSSlavePort : public QueuedSlavePort { return AddrRangeList(); } public: - SMMUATSSlavePort(const std::string &_name, SMMUv3SlaveInterface &_ifc); - virtual ~SMMUATSSlavePort() {} + SMMUATSDevicePort(const std::string &_name, SMMUv3DeviceInterface &_ifc); + virtual ~SMMUATSDevicePort() {} }; #endif /* __DEV_ARM_SMMU_V3_PORTS_HH__ */ diff --git a/src/dev/arm/smmu_v3_proc.cc b/src/dev/arm/smmu_v3_proc.cc index 9dec2cb77..0a11c1562 100644 --- a/src/dev/arm/smmu_v3_proc.cc +++ b/src/dev/arm/smmu_v3_proc.cc @@ -68,15 +68,15 @@ SMMUProcess::reinit() void SMMUProcess::doRead(Yield &yield, Addr addr, void *ptr, size_t size) { - doSemaphoreDown(yield, smmu.masterPortSem); + doSemaphoreDown(yield, smmu.requestPortSem); doDelay(yield, Cycles(1)); // request - assume 1 cycle - doSemaphoreUp(smmu.masterPortSem); + doSemaphoreUp(smmu.requestPortSem); SMMUAction a; a.type = ACTION_SEND_REQ; RequestPtr req = std::make_shared( - addr, size, 0, smmu.masterId); + addr, size, 0, smmu.requestorId); req->taskId(ContextSwitchTaskId::DMA); @@ -97,18 +97,19 @@ SMMUProcess::doRead(Yield &yield, Addr addr, void *ptr, size_t size) void SMMUProcess::doWrite(Yield &yield, Addr addr, const void *ptr, size_t size) { - unsigned nbeats = (size + (smmu.masterPortWidth-1)) / smmu.masterPortWidth; + unsigned nbeats = (size + (smmu.requestPortWidth-1)) + / smmu.requestPortWidth; - doSemaphoreDown(yield, smmu.masterPortSem); + doSemaphoreDown(yield, smmu.requestPortSem); doDelay(yield, Cycles(nbeats)); - doSemaphoreUp(smmu.masterPortSem); + doSemaphoreUp(smmu.requestPortSem); SMMUAction a; a.type = ACTION_SEND_REQ; RequestPtr req = std::make_shared( - addr, size, 0, smmu.masterId); + addr, size, 0, smmu.requestorId); req->taskId(ContextSwitchTaskId::DMA); diff --git a/src/dev/arm/smmu_v3_proc.hh b/src/dev/arm/smmu_v3_proc.hh index 89b5a6771..fe81d19ee 100644 --- a/src/dev/arm/smmu_v3_proc.hh +++ b/src/dev/arm/smmu_v3_proc.hh @@ -46,7 +46,7 @@ #include "base/types.hh" #include "mem/packet.hh" -class SMMUv3SlaveInterface; +class SMMUv3DeviceInterface; /* * The meaning of these becomes apparent when you @@ -67,7 +67,7 @@ struct SMMUAction { SMMUActionType type; PacketPtr pkt; - SMMUv3SlaveInterface *ifc; + SMMUv3DeviceInterface *ifc; Tick delay; }; diff --git a/src/dev/arm/smmu_v3_slaveifc.cc b/src/dev/arm/smmu_v3_slaveifc.cc deleted file mode 100644 index 5b3dd98ab..000000000 --- a/src/dev/arm/smmu_v3_slaveifc.cc +++ /dev/null @@ -1,266 +0,0 @@ -/* - * Copyright (c) 2019 ARM Limited - * All rights reserved - * - * The license below extends only to copyright in the software and shall - * not be construed as granting a license to any other intellectual - * property including but 
not limited to intellectual property relating - * to a hardware implementation of the functionality of the software - * licensed hereunder. You may use the software subject to the license - * terms below provided that you ensure that this notice is replicated - * unmodified and in its entirety in all distributions of the software, - * modified or unmodified, in source code or in binary form. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include "dev/arm/smmu_v3_slaveifc.hh" - -#include "base/trace.hh" -#include "debug/SMMUv3.hh" -#include "dev/arm/smmu_v3.hh" -#include "dev/arm/smmu_v3_transl.hh" - -SMMUv3SlaveInterface::SMMUv3SlaveInterface( - const SMMUv3SlaveInterfaceParams *p) : - ClockedObject(p), - smmu(nullptr), - microTLB(new SMMUTLB(p->utlb_entries, - p->utlb_assoc, - p->utlb_policy)), - mainTLB(new SMMUTLB(p->tlb_entries, - p->tlb_assoc, - p->tlb_policy)), - microTLBEnable(p->utlb_enable), - mainTLBEnable(p->tlb_enable), - slavePortSem(1), - microTLBSem(p->utlb_slots), - mainTLBSem(p->tlb_slots), - microTLBLat(p->utlb_lat), - mainTLBLat(p->tlb_lat), - slavePort(new SMMUSlavePort(csprintf("%s.slave", name()), *this)), - atsSlavePort(name() + ".atsSlave", *this), - atsMasterPort(name() + ".atsMaster", *this), - portWidth(p->port_width), - wrBufSlotsRemaining(p->wrbuf_slots), - xlateSlotsRemaining(p->xlate_slots), - pendingMemAccesses(0), - prefetchEnable(p->prefetch_enable), - prefetchReserveLastWay( - p->prefetch_reserve_last_way), - deviceNeedsRetry(false), - atsDeviceNeedsRetry(false), - sendDeviceRetryEvent(*this), - atsSendDeviceRetryEvent(this) -{} - -void -SMMUv3SlaveInterface::sendRange() -{ - if (slavePort->isConnected()) { - inform("Slave port is connected to %s\n", slavePort->getPeer()); - - slavePort->sendRangeChange(); - } else { - fatal("Slave port is not connected.\n"); - } -} - -Port& -SMMUv3SlaveInterface::getPort(const std::string &name, PortID id) -{ - if (name == "ats_master") { - return atsMasterPort; - } else if (name == "slave") { - return *slavePort; - } else if (name == "ats_slave") { - return atsSlavePort; - } else { - return ClockedObject::getPort(name, id); - } -} - -void -SMMUv3SlaveInterface::schedTimingResp(PacketPtr pkt) -{ - slavePort->schedTimingResp(pkt, nextCycle()); -} - -void -SMMUv3SlaveInterface::schedAtsTimingResp(PacketPtr pkt) -{ - atsSlavePort.schedTimingResp(pkt, nextCycle()); - - if (atsDeviceNeedsRetry) { - atsDeviceNeedsRetry = false; - schedule(atsSendDeviceRetryEvent, nextCycle()); - } -} - -Tick -SMMUv3SlaveInterface::recvAtomic(PacketPtr pkt) -{ - DPRINTF(SMMUv3, "[a] req from %s addr=%#x size=%#x\n", - slavePort->getPeer(), pkt->getAddr(), pkt->getSize()); - - std::string proc_name = csprintf("%s.port", name()); - SMMUTranslationProcess proc(proc_name, *smmu, *this); - proc.beginTransaction(SMMUTranslRequest::fromPacket(pkt)); - - SMMUAction a = smmu->runProcessAtomic(&proc, pkt); - assert(a.type == ACTION_SEND_RESP); - - return a.delay; -} - -bool -SMMUv3SlaveInterface::recvTimingReq(PacketPtr pkt) -{ - DPRINTF(SMMUv3, "[t] req from %s addr=%#x size=%#x\n", - slavePort->getPeer(), pkt->getAddr(), pkt->getSize()); - - // @todo: We need to pay for this and not just zero it out - pkt->headerDelay = pkt->payloadDelay = 0; - - unsigned nbeats = - (pkt->getSize() + (portWidth-1)) / portWidth; - - if (xlateSlotsRemaining==0 || - (pkt->isWrite() && wrBufSlotsRemaining < nbeats)) - { - deviceNeedsRetry = true; - return false; - } - - if (pkt->isWrite()) - wrBufSlotsRemaining -= nbeats; - - std::string proc_name = csprintf("%s.port", name()); - SMMUTranslationProcess *proc = - new SMMUTranslationProcess(proc_name, *smmu, *this); - proc->beginTransaction(SMMUTranslRequest::fromPacket(pkt)); - - smmu->runProcessTiming(proc, pkt); - - return true; -} - -Tick -SMMUv3SlaveInterface::atsSlaveRecvAtomic(PacketPtr pkt) -{ - DPRINTF(SMMUv3, "[a] ATS slave req addr=%#x size=%#x\n", - pkt->getAddr(), pkt->getSize()); - - std::string proc_name = csprintf("%s.atsport", 
name()); - const bool ats_request = true; - SMMUTranslationProcess proc( - proc_name, *smmu, *this); - proc.beginTransaction(SMMUTranslRequest::fromPacket(pkt, ats_request)); - - SMMUAction a = smmu->runProcessAtomic(&proc, pkt); - assert(a.type == ACTION_SEND_RESP_ATS); - - return a.delay; -} - -bool -SMMUv3SlaveInterface::atsSlaveRecvTimingReq(PacketPtr pkt) -{ - DPRINTF(SMMUv3, "[t] ATS slave req addr=%#x size=%#x\n", - pkt->getAddr(), pkt->getSize()); - - // @todo: We need to pay for this and not just zero it out - pkt->headerDelay = pkt->payloadDelay = 0; - - if (xlateSlotsRemaining == 0) { - deviceNeedsRetry = true; - return false; - } - - std::string proc_name = csprintf("%s.atsport", name()); - const bool ats_request = true; - SMMUTranslationProcess *proc = - new SMMUTranslationProcess(proc_name, *smmu, *this); - proc->beginTransaction(SMMUTranslRequest::fromPacket(pkt, ats_request)); - - smmu->runProcessTiming(proc, pkt); - - return true; -} - -bool -SMMUv3SlaveInterface::atsMasterRecvTimingResp(PacketPtr pkt) -{ - DPRINTF(SMMUv3, "[t] ATS master resp addr=%#x size=%#x\n", - pkt->getAddr(), pkt->getSize()); - - // @todo: We need to pay for this and not just zero it out - pkt->headerDelay = pkt->payloadDelay = 0; - - SMMUProcess *proc = - safe_cast(pkt->popSenderState()); - - smmu->runProcessTiming(proc, pkt); - - return true; -} - -void -SMMUv3SlaveInterface::sendDeviceRetry() -{ - slavePort->sendRetryReq(); -} - -void -SMMUv3SlaveInterface::atsSendDeviceRetry() -{ - DPRINTF(SMMUv3, "ATS retry\n"); - atsSlavePort.sendRetryReq(); -} - -void -SMMUv3SlaveInterface::scheduleDeviceRetry() -{ - if (deviceNeedsRetry && !sendDeviceRetryEvent.scheduled()) { - DPRINTF(SMMUv3, "sched slave retry\n"); - deviceNeedsRetry = false; - schedule(sendDeviceRetryEvent, nextCycle()); - } -} - -DrainState -SMMUv3SlaveInterface::drain() -{ - // Wait until all SMMU translations are completed - if (xlateSlotsRemaining < params()->xlate_slots) { - return DrainState::Draining; - } - return DrainState::Drained; -} - -SMMUv3SlaveInterface* -SMMUv3SlaveInterfaceParams::create() -{ - return new SMMUv3SlaveInterface(this); -} diff --git a/src/dev/arm/smmu_v3_slaveifc.hh b/src/dev/arm/smmu_v3_slaveifc.hh deleted file mode 100644 index e1f8ef283..000000000 --- a/src/dev/arm/smmu_v3_slaveifc.hh +++ /dev/null @@ -1,141 +0,0 @@ -/* - * Copyright (c) 2013, 2018-2019 ARM Limited - * All rights reserved - * - * The license below extends only to copyright in the software and shall - * not be construed as granting a license to any other intellectual - * property including but not limited to intellectual property relating - * to a hardware implementation of the functionality of the software - * licensed hereunder. You may use the software subject to the license - * terms below provided that you ensure that this notice is replicated - * unmodified and in its entirety in all distributions of the software, - * modified or unmodified, in source code or in binary form. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef __DEV_ARM_SMMU_V3_SLAVEIFC_HH__ -#define __DEV_ARM_SMMU_V3_SLAVEIFC_HH__ - -#include - -#include "dev/arm/smmu_v3_caches.hh" -#include "dev/arm/smmu_v3_defs.hh" -#include "dev/arm/smmu_v3_events.hh" -#include "dev/arm/smmu_v3_ports.hh" -#include "dev/arm/smmu_v3_proc.hh" -#include "params/SMMUv3SlaveInterface.hh" -#include "sim/clocked_object.hh" - -class SMMUTranslationProcess; -class SMMUv3; -class SMMUSlavePort; - -class SMMUv3SlaveInterface : public ClockedObject -{ - protected: - friend class SMMUTranslationProcess; - - public: - SMMUv3 *smmu; - SMMUTLB* microTLB; - SMMUTLB* mainTLB; - - const bool microTLBEnable; - const bool mainTLBEnable; - - SMMUSemaphore slavePortSem; - SMMUSemaphore microTLBSem; - SMMUSemaphore mainTLBSem; - - const Cycles microTLBLat; - const Cycles mainTLBLat; - - SMMUSlavePort *slavePort; - SMMUATSSlavePort atsSlavePort; - SMMUATSMasterPort atsMasterPort; - - // in bytes - const unsigned portWidth; - - unsigned wrBufSlotsRemaining; - unsigned xlateSlotsRemaining; - unsigned pendingMemAccesses; - - const bool prefetchEnable; - const bool prefetchReserveLastWay; - - std::list duplicateReqs; - SMMUSignal duplicateReqRemoved; - - std::list dependentReads[SMMU_MAX_TRANS_ID]; - std::list dependentWrites[SMMU_MAX_TRANS_ID]; - SMMUSignal dependentReqRemoved; - - // Receiving translation requests from the master device - Tick recvAtomic(PacketPtr pkt); - bool recvTimingReq(PacketPtr pkt); - void schedTimingResp(PacketPtr pkt); - - Tick atsSlaveRecvAtomic(PacketPtr pkt); - bool atsSlaveRecvTimingReq(PacketPtr pkt); - bool atsMasterRecvTimingResp(PacketPtr pkt); - void schedAtsTimingResp(PacketPtr pkt); - - void scheduleDeviceRetry(); - void sendDeviceRetry(); - void atsSendDeviceRetry(); - - bool deviceNeedsRetry; - bool atsDeviceNeedsRetry; - - SMMUDeviceRetryEvent sendDeviceRetryEvent; - EventWrapper< - SMMUv3SlaveInterface, - &SMMUv3SlaveInterface::atsSendDeviceRetry> atsSendDeviceRetryEvent; - - Port& getPort(const std::string &name, PortID id) override; - - public: - 
SMMUv3SlaveInterface(const SMMUv3SlaveInterfaceParams *p); - - ~SMMUv3SlaveInterface() - { - delete microTLB; - delete mainTLB; - } - - const SMMUv3SlaveInterfaceParams * - params() const - { - return static_cast(_params); - } - - DrainState drain() override; - - void setSMMU(SMMUv3 *_smmu) { smmu = _smmu; } - void sendRange(); -}; - -#endif /* __DEV_ARM_SMMU_V3_SLAVEIFC_HH__ */ diff --git a/src/dev/arm/smmu_v3_transl.cc b/src/dev/arm/smmu_v3_transl.cc index c7b20f924..ab8a0e710 100644 --- a/src/dev/arm/smmu_v3_transl.cc +++ b/src/dev/arm/smmu_v3_transl.cc @@ -77,12 +77,12 @@ SMMUTranslRequest::prefetch(Addr addr, uint32_t sid, uint32_t ssid) } SMMUTranslationProcess::SMMUTranslationProcess(const std::string &name, - SMMUv3 &_smmu, SMMUv3SlaveInterface &_ifc) + SMMUv3 &_smmu, SMMUv3DeviceInterface &_ifc) : SMMUProcess(name, _smmu), ifc(_ifc) { - // Decrease number of pending translation slots on the slave interface + // Decrease number of pending translation slots on the device interface assert(ifc.xlateSlotsRemaining > 0); ifc.xlateSlotsRemaining--; @@ -92,12 +92,12 @@ SMMUTranslationProcess::SMMUTranslationProcess(const std::string &name, SMMUTranslationProcess::~SMMUTranslationProcess() { - // Increase number of pending translation slots on the slave interface + // Increase number of pending translation slots on the device interface assert(ifc.pendingMemAccesses > 0); ifc.pendingMemAccesses--; // If no more SMMU memory accesses are pending, - // signal SMMU Slave Interface as drained + // signal SMMU Device Interface as drained if (ifc.pendingMemAccesses == 0) { ifc.signalDrainDone(); } @@ -147,12 +147,12 @@ SMMUTranslationProcess::main(Yield &yield) request.addr, request.size); - unsigned numSlaveBeats = request.isWrite ? + unsigned numResponderBeats = request.isWrite ? 
(request.size + (ifc.portWidth - 1)) / ifc.portWidth : 1; - doSemaphoreDown(yield, ifc.slavePortSem); - doDelay(yield, Cycles(numSlaveBeats)); - doSemaphoreUp(ifc.slavePortSem); + doSemaphoreDown(yield, ifc.devicePortSem); + doDelay(yield, Cycles(numResponderBeats)); + doSemaphoreUp(ifc.devicePortSem); recvTick = curTick(); @@ -261,7 +261,7 @@ SMMUTranslationProcess::smmuTranslation(Yield &yield) bool haveConfig = true; if (!configCacheLookup(yield, context)) { - if(findConfig(yield, context, tr)) { + if (findConfig(yield, context, tr)) { configCacheUpdate(yield, context); } else { haveConfig = false; @@ -295,7 +295,7 @@ SMMUTranslationProcess::smmuTranslation(Yield &yield) smmuTLBUpdate(yield, tr); } - // Simulate pipelined SMMU->SLAVE INTERFACE link + // Simulate pipelined SMMU->RESPONSE INTERFACE link doSemaphoreDown(yield, smmu.smmuIfcSem); doDelay(yield, Cycles(1)); // serialize transactions doSemaphoreUp(smmu.smmuIfcSem); @@ -353,14 +353,14 @@ SMMUTranslationProcess::ifcTLBLookup(Yield &yield, TranslResult &tr, if (!e) { DPRINTF(SMMUv3, - "SLAVE Interface TLB miss vaddr=%#x sid=%#x ssid=%#x\n", + "RESPONSE Interface TLB miss vaddr=%#x sid=%#x ssid=%#x\n", request.addr, request.sid, request.ssid); return false; } DPRINTF(SMMUv3, - "SLAVE Interface TLB hit vaddr=%#x amask=%#x sid=%#x ssid=%#x " + "RESPONSE Interface TLB hit vaddr=%#x amask=%#x sid=%#x ssid=%#x " "paddr=%#x\n", request.addr, e->vaMask, request.sid, request.ssid, e->pa); @@ -465,7 +465,7 @@ SMMUTranslationProcess::ifcTLBUpdate(Yield &yield, doSemaphoreDown(yield, ifc.mainTLBSem); DPRINTF(SMMUv3, - "SLAVE Interface upd vaddr=%#x amask=%#x paddr=%#x sid=%#x " + "RESPONSE Interface upd vaddr=%#x amask=%#x paddr=%#x sid=%#x " "ssid=%#x\n", e.va, e.vaMask, e.pa, e.sid, e.ssid); ifc.mainTLB->store(e, alloc); @@ -1226,14 +1226,14 @@ SMMUTranslationProcess::completeTransaction(Yield &yield, { assert(tr.fault == FAULT_NONE); - unsigned numMasterBeats = request.isWrite ? - (request.size + (smmu.masterPortWidth-1)) - / smmu.masterPortWidth : + unsigned numRequestorBeats = request.isWrite ? 
+ (request.size + (smmu.requestPortWidth-1)) + / smmu.requestPortWidth : 1; - doSemaphoreDown(yield, smmu.masterPortSem); - doDelay(yield, Cycles(numMasterBeats)); - doSemaphoreUp(smmu.masterPortSem); + doSemaphoreDown(yield, smmu.requestPortSem); + doDelay(yield, Cycles(numRequestorBeats)); + doSemaphoreUp(smmu.requestPortSem); smmu.translationTimeDist.sample(curTick() - recvTick); @@ -1242,7 +1242,7 @@ SMMUTranslationProcess::completeTransaction(Yield &yield, ifc.wrBufSlotsRemaining += (request.size + (ifc.portWidth-1)) / ifc.portWidth; - smmu.scheduleSlaveRetries(); + smmu.scheduleDeviceRetries(); SMMUAction a; diff --git a/src/dev/arm/smmu_v3_transl.hh b/src/dev/arm/smmu_v3_transl.hh index d72c44c80..878addd0e 100644 --- a/src/dev/arm/smmu_v3_transl.hh +++ b/src/dev/arm/smmu_v3_transl.hh @@ -39,9 +39,9 @@ #define __DEV_ARM_SMMU_V3_TRANSL_HH__ #include "base/compiler.hh" +#include "dev/arm/smmu_v3_deviceifc.hh" #include "dev/arm/smmu_v3_proc.hh" #include "dev/arm/smmu_v3_ptops.hh" -#include "dev/arm/smmu_v3_slaveifc.hh" #include "mem/packet.hh" struct SMMUTranslRequest @@ -91,7 +91,7 @@ class SMMUTranslationProcess : public SMMUProcess bool writable; }; - SMMUv3SlaveInterface &ifc; + SMMUv3DeviceInterface &ifc; SMMUTranslRequest request; TranslContext context; @@ -174,7 +174,7 @@ class SMMUTranslationProcess : public SMMUProcess public: SMMUTranslationProcess(const std::string &name, SMMUv3 &_smmu, - SMMUv3SlaveInterface &_ifc); + SMMUv3DeviceInterface &_ifc); virtual ~SMMUTranslationProcess(); diff --git a/src/dev/arm/vgic.cc b/src/dev/arm/vgic.cc index 441e18214..154256179 100644 --- a/src/dev/arm/vgic.cc +++ b/src/dev/arm/vgic.cc @@ -148,7 +148,7 @@ VGic::readCtrl(PacketPtr pkt) DPRINTF(VGIC, "VGIC HVCtrl read register %#x\n", daddr); - /* Munge the address: 0-0xfff is the usual space banked by requester CPU. + /* Munge the address: 0-0xfff is the usual space banked by requestor CPU. * Anything > that is 0x200-sized slices of 'per CPU' regs. */ if (daddr & ~0x1ff) { @@ -292,7 +292,7 @@ VGic::writeCtrl(PacketPtr pkt) DPRINTF(VGIC, "VGIC HVCtrl write register %#x <= %#x\n", daddr, pkt->getLE()); - /* Munge the address: 0-0xfff is the usual space banked by requester CPU. + /* Munge the address: 0-0xfff is the usual space banked by requestor CPU. * Anything > that is 0x200-sized slices of 'per CPU' regs. 
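     * (Illustrative example, derived from the check just below: a write to
     * daddr = 0x004 has no bits above 0x1ff set and so hits the window
     * banked per requesting CPU, while daddr = 0x204 selects one of the
     * 0x200-sized per-CPU slices.)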
*/ if (daddr & ~0x1ff) { diff --git a/src/dev/dma_device.cc b/src/dev/dma_device.cc index 03882e320..ace8f2c86 100644 --- a/src/dev/dma_device.cc +++ b/src/dev/dma_device.cc @@ -52,7 +52,7 @@ DmaPort::DmaPort(ClockedObject *dev, System *s, uint32_t sid, uint32_t ssid) : RequestPort(dev->name() + ".dma", dev), - device(dev), sys(s), masterId(s->getMasterId(dev)), + device(dev), sys(s), requestorId(s->getRequestorId(dev)), sendEvent([this]{ sendDma(); }, dev->name()), pendingCount(0), inRetry(false), defaultSid(sid), @@ -166,7 +166,7 @@ DmaPort::dmaAction(Packet::Command cmd, Addr addr, int size, Event *event, !gen.done(); gen.next()) { req = std::make_shared( - gen.addr(), gen.size(), flag, masterId); + gen.addr(), gen.size(), flag, requestorId); req->setStreamId(sid); req->setSubStreamId(ssid); diff --git a/src/dev/dma_device.hh b/src/dev/dma_device.hh index 56c8d4ca5..2369fc471 100644 --- a/src/dev/dma_device.hh +++ b/src/dev/dma_device.hh @@ -114,7 +114,7 @@ class DmaPort : public RequestPort, public Drainable System *const sys; /** Id for all requests */ - const MasterID masterId; + const RequestorID requestorId; protected: /** Use a deque as we never do any insertion or removal in the middle */ diff --git a/src/dev/mips/Malta.py b/src/dev/mips/Malta.py index baed7e8ff..18fa21952 100755 --- a/src/dev/mips/Malta.py +++ b/src/dev/mips/Malta.py @@ -59,6 +59,6 @@ class Malta(Platform): # earlier, since the bus object itself is typically defined at the # System level. def attachIO(self, bus): - self.cchip.pio = bus.master - self.io.pio = bus.master - self.uart.pio = bus.master + self.cchip.pio = bus.mem_side_ports + self.io.pio = bus.mem_side_ports + self.uart.pio = bus.mem_side_ports diff --git a/src/dev/net/dist_iface.cc b/src/dev/net/dist_iface.cc index cc408e0c1..79742425e 100644 --- a/src/dev/net/dist_iface.cc +++ b/src/dev/net/dist_iface.cc @@ -60,7 +60,7 @@ System *DistIface::sys = nullptr; DistIface::SyncEvent *DistIface::syncEvent = nullptr; unsigned DistIface::distIfaceNum = 0; unsigned DistIface::recvThreadsNum = 0; -DistIface *DistIface::master = nullptr; +DistIface *DistIface::primary = nullptr; bool DistIface::isSwitch = false; void @@ -142,7 +142,7 @@ DistIface::SyncNode::run(bool same_tick) needExit = ReqType::pending; if (needStopSync != ReqType::none) needStopSync = ReqType::pending; - DistIface::master->sendCmd(header); + DistIface::primary->sendCmd(header); // now wait until all receiver threads complete the synchronisation auto lf = [this]{ return waitNum == 0; }; cv.wait(sync_lock, lf); @@ -191,7 +191,7 @@ DistIface::SyncSwitch::run(bool same_tick) } else { header.needStopSync = ReqType::none; } - DistIface::master->sendCmd(header); + DistIface::primary->sendCmd(header); return true; } @@ -410,7 +410,7 @@ DistIface::SyncEvent::process() start(); } else { // Wake up thread contexts on non-switch nodes. 
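            // (Illustrative note: thread contexts that suspended themselves
            // while waiting on the global sync barrier are reactivated by
            // the loop below once the barrier completes.)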
- for (auto *tc: master->sys->threads) { + for (auto *tc: primary->sys->threads) { if (tc->status() == ThreadContext::Suspended) tc->activate(); else @@ -503,10 +503,10 @@ DistIface::RecvScheduler::pushPacket(EthPacketPtr new_packet, "send_tick:%llu send_delay:%llu link_delay:%llu recv_tick:%llu\n", send_tick, send_delay, linkDelay, recv_tick); // Every packet must be sent and arrive in the same quantum - assert(send_tick > master->syncEvent->when() - - master->syncEvent->repeat); + assert(send_tick > primary->syncEvent->when() - + primary->syncEvent->repeat); // No packet may be scheduled for receive in the arrival quantum - assert(send_tick + send_delay + linkDelay > master->syncEvent->when()); + assert(send_tick + send_delay + linkDelay > primary->syncEvent->when()); // Now we are about to schedule a recvDone event for the new data packet. // We use the same recvDone object for all incoming data packets. Packet @@ -611,8 +611,8 @@ DistIface::DistIface(unsigned dist_rank, rank(dist_rank), size(dist_size) { DPRINTF(DistEthernet, "DistIface() ctor rank:%d\n",dist_rank); - isMaster = false; - if (master == nullptr) { + isPrimary = false; + if (primary == nullptr) { assert(sync == nullptr); assert(syncEvent == nullptr); isSwitch = is_switch; @@ -621,8 +621,8 @@ DistIface::DistIface(unsigned dist_rank, else sync = new SyncNode(); syncEvent = new SyncEvent(); - master = this; - isMaster = true; + primary = this; + isPrimary = true; } distIfaceId = distIfaceNum; distIfaceNum++; @@ -639,8 +639,8 @@ DistIface::~DistIface() assert(sync); delete sync; } - if (this == master) - master = nullptr; + if (this == primary) + primary = nullptr; } void @@ -728,7 +728,7 @@ DistIface::drain() { DPRINTF(DistEthernet,"DistIFace::drain() called\n"); // This can be called multiple times in the same drain cycle. - if (this == master) + if (this == primary) syncEvent->draining(true); return DrainState::Drained; } @@ -736,7 +736,7 @@ DistIface::drain() void DistIface::drainResume() { DPRINTF(DistEthernet,"DistIFace::drainResume() called\n"); - if (this == master) + if (this == primary) syncEvent->draining(false); recvScheduler.resumeRecvTicks(); } @@ -755,7 +755,7 @@ DistIface::serialize(CheckpointOut &cp) const SERIALIZE_SCALAR(dist_iface_id_orig); recvScheduler.serializeSection(cp, "recvScheduler"); - if (this == master) { + if (this == primary) { sync->serializeSection(cp, "Sync"); } } @@ -774,7 +774,7 @@ DistIface::unserialize(CheckpointIn &cp) dist_iface_id_orig); recvScheduler.unserializeSection(cp, "recvScheduler"); - if (this == master) { + if (this == primary) { sync->unserializeSection(cp, "Sync"); } } @@ -801,8 +801,8 @@ DistIface::init(const Event *done_event, Tick link_delay) // Initialize the seed for random generator to avoid the same sequence // in all gem5 peer processes - assert(master != nullptr); - if (this == master) + assert(primary != nullptr); + if (this == primary) random_mt.init(5489 * (rank+1) + 257); } @@ -811,7 +811,7 @@ DistIface::startup() { DPRINTF(DistEthernet, "DistIface::startup() started\n"); // Schedule synchronization unless we are not a switch in pseudo_op mode. 
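    // (Illustrative reading: the primary kicks off the periodic sync at
    // startup unless syncStartOnPseudoOp defers it to an m5 pseudo-op; a
    // switch node starts it here regardless of that flag.)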
- if (this == master && (!syncStartOnPseudoOp || isSwitch)) + if (this == primary && (!syncStartOnPseudoOp || isSwitch)) syncEvent->start(); DPRINTF(DistEthernet, "DistIface::startup() done\n"); } @@ -822,7 +822,7 @@ DistIface::readyToCkpt(Tick delay, Tick period) bool ret = true; DPRINTF(DistEthernet, "DistIface::readyToCkpt() called, delay:%lu " "period:%lu\n", delay, period); - if (master) { + if (primary) { if (delay == 0) { inform("m5 checkpoint called with zero delay => triggering collaborative " "checkpoint\n"); @@ -851,38 +851,38 @@ void DistIface::toggleSync(ThreadContext *tc) { // Unforunate that we have to populate the system pointer member this way. - master->sys = tc->getSystemPtr(); + primary->sys = tc->getSystemPtr(); // The invariant for both syncing and "unsyncing" is that all threads will // stop executing intructions until the desired sync state has been reached // for all nodes. This is the easiest way to prevent deadlock (in the case // of "unsyncing") and causality errors (in the case of syncing). - if (master->syncEvent->scheduled()) { + if (primary->syncEvent->scheduled()) { inform("Request toggling syncronization off\n"); - master->sync->requestStopSync(ReqType::collective); + primary->sync->requestStopSync(ReqType::collective); // At this point, we have no clue when everyone will reach the sync // stop point. Suspend execution of all local thread contexts. // Dist-gem5 will reactivate all thread contexts when everyone has // reached the sync stop point. #if THE_ISA != NULL_ISA - for (auto *tc: master->sys->threads) { + for (auto *tc: primary->sys->threads) { if (tc->status() == ThreadContext::Active) tc->quiesce(); } #endif } else { inform("Request toggling syncronization on\n"); - master->syncEvent->start(); + primary->syncEvent->start(); // We need to suspend all CPUs until the sync point is reached by all // nodes to prevent causality errors. We can also schedule CPU // activation here, since we know exactly when the next sync will // occur. 
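        // (Illustrative note: the quiesceTick() call below parks each
        // active context until one tick after the upcoming global sync
        // point, so execution resumes only once every node has reached
        // the barrier.)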
#if THE_ISA != NULL_ISA - for (auto *tc: master->sys->threads) { + for (auto *tc: primary->sys->threads) { if (tc->status() == ThreadContext::Active) - tc->quiesceTick(master->syncEvent->when() + 1); + tc->quiesceTick(primary->syncEvent->when() + 1); } #endif } @@ -894,10 +894,10 @@ DistIface::readyToExit(Tick delay) bool ret = true; DPRINTF(DistEthernet, "DistIface::readyToExit() called, delay:%lu\n", delay); - if (master) { + if (primary) { // To successfully coordinate an exit, all nodes must be synchronising - if (!master->syncEvent->scheduled()) - master->syncEvent->start(); + if (!primary->syncEvent->scheduled()) + primary->syncEvent->start(); if (delay == 0) { inform("m5 exit called with zero delay => triggering collaborative " @@ -917,8 +917,8 @@ uint64_t DistIface::rankParam() { uint64_t val; - if (master) { - val = master->rank; + if (primary) { + val = primary->rank; } else { warn("Dist-rank parameter is queried in single gem5 simulation."); val = 0; @@ -930,8 +930,8 @@ uint64_t DistIface::sizeParam() { uint64_t val; - if (master) { - val = master->size; + if (primary) { + val = primary->size; } else { warn("Dist-size parameter is queried in single gem5 simulation."); val = 1; diff --git a/src/dev/net/dist_iface.hh b/src/dev/net/dist_iface.hh index e568549c1..04843aa11 100644 --- a/src/dev/net/dist_iface.hh +++ b/src/dev/net/dist_iface.hh @@ -491,7 +491,7 @@ class DistIface : public Drainable, public Serializable */ unsigned distIfaceId; - bool isMaster; + bool isPrimary; private: /** @@ -507,10 +507,10 @@ class DistIface : public Drainable, public Serializable */ static SyncEvent *syncEvent; /** - * The very first DistIface object created becomes the master. We need - * a master to co-ordinate the global synchronisation. + * The very first DistIface object created becomes the primary interface. + * We need a primary interface to co-ordinate the global synchronisation. */ - static DistIface *master; + static DistIface *primary; /** * System pointer used to wakeup sleeping threads when stopping sync. */ @@ -635,7 +635,7 @@ class DistIface : public Drainable, public Serializable */ static uint64_t sizeParam(); /** - * Trigger the master to start/stop synchronization. + * Trigger the primary to start/stop synchronization. */ static void toggleSync(ThreadContext *tc); }; diff --git a/src/dev/net/tcp_iface.cc b/src/dev/net/tcp_iface.cc index ba2133496..cb6fecb9a 100644 --- a/src/dev/net/tcp_iface.cc +++ b/src/dev/net/tcp_iface.cc @@ -87,7 +87,7 @@ TCPIface::TCPIface(string server_name, unsigned server_port, is_switch, num_nodes), serverName(server_name), serverPort(server_port), isSwitch(is_switch), listening(false) { - if (is_switch && isMaster) { + if (is_switch && isPrimary) { while (!listen(serverPort)) { DPRINTF(DistEthernet, "TCPIface(listen): Can't bind port %d\n", serverPort); @@ -307,7 +307,7 @@ TCPIface::sendCmd(const Header &header) { DPRINTF(DistEthernetCmd, "TCPIface::sendCmd() type: %d\n", static_cast(header.msgType)); - // Global commands (i.e. sync request) are always sent by the master + // Global commands (i.e. sync request) are always sent by the primary // DistIface. 
The transfer method is simply implemented as point-to-point // messages for now for (auto s: sockRegistry) diff --git a/src/dev/pci/CopyEngine.py b/src/dev/pci/CopyEngine.py index abf4fbbd7..78b7e39a7 100644 --- a/src/dev/pci/CopyEngine.py +++ b/src/dev/pci/CopyEngine.py @@ -33,7 +33,7 @@ from m5.objects.PciDevice import PciDevice class CopyEngine(PciDevice): type = 'CopyEngine' cxx_header = "dev/pci/copy_engine.hh" - dma = VectorMasterPort("Copy engine DMA port") + dma = VectorRequestPort("Copy engine DMA port") VendorID = 0x8086 DeviceID = 0x1a38 Revision = 0xA2 # CM2 stepping (newest listed) diff --git a/src/dev/serial/terminal.cc b/src/dev/serial/terminal.cc index 7cfc6d745..8b420dae2 100644 --- a/src/dev/serial/terminal.cc +++ b/src/dev/serial/terminal.cc @@ -204,7 +204,7 @@ Terminal::accept() pollQueue.schedule(dataEvent); stringstream stream; - ccprintf(stream, "==== m5 slave terminal: Terminal %d ====", number); + ccprintf(stream, "==== m5 terminal: Terminal %d ====", number); // we need an actual carriage return followed by a newline for the // terminal diff --git a/src/dev/sparc/T1000.py b/src/dev/sparc/T1000.py index dee22078f..c98fb863c 100644 --- a/src/dev/sparc/T1000.py +++ b/src/dev/sparc/T1000.py @@ -112,8 +112,8 @@ class T1000(Platform): iob = Iob() # Attach I/O devices that are on chip def attachOnChipIO(self, bus): - self.iob.pio = bus.master - self.htod.pio = bus.master + self.iob.pio = bus.mem_side_ports + self.htod.pio = bus.mem_side_ports # Attach I/O devices to specified bus object. Can't do this @@ -122,17 +122,17 @@ class T1000(Platform): def attachIO(self, bus): self.hvuart.device = self.hterm self.puart0.device = self.pterm - self.fake_clk.pio = bus.master - self.fake_membnks.pio = bus.master - self.fake_l2_1.pio = bus.master - self.fake_l2_2.pio = bus.master - self.fake_l2_3.pio = bus.master - self.fake_l2_4.pio = bus.master - self.fake_l2esr_1.pio = bus.master - self.fake_l2esr_2.pio = bus.master - self.fake_l2esr_3.pio = bus.master - self.fake_l2esr_4.pio = bus.master - self.fake_ssi.pio = bus.master - self.fake_jbi.pio = bus.master - self.puart0.pio = bus.master - self.hvuart.pio = bus.master + self.fake_clk.pio = bus.mem_side_ports + self.fake_membnks.pio = bus.mem_side_ports + self.fake_l2_1.pio = bus.mem_side_ports + self.fake_l2_2.pio = bus.mem_side_ports + self.fake_l2_3.pio = bus.mem_side_ports + self.fake_l2_4.pio = bus.mem_side_ports + self.fake_l2esr_1.pio = bus.mem_side_ports + self.fake_l2esr_2.pio = bus.mem_side_ports + self.fake_l2esr_3.pio = bus.mem_side_ports + self.fake_l2esr_4.pio = bus.mem_side_ports + self.fake_ssi.pio = bus.mem_side_ports + self.fake_jbi.pio = bus.mem_side_ports + self.puart0.pio = bus.mem_side_ports + self.hvuart.pio = bus.mem_side_ports diff --git a/src/dev/storage/Ide.py b/src/dev/storage/Ide.py index 439a977e9..5edea494f 100644 --- a/src/dev/storage/Ide.py +++ b/src/dev/storage/Ide.py @@ -28,13 +28,13 @@ from m5.SimObject import SimObject from m5.params import * from m5.objects.PciDevice import PciDevice -class IdeID(Enum): vals = ['master', 'slave'] +class IdeID(Enum): vals = ['device0', 'device1'] class IdeDisk(SimObject): type = 'IdeDisk' cxx_header = "dev/storage/ide_disk.hh" delay = Param.Latency('1us', "Fixed disk delay in microseconds") - driveID = Param.IdeID('master', "Drive ID") + driveID = Param.IdeID('device0', "Drive ID") image = Param.DiskImage("Disk image") class IdeController(PciDevice): diff --git a/src/dev/storage/ide_ctrl.cc b/src/dev/storage/ide_ctrl.cc index 632144ca8..47cdd1027 100644 --- 
a/src/dev/storage/ide_ctrl.cc +++ b/src/dev/storage/ide_ctrl.cc @@ -77,7 +77,7 @@ IdeController::Channel::Channel( string newName, Addr _cmdSize, Addr _ctrlSize) : _name(newName), cmdAddr(0), cmdSize(_cmdSize), ctrlAddr(0), ctrlSize(_ctrlSize), - master(NULL), slave(NULL), selected(NULL) + device0(NULL), device1(NULL), selected(NULL) { bmiRegs.reset(); bmiRegs.status.dmaCap0 = 1; @@ -105,16 +105,16 @@ IdeController::IdeController(Params *p) continue; switch (i) { case 0: - primary.master = params()->disks[0]; + primary.device0 = params()->disks[0]; break; case 1: - primary.slave = params()->disks[1]; + primary.device1 = params()->disks[1]; break; case 2: - secondary.master = params()->disks[2]; + secondary.device0 = params()->disks[2]; break; case 3: - secondary.slave = params()->disks[3]; + secondary.device1 = params()->disks[3]; break; default: panic("IDE controllers support a maximum " @@ -156,9 +156,9 @@ void IdeController::setDmaComplete(IdeDisk *disk) { Channel *channel; - if (disk == primary.master || disk == primary.slave) { + if (disk == primary.device0 || disk == primary.device1) { channel = &primary; - } else if (disk == secondary.master || disk == secondary.slave) { + } else if (disk == secondary.device0 || disk == secondary.device1) { channel = &secondary; } else { panic("Unable to find disk based on pointer %#x\n", disk); diff --git a/src/dev/storage/ide_ctrl.hh b/src/dev/storage/ide_ctrl.hh index 44e8c5be9..51e1603f1 100644 --- a/src/dev/storage/ide_ctrl.hh +++ b/src/dev/storage/ide_ctrl.hh @@ -89,8 +89,13 @@ class IdeController : public PciDevice uint32_t bmidtp; } bmiRegs; - /** IDE disks connected to this controller */ - IdeDisk *master, *slave; + /** IDE disks connected to this controller + * For more details about device0 and device1 see: + * https://en.wikipedia.org/wiki/Parallel_ATA + * #Multiple_devices_on_a_cable + * + */ + IdeDisk *device0, *device1; /** Currently selected disk */ IdeDisk *selected; @@ -98,10 +103,10 @@ class IdeController : public PciDevice bool selectBit; void - select(bool selSlave) + select(bool select_device_1) { - selectBit = selSlave; - selected = selectBit ? slave : master; + selectBit = select_device_1; + selected = selectBit ? 
device1 : device0; } void accessCommand(Addr offset, int size, uint8_t *data, bool read); diff --git a/src/dev/storage/ide_disk.hh b/src/dev/storage/ide_disk.hh index 8a90d1c40..9f4294179 100644 --- a/src/dev/storage/ide_disk.hh +++ b/src/dev/storage/ide_disk.hh @@ -245,7 +245,7 @@ class IdeDisk : public SimObject uint32_t curPrdAddr; /** PRD entry */ PrdTableEntry curPrd; - /** Device ID (master=0/slave=1) */ + /** Device ID (device0=0/device1=1) */ int devID; /** Interrupt pending */ bool intrPending; diff --git a/src/dev/x86/I82094AA.py b/src/dev/x86/I82094AA.py index 2cb210aba..ce1f3948c 100644 --- a/src/dev/x86/I82094AA.py +++ b/src/dev/x86/I82094AA.py @@ -34,7 +34,7 @@ class I82094AA(BasicPioDevice): cxx_class = 'X86ISA::I82094AA' cxx_header = "dev/x86/i82094aa.hh" apic_id = Param.Int(1, 'APIC id for this IO APIC') - int_master = RequestPort("Port for sending interrupt messages") + int_requestor = RequestPort("Port for sending interrupt messages") int_latency = Param.Latency('1ns', \ "Latency for an interrupt to propagate through this device.") external_int_pic = Param.I8259(NULL, "External PIC, if any") diff --git a/src/dev/x86/Pc.py b/src/dev/x86/Pc.py index 4a732e601..a0a98253d 100644 --- a/src/dev/x86/Pc.py +++ b/src/dev/x86/Pc.py @@ -75,12 +75,12 @@ class Pc(Platform): def attachIO(self, bus, dma_ports = []): self.south_bridge.attachIO(bus, dma_ports) - self.i_dont_exist1.pio = bus.master - self.i_dont_exist2.pio = bus.master - self.behind_pci.pio = bus.master - self.com_1.pio = bus.master - self.fake_com_2.pio = bus.master - self.fake_com_3.pio = bus.master - self.fake_com_4.pio = bus.master - self.fake_floppy.pio = bus.master + self.i_dont_exist1.pio = bus.mem_side_ports + self.i_dont_exist2.pio = bus.mem_side_ports + self.behind_pci.pio = bus.mem_side_ports + self.com_1.pio = bus.mem_side_ports + self.fake_com_2.pio = bus.mem_side_ports + self.fake_com_3.pio = bus.mem_side_ports + self.fake_com_4.pio = bus.mem_side_ports + self.fake_floppy.pio = bus.mem_side_ports self.pci_host.pio = bus.default diff --git a/src/dev/x86/SouthBridge.py b/src/dev/x86/SouthBridge.py index 22e2d1f0c..095f88bc9 100644 --- a/src/dev/x86/SouthBridge.py +++ b/src/dev/x86/SouthBridge.py @@ -97,15 +97,15 @@ class SouthBridge(SimObject): self.speaker.i8254 = self.pit self.io_apic.external_int_pic = self.pic1 # Connect to the bus - self.cmos.pio = bus.master - self.dma1.pio = bus.master - self.ide.pio = bus.master + self.cmos.pio = bus.mem_side_ports + self.dma1.pio = bus.mem_side_ports + self.ide.pio = bus.mem_side_ports if dma_ports.count(self.ide.dma) == 0: - self.ide.dma = bus.slave - self.keyboard.pio = bus.master - self.pic1.pio = bus.master - self.pic2.pio = bus.master - self.pit.pio = bus.master - self.speaker.pio = bus.master - self.io_apic.pio = bus.master - self.io_apic.int_master = bus.slave + self.ide.dma = bus.cpu_side_ports + self.keyboard.pio = bus.mem_side_ports + self.pic1.pio = bus.mem_side_ports + self.pic2.pio = bus.mem_side_ports + self.pit.pio = bus.mem_side_ports + self.speaker.pio = bus.mem_side_ports + self.io_apic.pio = bus.mem_side_ports + self.io_apic.int_requestor = bus.cpu_side_ports diff --git a/src/dev/x86/i82094aa.cc b/src/dev/x86/i82094aa.cc index 8d91cc61a..c7817dce2 100644 --- a/src/dev/x86/i82094aa.cc +++ b/src/dev/x86/i82094aa.cc @@ -42,7 +42,7 @@ X86ISA::I82094AA::I82094AA(Params *p) : BasicPioDevice(p, 20), extIntPic(p->external_int_pic), lowestPriorityOffset(0), - intMasterPort(name() + ".int_master", this, this, p->int_latency) + intRequestPort(name() + 
".int_request", this, this, p->int_latency) { // This assumes there's only one I/O APIC in the system and since the apic // id is stored in a 8-bit field with 0xff meaning broadcast, the id must @@ -71,16 +71,16 @@ X86ISA::I82094AA::init() // the piodevice init() function. BasicPioDevice::init(); - // If the master port isn't connected, we can't send interrupts anywhere. - panic_if(!intMasterPort.isConnected(), + // If the request port isn't connected, we can't send interrupts anywhere. + panic_if(!intRequestPort.isConnected(), "Int port not connected to anything!"); } Port & X86ISA::I82094AA::getPort(const std::string &if_name, PortID idx) { - if (if_name == "int_master") - return intMasterPort; + if (if_name == "int_request") + return intRequestPort; if (if_name == "inputs") return *inputs.at(idx); else @@ -242,7 +242,7 @@ X86ISA::I82094AA::signalInterrupt(int line) } for (auto id: apics) { PacketPtr pkt = buildIntTriggerPacket(id, message); - intMasterPort.sendMessage(pkt, sys->isTimingMode()); + intRequestPort.sendMessage(pkt, sys->isTimingMode()); } } } diff --git a/src/dev/x86/i82094aa.hh b/src/dev/x86/i82094aa.hh index 2c81e2787..a5263b37d 100644 --- a/src/dev/x86/i82094aa.hh +++ b/src/dev/x86/i82094aa.hh @@ -82,7 +82,7 @@ class I82094AA : public BasicPioDevice std::vector *> inputs; - IntMasterPort intMasterPort; + IntRequestPort intRequestPort; public: typedef I82094AAParams Params; diff --git a/src/dev/x86/i8259.cc b/src/dev/x86/i8259.cc index 1e081d7f8..8ba1235a4 100644 --- a/src/dev/x86/i8259.cc +++ b/src/dev/x86/i8259.cc @@ -191,7 +191,8 @@ X86ISA::I8259::write(PacketPtr pkt) case 0x2: DPRINTF(I8259, "Received initialization command word 3.\n"); if (mode == Enums::I8259Master) { - DPRINTF(I8259, "Slaves attached to IRQs:%s%s%s%s%s%s%s%s\n", + DPRINTF(I8259, "Responders attached to " + "IRQs:%s%s%s%s%s%s%s%s\n", bits(val, 0) ? " 0" : "", bits(val, 1) ? " 1" : "", bits(val, 2) ? " 2" : "", @@ -202,7 +203,7 @@ X86ISA::I8259::write(PacketPtr pkt) bits(val, 7) ? " 7" : ""); cascadeBits = val; } else { - DPRINTF(I8259, "Slave ID is %d.\n", val & mask(3)); + DPRINTF(I8259, "Responder ID is %d.\n", val & mask(3)); cascadeBits = val & mask(3); } if (expectICW4) @@ -307,10 +308,10 @@ int X86ISA::I8259::getVector() { /* - * This code only handles one slave. Since that's how the PC platform + * This code only handles one responder. Since that's how the PC platform * always uses the 8259 PIC, there shouldn't be any need for more. If - * there -is- a need for more for some reason, "slave" can become a - * vector of slaves. + * there -is- a need for more for some reason, "responder" can become a + * vector of responders. 
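     * (Illustrative example: on the PC platform the second 8259 is cascaded
     * off line 2 of the first, so an interrupt raised on that line has its
     * vector supplied by slave->getVector() below rather than by this
     * controller.)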
*/ int line = findMsbSet(IRR); IRR &= ~(1 << line); @@ -321,7 +322,7 @@ X86ISA::I8259::getVector() ISR |= 1 << line; } if (slave && bits(cascadeBits, line)) { - DPRINTF(I8259, "Interrupt was from slave who will " + DPRINTF(I8259, "Interrupt was from responder who will " "provide the vector.\n"); return slave->getVector(); } diff --git a/src/dev/x86/i8259.hh b/src/dev/x86/i8259.hh index 85f9ef8f6..889a8cb9c 100644 --- a/src/dev/x86/i8259.hh +++ b/src/dev/x86/i8259.hh @@ -49,7 +49,7 @@ class I8259 : public BasicPioDevice std::vector *> output; std::vector *> inputs; Enums::X86I8259CascadeMode mode; - I8259 * slave; + I8259 *slave; // Interrupt Request Register uint8_t IRR; @@ -62,8 +62,9 @@ class I8259 : public BasicPioDevice uint8_t vectorOffset; bool cascadeMode; - // A bit vector of lines with slaves attached, or the slave id, depending - // on if this is a master or slave PIC. + // A bit vector of lines with responders attached, or the + // responder id, depending + // on if this is a requestor or responder PIC. uint8_t cascadeBits; bool edgeTriggered; diff --git a/src/dev/x86/intdev.hh b/src/dev/x86/intdev.hh index a681a2e22..f757fdbeb 100644 --- a/src/dev/x86/intdev.hh +++ b/src/dev/x86/intdev.hh @@ -53,12 +53,12 @@ namespace X86ISA { template -class IntSlavePort : public SimpleTimingPort +class IntResponsePort : public SimpleTimingPort { Device * device; public: - IntSlavePort(const std::string& _name, SimObject* _parent, + IntResponsePort(const std::string& _name, SimObject* _parent, Device* dev) : SimpleTimingPort(_name, _parent), device(dev) { @@ -86,7 +86,7 @@ PacketPtr buildIntPacket(Addr addr, T payload) { RequestPtr req = std::make_shared( - addr, sizeof(T), Request::UNCACHEABLE, Request::intMasterId); + addr, sizeof(T), Request::UNCACHEABLE, Request::intRequestorId); PacketPtr pkt = new Packet(req, MemCmd::WriteReq); pkt->allocate(); pkt->setRaw(payload); @@ -94,7 +94,7 @@ buildIntPacket(Addr addr, T payload) } template -class IntMasterPort : public QueuedMasterPort +class IntRequestPort : public QueuedRequestPort { private: ReqPacketQueue reqQueue; @@ -113,9 +113,9 @@ class IntMasterPort : public QueuedMasterPort static void defaultOnCompletion(PacketPtr pkt) { delete pkt; } public: - IntMasterPort(const std::string& _name, SimObject* _parent, + IntRequestPort(const std::string& _name, SimObject* _parent, Device* dev, Tick _latency) : - QueuedMasterPort(_name, _parent, reqQueue, snoopRespQueue), + QueuedRequestPort(_name, _parent, reqQueue, snoopRespQueue), reqQueue(*_parent, *this), snoopRespQueue(*_parent, *this), device(dev), latency(_latency) { diff --git a/src/gpu-compute/GPU.py b/src/gpu-compute/GPU.py index 05df84d2d..b82ad184f 100644 --- a/src/gpu-compute/GPU.py +++ b/src/gpu-compute/GPU.py @@ -159,8 +159,8 @@ class ComputeUnit(ClockedObject): coalescer_to_vrf_bus_width = Param.Int(64, "Coalescer->VRF data bus "\ "width in bytes") - memory_port = VectorMasterPort("Port to the memory system") - translation_port = VectorMasterPort('Port to the TLB hierarchy') + memory_port = VectorRequestPort("Port to the memory system") + translation_port = VectorRequestPort('Port to the TLB hierarchy') sqc_port = RequestPort("Port to the SQC (I-cache") sqc_tlb_port = RequestPort("Port to the TLB for the SQC (I-cache)") scalar_port = RequestPort("Port to the scalar data cache") diff --git a/src/gpu-compute/X86GPUTLB.py b/src/gpu-compute/X86GPUTLB.py index bd22bee56..45cb96229 100644 --- a/src/gpu-compute/X86GPUTLB.py +++ b/src/gpu-compute/X86GPUTLB.py @@ -58,8 +58,12 @@ class 
X86GPUTLB(ClockedObject):
     missLatency1 = Param.Int(5, "Latency #1 of a TLB miss")
     missLatency2 = Param.Int(100, "Latency #2 of a TLB miss")
     maxOutstandingReqs = Param.Int(64, "# of maximum outstanding requests")
-    slave = VectorSlavePort("Port on side closer to CPU/CU")
-    master = VectorMasterPort("Port on side closer to memory")
+    cpu_side_ports = VectorResponsePort("Ports on side closer to CPU/CU")
+    slave = DeprecatedParam(cpu_side_ports,
+        '`slave` is now called `cpu_side_ports`')
+    mem_side_ports = VectorRequestPort("Ports on side closer to memory")
+    master = DeprecatedParam(mem_side_ports,
+        '`master` is now called `mem_side_ports`')
     allocationPolicy = Param.Bool(True, "Allocate on an access")
     accessDistance = Param.Bool(False, "print accessDistance stats")
@@ -69,6 +73,10 @@ class TLBCoalescer(ClockedObject):
     cxx_header = 'gpu-compute/tlb_coalescer.hh'
     probesPerCycle = Param.Int(2, "Number of TLB probes per cycle")
     coalescingWindow = Param.Int(1, "Permit coalescing across that many ticks")
-    slave = VectorSlavePort("Port on side closer to CPU/CU")
-    master = VectorMasterPort("Port on side closer to memory")
+    cpu_side_ports = VectorResponsePort("Port on side closer to CPU/CU")
+    slave = DeprecatedParam(cpu_side_ports,
+        '`slave` is now called `cpu_side_ports`')
+    mem_side_ports = VectorRequestPort("Port on side closer to memory")
+    master = DeprecatedParam(mem_side_ports,
+        '`master` is now called `mem_side_ports`')
     disableCoalescing = Param.Bool(False,"Dispable Coalescing")
diff --git a/src/gpu-compute/compute_unit.cc b/src/gpu-compute/compute_unit.cc
index 920257d8d..33f5c6e54 100644
--- a/src/gpu-compute/compute_unit.cc
+++ b/src/gpu-compute/compute_unit.cc
@@ -95,7 +95,7 @@ ComputeUnit::ComputeUnit(const Params *p) : ClockedObject(p),
     countPages(p->countPages),
     req_tick_latency(p->mem_req_latency * p->clk_domain->clockPeriod()),
     resp_tick_latency(p->mem_resp_latency * p->clk_domain->clockPeriod()),
-    _masterId(p->system->getMasterId(this, "ComputeUnit")),
+    _requestorId(p->system->getRequestorId(this, "ComputeUnit")),
     lds(*p->localDataStore), gmTokenPort(name() + ".gmTokenPort", this),
     ldsPort(csprintf("%s-port", name()), this),
     scalarDataPort(csprintf("%s-port", name()), this),
@@ -183,7 +183,7 @@ ComputeUnit::ComputeUnit(const Params *p) : ClockedObject(p),
         tlbPort.emplace_back(csprintf("%s-port%d", name(), i), this, i);
     }
-    // Setup tokens for slave ports. The number of tokens in memSlaveTokens
+    // Setup tokens for response ports. The number of tokens in memPortTokens
     // is the total token count for the entire vector port (i.e., this CU).
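    // (Illustrative note on the X86GPUTLB.py and TLBCoalescer.py hunks
    // above, not part of this patch: DeprecatedParam keeps legacy configs
    // working, so a script that still writes, e.g.,
    //
    //     tlb = X86GPUTLB()
    //     tlb.slave = cu.translation_port   # old name, hypothetical wiring
    //
    // resolves to cpu_side_ports and is expected to emit a deprecation
    // warning pointing at the new name.)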
memPortTokens = new TokenManager(p->max_cu_tokens); @@ -1235,7 +1235,7 @@ ComputeUnit::injectGlobalMemFence(GPUDynInstPtr gpuDynInst, if (!req) { req = std::make_shared( - 0, 0, 0, masterId(), 0, gpuDynInst->wfDynId); + 0, 0, 0, requestorId(), 0, gpuDynInst->wfDynId); } // all mem sync requests have Paddr == 0 @@ -1500,7 +1500,7 @@ ComputeUnit::DTLBPort::recvTimingResp(PacketPtr pkt) RequestPtr prefetch_req = std::make_shared( vaddr + stride * pf * TheISA::PageBytes, sizeof(uint8_t), 0, - computeUnit->masterId(), + computeUnit->requestorId(), 0, 0, nullptr); PacketPtr prefetch_pkt = new Packet(prefetch_req, requestCmd); @@ -1528,7 +1528,7 @@ ComputeUnit::DTLBPort::recvTimingResp(PacketPtr pkt) } // First we must convert the response cmd back to a request cmd so that - // the request can be sent through the cu's master port + // the request can be sent through the cu's request port PacketPtr new_pkt = new Packet(pkt->req, requestCmd); new_pkt->dataStatic(pkt->getPtr()); delete pkt->senderState; @@ -1749,7 +1749,7 @@ ComputeUnit::ITLBPort::recvTimingResp(PacketPtr pkt) if (success) { // pkt is reused in fetch(), don't delete it here. However, we must // reset the command to be a request so that it can be sent through - // the cu's master port + // the cu's request port assert(pkt->cmd == MemCmd::ReadResp); pkt->cmd = MemCmd::ReadReq; diff --git a/src/gpu-compute/compute_unit.hh b/src/gpu-compute/compute_unit.hh index f7484af87..fe2091d31 100644 --- a/src/gpu-compute/compute_unit.hh +++ b/src/gpu-compute/compute_unit.hh @@ -458,13 +458,13 @@ class ComputeUnit : public ClockedObject void processFetchReturn(PacketPtr pkt); void updatePageDivergenceDist(Addr addr); - MasterID masterId() { return _masterId; } + RequestorID requestorId() { return _requestorId; } bool isDone() const; bool isVectorAluIdle(uint32_t simdId) const; protected: - MasterID _masterId; + RequestorID _requestorId; LdsState &lds; @@ -628,12 +628,12 @@ class ComputeUnit : public ClockedObject void exitCallback(); - class GMTokenPort : public TokenMasterPort + class GMTokenPort : public TokenRequestPort { public: GMTokenPort(const std::string& name, SimObject *owner, PortID id = InvalidPortID) - : TokenMasterPort(name, owner, id) + : TokenRequestPort(name, owner, id) { } ~GMTokenPort() { } diff --git a/src/gpu-compute/fetch_unit.cc b/src/gpu-compute/fetch_unit.cc index 3a139f530..4e4259e74 100644 --- a/src/gpu-compute/fetch_unit.cc +++ b/src/gpu-compute/fetch_unit.cc @@ -160,7 +160,7 @@ FetchUnit::initiateFetch(Wavefront *wavefront) // set up virtual request RequestPtr req = std::make_shared( vaddr, computeUnit.cacheLineSize(), Request::INST_FETCH, - computeUnit.masterId(), 0, 0, nullptr); + computeUnit.requestorId(), 0, 0, nullptr); PacketPtr pkt = new Packet(req, MemCmd::ReadReq); diff --git a/src/gpu-compute/gpu_tlb.cc b/src/gpu-compute/gpu_tlb.cc index 513106f4a..4c3539698 100644 --- a/src/gpu-compute/gpu_tlb.cc +++ b/src/gpu-compute/gpu_tlb.cc @@ -113,14 +113,14 @@ namespace X86ISA missLatency1 = p->missLatency1; missLatency2 = p->missLatency2; - // create the slave ports based on the number of connected ports - for (size_t i = 0; i < p->port_slave_connection_count; ++i) { + // create the response ports based on the number of connected ports + for (size_t i = 0; i < p->port_cpu_side_ports_connection_count; ++i) { cpuSidePort.push_back(new CpuSidePort(csprintf("%s-port%d", name(), i), this, i)); } - // create the master ports based on the number of connected ports - for (size_t i = 0; i < p->port_master_connection_count; 
++i) { + // create the request ports based on the number of connected ports + for (size_t i = 0; i < p->port_mem_side_ports_connection_count; ++i) { memSidePort.push_back(new MemSidePort(csprintf("%s-port%d", name(), i), this, i)); } @@ -136,13 +136,13 @@ namespace X86ISA Port & GpuTLB::getPort(const std::string &if_name, PortID idx) { - if (if_name == "slave") { + if (if_name == "cpu_side_ports") { if (idx >= static_cast(cpuSidePort.size())) { panic("TLBCoalescer::getPort: unknown index %d\n", idx); } return *cpuSidePort[idx]; - } else if (if_name == "master") { + } else if (if_name == "mem_side_ports") { if (idx >= static_cast(memSidePort.size())) { panic("TLBCoalescer::getPort: unknown index %d\n", idx); } @@ -930,7 +930,7 @@ namespace X86ISA Addr paddr = local_entry->paddr | (vaddr & (page_size - 1)); DPRINTF(GPUTLB, "Translated %#x -> %#x.\n", vaddr, paddr); - // Since this packet will be sent through the cpu side slave port, + // Since this packet will be sent through the cpu side port, // it must be converted to a response pkt if it is not one already if (pkt->isRequest()) { pkt->makeTimingResponse(); @@ -1324,7 +1324,7 @@ namespace X86ISA AddrRangeList GpuTLB::CpuSidePort::getAddrRanges() const { - // currently not checked by the master + // currently not checked by the requestor AddrRangeList ranges; return ranges; diff --git a/src/gpu-compute/shader.cc b/src/gpu-compute/shader.cc index 9ebbd3cf4..cc039d2f7 100644 --- a/src/gpu-compute/shader.cc +++ b/src/gpu-compute/shader.cc @@ -206,7 +206,7 @@ Shader::prepareInvalidate(HSAQueueEntry *task) { // create a request to hold INV info; the request's fields will // be updated in cu before use auto req = std::make_shared(0, 0, 0, - cuList[i_cu]->masterId(), + cuList[i_cu]->requestorId(), 0, -1); _dispatcher.updateInvCounter(kernId, +1); @@ -457,7 +457,7 @@ Shader::AccessMem(uint64_t address, void *ptr, uint32_t size, int cu_id, RequestPtr req = std::make_shared( gen.addr(), gen.size(), 0, - cuList[0]->masterId(), 0, 0, nullptr); + cuList[0]->requestorId(), 0, 0, nullptr); doFunctionalAccess(req, cmd, data_buf, suppress_func_errors, cu_id); data_buf += gen.size(); diff --git a/src/gpu-compute/tlb_coalescer.cc b/src/gpu-compute/tlb_coalescer.cc index 08eccd8d8..da4030b72 100644 --- a/src/gpu-compute/tlb_coalescer.cc +++ b/src/gpu-compute/tlb_coalescer.cc @@ -52,14 +52,14 @@ TLBCoalescer::TLBCoalescer(const Params *p) "Cleanup issuedTranslationsTable hashmap", false, Event::Maximum_Pri) { - // create the slave ports based on the number of connected ports - for (size_t i = 0; i < p->port_slave_connection_count; ++i) { + // create the response ports based on the number of connected ports + for (size_t i = 0; i < p->port_cpu_side_ports_connection_count; ++i) { cpuSidePort.push_back(new CpuSidePort(csprintf("%s-port%d", name(), i), this, i)); } - // create the master ports based on the number of connected ports - for (size_t i = 0; i < p->port_master_connection_count; ++i) { + // create the request ports based on the number of connected ports + for (size_t i = 0; i < p->port_mem_side_ports_connection_count; ++i) { memSidePort.push_back(new MemSidePort(csprintf("%s-port%d", name(), i), this, i)); } @@ -68,13 +68,13 @@ TLBCoalescer::TLBCoalescer(const Params *p) Port & TLBCoalescer::getPort(const std::string &if_name, PortID idx) { - if (if_name == "slave") { + if (if_name == "cpu_side_ports") { if (idx >= static_cast(cpuSidePort.size())) { panic("TLBCoalescer::getPort: unknown index %d\n", idx); } return *cpuSidePort[idx]; - } else if (if_name 
== "master") { + } else if (if_name == "mem_side_ports") { if (idx >= static_cast(memSidePort.size())) { panic("TLBCoalescer::getPort: unknown index %d\n", idx); } @@ -359,7 +359,7 @@ TLBCoalescer::CpuSidePort::recvFunctional(PacketPtr pkt) AddrRangeList TLBCoalescer::CpuSidePort::getAddrRanges() const { - // currently not checked by the master + // currently not checked by the requestor AddrRangeList ranges; return ranges; diff --git a/src/gpu-compute/tlb_coalescer.hh b/src/gpu-compute/tlb_coalescer.hh index defdc865d..4ab76f613 100644 --- a/src/gpu-compute/tlb_coalescer.hh +++ b/src/gpu-compute/tlb_coalescer.hh @@ -191,9 +191,9 @@ class TLBCoalescer : public ClockedObject } }; - // Coalescer slave ports on the cpu Side + // Coalescer response ports on the cpu Side std::vector cpuSidePort; - // Coalescer master ports on the memory side + // Coalescer request ports on the memory side std::vector memSidePort; Port &getPort(const std::string &if_name, diff --git a/src/learning_gem5/part2/SimpleCache.py b/src/learning_gem5/part2/SimpleCache.py index 40892b5db..ad94b5094 100644 --- a/src/learning_gem5/part2/SimpleCache.py +++ b/src/learning_gem5/part2/SimpleCache.py @@ -35,7 +35,7 @@ class SimpleCache(ClockedObject): # Vector port example. Both the instruction and data ports connect to this # port which is automatically split out into two ports. - cpu_side = VectorSlavePort("CPU side port, receives requests") + cpu_side = VectorResponsePort("CPU side port, receives requests") mem_side = RequestPort("Memory side port, sends requests") latency = Param.Cycles(1, "Cycles taken on a hit or to resolve a miss") diff --git a/src/learning_gem5/part2/simple_cache.cc b/src/learning_gem5/part2/simple_cache.cc index ad5e6631d..3a3cfe6dd 100644 --- a/src/learning_gem5/part2/simple_cache.cc +++ b/src/learning_gem5/part2/simple_cache.cc @@ -45,7 +45,8 @@ SimpleCache::SimpleCache(SimpleCacheParams *params) : // automatically created depending on the name of the vector port and // holds the number of connections to this port name for (int i = 0; i < params->port_cpu_side_connection_count; ++i) { - cpuPorts.emplace_back(name() + csprintf(".cpu_side[%d]", i), i, this); + cpuPorts.emplace_back(name() + csprintf(".cpu_side[%d]", i), + i, this); } } diff --git a/src/learning_gem5/part2/simple_cache.hh b/src/learning_gem5/part2/simple_cache.hh index 6dae1e41a..2f39f3d00 100644 --- a/src/learning_gem5/part2/simple_cache.hh +++ b/src/learning_gem5/part2/simple_cache.hh @@ -86,7 +86,7 @@ class SimpleCache : public ClockedObject /** * Get a list of the non-overlapping address ranges the owner is - * responsible for. All slave ports must override this function + * responsible for. All response ports must override this function * and return a populated list with at least one item. * * @return a list of ranges responded to @@ -101,14 +101,14 @@ class SimpleCache : public ClockedObject protected: /** - * Receive an atomic request packet from the master port. + * Receive an atomic request packet from the request port. * No need to implement in this simple cache. */ Tick recvAtomic(PacketPtr pkt) override { panic("recvAtomic unimpl."); } /** - * Receive a functional request packet from the master port. + * Receive a functional request packet from the request port. * Performs a "debug" access updating/reading the data in place. * * @param packet the requestor sent. @@ -116,7 +116,7 @@ class SimpleCache : public ClockedObject void recvFunctional(PacketPtr pkt) override; /** - * Receive a timing request from the master port. 
+ * Receive a timing request from the request port. * * @param the packet that the requestor sent * @return whether this object can consume to packet. If false, we @@ -126,9 +126,9 @@ class SimpleCache : public ClockedObject bool recvTimingReq(PacketPtr pkt) override; /** - * Called by the master port if sendTimingResp was called on this - * slave port (causing recvTimingResp to be called on the master - * port) and was unsuccesful. + * Called by the request port if sendTimingResp was called on this + * response port (causing recvTimingResp to be called on the request + * port) and was unsuccessful. */ void recvRespRetry() override; }; @@ -165,19 +165,19 @@ class SimpleCache : public ClockedObject protected: /** - * Receive a timing response from the slave port. + * Receive a timing response from the response port. */ bool recvTimingResp(PacketPtr pkt) override; /** - * Called by the slave port if sendTimingReq was called on this - * master port (causing recvTimingReq to be called on the slave + * Called by the response port if sendTimingReq was called on this + * request port (causing recvTimingReq to be called on the response * port) and was unsuccesful. */ void recvReqRetry() override; /** - * Called to receive an address range change from the peer slave + * Called to receive an address range change from the peer response * port. The default implementation ignores the change and does * nothing. Override this function in a derived class if the owner * needs to be aware of the address ranges, e.g. in an diff --git a/src/mem/AddrMapper.py b/src/mem/AddrMapper.py index 52d7ef829..60ef3bed1 100644 --- a/src/mem/AddrMapper.py +++ b/src/mem/AddrMapper.py @@ -37,10 +37,10 @@ from m5.params import * from m5.SimObject import SimObject # An address mapper changes the packet addresses in going from the -# slave port side of the mapper to the master port side. When the -# slave port is queried for the address ranges, it also performs the +# response port side of the mapper to the request port side. When the +# response port is queried for the address ranges, it also performs the # necessary range updates. Note that snoop requests that travel from -# the master port (i.e. the memory side) to the slave port are +# the request port (i.e. the memory side) to the response port are # currently not modified. 
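As a usage sketch of the renamed mapper ports (object names and address values are purely illustrative, and the parameter list assumes RangeAddrMapper's original_ranges/remapped_ranges pairing):

    # Remap one 1 GiB window of the address space; each entry in
    # original_ranges is translated to the corresponding remapped range.
    mapper = RangeAddrMapper(
        original_ranges = [AddrRange(start=0x100000000, size='1GB')],
        remapped_ranges = [AddrRange(start=0x0, size='1GB')])

    # Requests enter on the CPU-facing response side and leave, remapped,
    # on the memory-facing request side.
    some_requestor.port = mapper.cpu_side_port   # previously mapper.slave
    mapper.mem_side_port = some_memory.port      # previously mapper.master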
class AddrMapper(SimObject): type = 'AddrMapper' @@ -48,9 +48,14 @@ class AddrMapper(SimObject): abstract = True # one port in each direction - master = RequestPort("Master port") - slave = ResponsePort("Slave port") - + mem_side_port = RequestPort("This port sends requests and " + "receives responses") + master = DeprecatedParam(mem_side_port, + '`master` is now called `mem_side_port`') + cpu_side_port = ResponsePort("This port receives requests and " + "sends responses") + slave = DeprecatedParam(cpu_side_port, + '`slave` is now called `cpu_side_port`') # Range address mapper that maps a set of original ranges to a set of # remapped ranges, where a specific range is of the same size diff --git a/src/mem/Bridge.py b/src/mem/Bridge.py index a89e7f993..b4eb1d0db 100644 --- a/src/mem/Bridge.py +++ b/src/mem/Bridge.py @@ -42,8 +42,16 @@ from m5.objects.ClockedObject import ClockedObject class Bridge(ClockedObject): type = 'Bridge' cxx_header = "mem/bridge.hh" - slave = ResponsePort('Slave port') - master = RequestPort('Master port') + + mem_side_port = RequestPort("This port sends requests and " + "receives responses") + master = DeprecatedParam(mem_side_port, + '`master` is now called `mem_side_port`') + cpu_side_port = ResponsePort("This port receives requests and " + "sends responses") + slave = DeprecatedParam(cpu_side_port, + '`slave` is now called `cpu_side_port`') + req_size = Param.Unsigned(16, "The number of requests to buffer") resp_size = Param.Unsigned(16, "The number of responses to buffer") delay = Param.Latency('0ns', "The latency of this bridge") diff --git a/src/mem/CommMonitor.py b/src/mem/CommMonitor.py index 0fd884d7f..851e5a327 100644 --- a/src/mem/CommMonitor.py +++ b/src/mem/CommMonitor.py @@ -47,8 +47,14 @@ class CommMonitor(SimObject): system = Param.System(Parent.any, "System that the monitor belongs to.") # one port in each direction - master = RequestPort("Master port") - slave = ResponsePort("Slave port") + mem_side_port = RequestPort("This port sends requests and " + "receives responses") + master = DeprecatedParam(mem_side_port, + '`master` is now called `mem_side_port`') + cpu_side_port = ResponsePort("This port receives requests and " + "sends responses") + slave = DeprecatedParam(cpu_side_port, + '`slave` is now called `cpu_side_port`') # control the sample period window length of this monitor sample_period = Param.Clock("1ms", "Sample period for histograms") diff --git a/src/mem/DRAMSim2.py b/src/mem/DRAMSim2.py index bf5143ff6..d5147b182 100644 --- a/src/mem/DRAMSim2.py +++ b/src/mem/DRAMSim2.py @@ -42,7 +42,7 @@ class DRAMSim2(AbstractMemory): cxx_header = "mem/dramsim2.hh" # A single port for now - port = SlavePort("Slave port") + port = ResponsePort("This port sends responses and receives requests") deviceConfigFile = Param.String("ini/DDR3_micron_32M_8B_x8_sg15.ini", "Device configuration file") diff --git a/src/mem/MemChecker.py b/src/mem/MemChecker.py index 714ea79fd..69dae5236 100644 --- a/src/mem/MemChecker.py +++ b/src/mem/MemChecker.py @@ -46,10 +46,14 @@ class MemCheckerMonitor(SimObject): cxx_header = "mem/mem_checker_monitor.hh" # one port in each direction - master = RequestPort("Master port") - slave = ResponsePort("Slave port") - cpu_side = ResponsePort("Alias for slave") - mem_side = RequestPort("Alias for master") + mem_side_port = RequestPort("This port sends requests and receives " + "responses") + master = DeprecatedParam(mem_side_port,"`master` is now called " + "`mem_side_port`") + cpu_side_port = ResponsePort("This port receives 
requests and sends " + "responses") + slave = DeprecatedParam(cpu_side_port,"`slave` is now called " + "`cpu_side_port`") warn_only = Param.Bool(False, "Warn about violations only") memchecker = Param.MemChecker("Instance shared with other monitors") diff --git a/src/mem/MemCtrl.py b/src/mem/MemCtrl.py index e0f34241e..6736bb03f 100644 --- a/src/mem/MemCtrl.py +++ b/src/mem/MemCtrl.py @@ -56,7 +56,7 @@ class MemCtrl(QoSMemCtrl): # single-ported on the system interface side, instantiate with a # bus in front of the controller for multiple ports - port = SlavePort("Slave port") + port = ResponsePort("This port responds to memory requests") # Interface to volatile, DRAM media dram = Param.DRAMInterface(NULL, "DRAM interface") diff --git a/src/mem/MemDelay.py b/src/mem/MemDelay.py index 7ffb60810..4b322af98 100644 --- a/src/mem/MemDelay.py +++ b/src/mem/MemDelay.py @@ -41,8 +41,14 @@ class MemDelay(ClockedObject): cxx_header = 'mem/mem_delay.hh' abstract = True - master = RequestPort("Master port") - slave = ResponsePort("Slave port") + mem_side_port = RequestPort("This port sends requests and " + "receives responses") + master = DeprecatedParam(mem_side_port, + '`master` is now called `mem_side_port`') + cpu_side_port = ResponsePort("This port receives requests and " + "sends responses") + slave = DeprecatedParam(cpu_side_port, + '`slave` is now called `cpu_side_port`') class SimpleMemDelay(MemDelay): type = 'SimpleMemDelay' diff --git a/src/mem/SerialLink.py b/src/mem/SerialLink.py index 2174bc7a6..7cde69fc6 100644 --- a/src/mem/SerialLink.py +++ b/src/mem/SerialLink.py @@ -46,8 +46,14 @@ from m5.objects.ClockedObject import ClockedObject class SerialLink(ClockedObject): type = 'SerialLink' cxx_header = "mem/serial_link.hh" - slave = ResponsePort('Slave port') - master = RequestPort('Master port') + mem_side_port = RequestPort("This port sends requests and " + "receives responses") + master = DeprecatedParam(mem_side_port, + '`master` is now called `mem_side_port`') + cpu_side_port = ResponsePort("This port receives requests and " + "sends responses") + slave = DeprecatedParam(cpu_side_port, + '`slave` is now called `cpu_side_port`') req_size = Param.Unsigned(16, "The number of requests to buffer") resp_size = Param.Unsigned(16, "The number of responses to buffer") delay = Param.Latency('0ns', "The latency of this serial_link") diff --git a/src/mem/SimpleMemory.py b/src/mem/SimpleMemory.py index 8f3c1e5cc..6e4b91547 100644 --- a/src/mem/SimpleMemory.py +++ b/src/mem/SimpleMemory.py @@ -42,7 +42,7 @@ from m5.objects.AbstractMemory import * class SimpleMemory(AbstractMemory): type = 'SimpleMemory' cxx_header = "mem/simple_mem.hh" - port = SlavePort("Slave ports") + port = ResponsePort("This port sends responses and receives requests") latency = Param.Latency('30ns', "Request to response latency") latency_var = Param.Latency('0ns', "Request to response latency variance") # The memory bandwidth limit default is set to 12.8GB/s which is diff --git a/src/mem/XBar.py b/src/mem/XBar.py index af135876d..c1625843a 100644 --- a/src/mem/XBar.py +++ b/src/mem/XBar.py @@ -48,8 +48,14 @@ class BaseXBar(ClockedObject): abstract = True cxx_header = "mem/xbar.hh" - slave = VectorSlavePort("Vector port for connecting masters") - master = VectorMasterPort("Vector port for connecting slaves") + cpu_side_ports = VectorResponsePort("Vector port for connecting " + "mem side ports") + slave = DeprecatedParam(cpu_side_ports, + '`slave` is now called `cpu_side_ports`') + mem_side_ports = VectorRequestPort("Vector port 
for connecting " + "cpu side ports") + master = DeprecatedParam(mem_side_ports, + '`master` is now called `mem_side_ports`') # Latencies governing the time taken for the variuos paths a # packet has through the crossbar. Note that the crossbar itself @@ -68,7 +74,7 @@ class BaseXBar(ClockedObject): forward_latency = Param.Cycles("Forward latency") response_latency = Param.Cycles("Response latency") - # The XBar uses one Layer per master. Each Layer forwards a packet + # The XBar uses one Layer per requestor. Each Layer forwards a packet # to its destination and is occupied for header_latency + size / # width cycles header_latency = Param.Cycles(1, "Header latency") @@ -77,8 +83,8 @@ class BaseXBar(ClockedObject): width = Param.Unsigned("Datapath width per port (bytes)") # The default port can be left unconnected, or be used to connect - # a default slave port - default = RequestPort("Port for connecting an optional default slave") + # a default response port + default = RequestPort("Port for connecting an optional default responder") # The default port can be used unconditionally, or based on # address range, in which case it may overlap with other @@ -134,7 +140,7 @@ class SnoopFilter(SimObject): # Sanity check on max capacity to track, adjust if needed. max_capacity = Param.MemorySize('8MB', "Maximum capacity of snoop filter") -# We use a coherent crossbar to connect multiple masters to the L2 +# We use a coherent crossbar to connect multiple requestors to the L2 # caches. Normally this crossbar would be part of the cache itself. class L2XBar(CoherentXBar): # 256-bit crossbar by default @@ -159,7 +165,7 @@ class L2XBar(CoherentXBar): # One of the key coherent crossbar instances is the system # interconnect, tying together the CPU clusters, GPUs, and any I/O -# coherent masters, and DRAM controllers. +# coherent requestors, and DRAM controllers. 
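A sketch of how the renamed vector ports are used on the system interconnect, assuming a typical configuration with a list of CPUs and a single memory controller as in the example scripts:

    membus = SystemXBar()
    for cpu in system.cpu:
        # Each cache port is one requestor on the crossbar's response side.
        cpu.icache_port = membus.cpu_side_ports   # previously membus.slave
        cpu.dcache_port = membus.cpu_side_ports
    # Memory controllers hang off the crossbar's request side.
    membus.mem_side_ports = system.mem_ctrl.port  # previously membus.master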
class SystemXBar(CoherentXBar): # 128-bit crossbar by default width = 16 diff --git a/src/mem/abstract_mem.cc b/src/mem/abstract_mem.cc index f1e9dbab6..a5730c78d 100644 --- a/src/mem/abstract_mem.cc +++ b/src/mem/abstract_mem.cc @@ -145,54 +145,54 @@ AbstractMemory::MemStats::regStats() System *sys = mem.system(); assert(sys); - const auto max_masters = sys->maxMasters(); + const auto max_requestors = sys->maxRequestors(); bytesRead - .init(max_masters) + .init(max_requestors) .flags(total | nozero | nonan) ; - for (int i = 0; i < max_masters; i++) { - bytesRead.subname(i, sys->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + bytesRead.subname(i, sys->getRequestorName(i)); } bytesInstRead - .init(max_masters) + .init(max_requestors) .flags(total | nozero | nonan) ; - for (int i = 0; i < max_masters; i++) { - bytesInstRead.subname(i, sys->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + bytesInstRead.subname(i, sys->getRequestorName(i)); } bytesWritten - .init(max_masters) + .init(max_requestors) .flags(total | nozero | nonan) ; - for (int i = 0; i < max_masters; i++) { - bytesWritten.subname(i, sys->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + bytesWritten.subname(i, sys->getRequestorName(i)); } numReads - .init(max_masters) + .init(max_requestors) .flags(total | nozero | nonan) ; - for (int i = 0; i < max_masters; i++) { - numReads.subname(i, sys->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + numReads.subname(i, sys->getRequestorName(i)); } numWrites - .init(max_masters) + .init(max_requestors) .flags(total | nozero | nonan) ; - for (int i = 0; i < max_masters; i++) { - numWrites.subname(i, sys->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + numWrites.subname(i, sys->getRequestorName(i)); } numOther - .init(max_masters) + .init(max_requestors) .flags(total | nozero | nonan) ; - for (int i = 0; i < max_masters; i++) { - numOther.subname(i, sys->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + numOther.subname(i, sys->getRequestorName(i)); } bwRead @@ -200,8 +200,8 @@ AbstractMemory::MemStats::regStats() .prereq(bytesRead) .flags(total | nozero | nonan) ; - for (int i = 0; i < max_masters; i++) { - bwRead.subname(i, sys->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + bwRead.subname(i, sys->getRequestorName(i)); } bwInstRead @@ -209,8 +209,8 @@ AbstractMemory::MemStats::regStats() .prereq(bytesInstRead) .flags(total | nozero | nonan) ; - for (int i = 0; i < max_masters; i++) { - bwInstRead.subname(i, sys->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + bwInstRead.subname(i, sys->getRequestorName(i)); } bwWrite @@ -218,8 +218,8 @@ AbstractMemory::MemStats::regStats() .prereq(bytesWritten) .flags(total | nozero | nonan) ; - for (int i = 0; i < max_masters; i++) { - bwWrite.subname(i, sys->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + bwWrite.subname(i, sys->getRequestorName(i)); } bwTotal @@ -227,8 +227,8 @@ AbstractMemory::MemStats::regStats() .prereq(bwTotal) .flags(total | nozero | nonan) ; - for (int i = 0; i < max_masters; i++) { - bwTotal.subname(i, sys->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + bwTotal.subname(i, sys->getRequestorName(i)); } bwRead = bytesRead / simSeconds; @@ -324,10 +324,10 @@ AbstractMemory::checkLockedAddrList(PacketPtr pkt) i->contextId, paddr); ContextID owner_cid = i->contextId; assert(owner_cid != InvalidContextID); - ContextID requester_cid = req->hasContextId() ? 
+ ContextID requestor_cid = req->hasContextId() ? req->contextId() : InvalidContextID; - if (owner_cid != requester_cid) { + if (owner_cid != requestor_cid) { ThreadContext* ctx = system()->threads[owner_cid]; TheISA::globalClearExclusive(ctx); } @@ -350,14 +350,15 @@ tracePacket(System *sys, const char *label, PacketPtr pkt) if (size == 1 || size == 2 || size == 4 || size == 8) { ByteOrder byte_order = sys->getGuestByteOrder(); DPRINTF(MemoryAccess,"%s from %s of size %i on address %#x data " - "%#x %c\n", label, sys->getMasterName(pkt->req->masterId()), + "%#x %c\n", label, sys->getRequestorName(pkt->req-> + requestorId()), size, pkt->getAddr(), size, pkt->getAddr(), pkt->getUintX(byte_order), pkt->req->isUncacheable() ? 'U' : 'C'); return; } #endif DPRINTF(MemoryAccess, "%s from %s of size %i on address %#x %c\n", - label, sys->getMasterName(pkt->req->masterId()), + label, sys->getRequestorName(pkt->req->requestorId()), size, pkt->getAddr(), pkt->req->isUncacheable() ? 'U' : 'C'); DDUMP(MemoryAccess, pkt->getConstPtr(), pkt->getSize()); } @@ -424,7 +425,7 @@ AbstractMemory::access(PacketPtr pkt) assert(!pkt->req->isInstFetch()); TRACE_PACKET("Read/Write"); - stats.numOther[pkt->req->masterId()]++; + stats.numOther[pkt->req->requestorId()]++; } } else if (pkt->isRead()) { assert(!pkt->isWrite()); @@ -438,10 +439,10 @@ AbstractMemory::access(PacketPtr pkt) pkt->setData(host_addr); } TRACE_PACKET(pkt->req->isInstFetch() ? "IFetch" : "Read"); - stats.numReads[pkt->req->masterId()]++; - stats.bytesRead[pkt->req->masterId()] += pkt->getSize(); + stats.numReads[pkt->req->requestorId()]++; + stats.bytesRead[pkt->req->requestorId()] += pkt->getSize(); if (pkt->req->isInstFetch()) - stats.bytesInstRead[pkt->req->masterId()] += pkt->getSize(); + stats.bytesInstRead[pkt->req->requestorId()] += pkt->getSize(); } else if (pkt->isInvalidate() || pkt->isClean()) { assert(!pkt->isWrite()); // in a fastmem system invalidating and/or cleaning packets @@ -457,8 +458,8 @@ AbstractMemory::access(PacketPtr pkt) } assert(!pkt->req->isInstFetch()); TRACE_PACKET("Write"); - stats.numWrites[pkt->req->masterId()]++; - stats.bytesWritten[pkt->req->masterId()] += pkt->getSize(); + stats.numWrites[pkt->req->requestorId()]++; + stats.bytesWritten[pkt->req->requestorId()] += pkt->getSize(); } } else { panic("Unexpected packet %s", pkt->print()); diff --git a/src/mem/abstract_mem.hh b/src/mem/abstract_mem.hh index 616fd0e26..fe41ddceb 100644 --- a/src/mem/abstract_mem.hh +++ b/src/mem/abstract_mem.hh @@ -159,7 +159,7 @@ class AbstractMemory : public ClockedObject } /** Pointer to the System object. 
- * This is used for getting the number of masters in the system which is + * This is used for getting the number of requestors in the system which is * needed when registering stats */ System *_system; diff --git a/src/mem/addr_mapper.cc b/src/mem/addr_mapper.cc index 4facd5743..08a5cacd3 100644 --- a/src/mem/addr_mapper.cc +++ b/src/mem/addr_mapper.cc @@ -39,25 +39,25 @@ AddrMapper::AddrMapper(const AddrMapperParams* p) : SimObject(p), - masterPort(name() + "-master", *this), - slavePort(name() + "-slave", *this) + memSidePort(name() + "-mem_side_port", *this), + cpuSidePort(name() + "-cpu_side_port", *this) { } void AddrMapper::init() { - if (!slavePort.isConnected() || !masterPort.isConnected()) + if (!cpuSidePort.isConnected() || !memSidePort.isConnected()) fatal("Address mapper is not connected on both sides.\n"); } Port & AddrMapper::getPort(const std::string &if_name, PortID idx) { - if (if_name == "master") { - return masterPort; - } else if (if_name == "slave") { - return slavePort; + if (if_name == "mem_side_port") { + return memSidePort; + } else if (if_name == "cpu_side_port") { + return cpuSidePort; } else { return SimObject::getPort(if_name, idx); } @@ -68,7 +68,7 @@ AddrMapper::recvFunctional(PacketPtr pkt) { Addr orig_addr = pkt->getAddr(); pkt->setAddr(remapAddr(orig_addr)); - masterPort.sendFunctional(pkt); + memSidePort.sendFunctional(pkt); pkt->setAddr(orig_addr); } @@ -77,7 +77,7 @@ AddrMapper::recvFunctionalSnoop(PacketPtr pkt) { Addr orig_addr = pkt->getAddr(); pkt->setAddr(remapAddr(orig_addr)); - slavePort.sendFunctionalSnoop(pkt); + cpuSidePort.sendFunctionalSnoop(pkt); pkt->setAddr(orig_addr); } @@ -86,7 +86,7 @@ AddrMapper::recvAtomic(PacketPtr pkt) { Addr orig_addr = pkt->getAddr(); pkt->setAddr(remapAddr(orig_addr)); - Tick ret_tick = masterPort.sendAtomic(pkt); + Tick ret_tick = memSidePort.sendAtomic(pkt); pkt->setAddr(orig_addr); return ret_tick; } @@ -96,7 +96,7 @@ AddrMapper::recvAtomicSnoop(PacketPtr pkt) { Addr orig_addr = pkt->getAddr(); pkt->setAddr(remapAddr(orig_addr)); - Tick ret_tick = slavePort.sendAtomicSnoop(pkt); + Tick ret_tick = cpuSidePort.sendAtomicSnoop(pkt); pkt->setAddr(orig_addr); return ret_tick; } @@ -115,7 +115,7 @@ AddrMapper::recvTimingReq(PacketPtr pkt) pkt->setAddr(remapAddr(orig_addr)); // Attempt to send the packet - bool successful = masterPort.sendTimingReq(pkt); + bool successful = memSidePort.sendTimingReq(pkt); // If not successful, restore the address and sender state if (!successful) { @@ -147,7 +147,7 @@ AddrMapper::recvTimingResp(PacketPtr pkt) pkt->setAddr(receivedState->origAddr); // Attempt to send the packet - bool successful = slavePort.sendTimingResp(pkt); + bool successful = cpuSidePort.sendTimingResp(pkt); // If packet successfully sent, delete the sender state, otherwise // restore state @@ -165,19 +165,19 @@ AddrMapper::recvTimingResp(PacketPtr pkt) void AddrMapper::recvTimingSnoopReq(PacketPtr pkt) { - slavePort.sendTimingSnoopReq(pkt); + cpuSidePort.sendTimingSnoopReq(pkt); } bool AddrMapper::recvTimingSnoopResp(PacketPtr pkt) { - return masterPort.sendTimingSnoopResp(pkt); + return memSidePort.sendTimingSnoopResp(pkt); } bool AddrMapper::isSnooping() const { - if (slavePort.isSnooping()) + if (cpuSidePort.isSnooping()) fatal("AddrMapper doesn't support remapping of snooping requests\n"); return false; } @@ -185,19 +185,19 @@ AddrMapper::isSnooping() const void AddrMapper::recvReqRetry() { - slavePort.sendRetryReq(); + cpuSidePort.sendRetryReq(); } void AddrMapper::recvRespRetry() { - 
masterPort.sendRetryResp(); + memSidePort.sendRetryResp(); } void AddrMapper::recvRangeChange() { - slavePort.sendRangeChange(); + cpuSidePort.sendRangeChange(); } RangeAddrMapper::RangeAddrMapper(const RangeAddrMapperParams* p) : diff --git a/src/mem/addr_mapper.hh b/src/mem/addr_mapper.hh index 5e680a834..39db25a87 100644 --- a/src/mem/addr_mapper.hh +++ b/src/mem/addr_mapper.hh @@ -45,10 +45,10 @@ /** * An address mapper changes the packet addresses in going from the - * slave port side of the mapper to the master port side. When the - * slave port is queried for the address ranges, it also performs the + * response port side of the mapper to the request port side. When the + * response port is queried for the address ranges, it also performs the * necessary range updates. Note that snoop requests that travel from - * the master port (i.e. the memory side) to the slave port are + * the request port (i.e. the memory side) to the response port are * currently not modified. */ @@ -98,12 +98,12 @@ class AddrMapper : public SimObject }; - class MapperMasterPort : public RequestPort + class MapperRequestPort : public RequestPort { public: - MapperMasterPort(const std::string& _name, AddrMapper& _mapper) + MapperRequestPort(const std::string& _name, AddrMapper& _mapper) : RequestPort(_name, &_mapper), mapper(_mapper) { } @@ -150,15 +150,15 @@ class AddrMapper : public SimObject }; - /** Instance of master port, facing the memory side */ - MapperMasterPort masterPort; + /** Instance of request port, facing the memory side */ + MapperRequestPort memSidePort; - class MapperSlavePort : public ResponsePort + class MapperResponsePort : public ResponsePort { public: - MapperSlavePort(const std::string& _name, AddrMapper& _mapper) + MapperResponsePort(const std::string& _name, AddrMapper& _mapper) : ResponsePort(_name, &_mapper), mapper(_mapper) { } @@ -200,8 +200,8 @@ class AddrMapper : public SimObject }; - /** Instance of slave port, i.e. on the CPU side */ - MapperSlavePort slavePort; + /** Instance of response port, i.e. on the CPU side */ + MapperResponsePort cpuSidePort; void recvFunctional(PacketPtr pkt); diff --git a/src/mem/bridge.cc b/src/mem/bridge.cc index 3cf61f508..ae1b8eeb6 100644 --- a/src/mem/bridge.cc +++ b/src/mem/bridge.cc @@ -40,8 +40,8 @@ /** * @file - * Implementation of a memory-mapped bridge that connects a master - * and a slave through a request and response queue. + * Implementation of a memory-mapped bridge that connects a requestor + * and a responder through a request and response queue. 
*/ #include "mem/bridge.hh" @@ -50,23 +50,25 @@ #include "debug/Bridge.hh" #include "params/Bridge.hh" -Bridge::BridgeSlavePort::BridgeSlavePort(const std::string& _name, +Bridge::BridgeResponsePort::BridgeResponsePort(const std::string& _name, Bridge& _bridge, - BridgeMasterPort& _masterPort, + BridgeRequestPort& _memSidePort, Cycles _delay, int _resp_limit, std::vector _ranges) - : ResponsePort(_name, &_bridge), bridge(_bridge), masterPort(_masterPort), - delay(_delay), ranges(_ranges.begin(), _ranges.end()), + : ResponsePort(_name, &_bridge), bridge(_bridge), + memSidePort(_memSidePort), delay(_delay), + ranges(_ranges.begin(), _ranges.end()), outstandingResponses(0), retryReq(false), respQueueLimit(_resp_limit), sendEvent([this]{ trySendTiming(); }, _name) { } -Bridge::BridgeMasterPort::BridgeMasterPort(const std::string& _name, +Bridge::BridgeRequestPort::BridgeRequestPort(const std::string& _name, Bridge& _bridge, - BridgeSlavePort& _slavePort, + BridgeResponsePort& _cpuSidePort, Cycles _delay, int _req_limit) - : RequestPort(_name, &_bridge), bridge(_bridge), slavePort(_slavePort), + : RequestPort(_name, &_bridge), bridge(_bridge), + cpuSidePort(_cpuSidePort), delay(_delay), reqQueueLimit(_req_limit), sendEvent([this]{ trySendTiming(); }, _name) { @@ -74,9 +76,9 @@ Bridge::BridgeMasterPort::BridgeMasterPort(const std::string& _name, Bridge::Bridge(Params *p) : ClockedObject(p), - slavePort(p->name + ".slave", *this, masterPort, + cpuSidePort(p->name + ".cpu_side_port", *this, memSidePort, ticksToCycles(p->delay), p->resp_size, p->ranges), - masterPort(p->name + ".master", *this, slavePort, + memSidePort(p->name + ".mem_side_port", *this, cpuSidePort, ticksToCycles(p->delay), p->req_size) { } @@ -84,10 +86,10 @@ Bridge::Bridge(Params *p) Port & Bridge::getPort(const std::string &if_name, PortID idx) { - if (if_name == "master") - return masterPort; - else if (if_name == "slave") - return slavePort; + if (if_name == "mem_side_port") + return memSidePort; + else if (if_name == "cpu_side_port") + return cpuSidePort; else // pass it along to our super class return ClockedObject::getPort(if_name, idx); @@ -97,29 +99,29 @@ void Bridge::init() { // make sure both sides are connected and have the same block size - if (!slavePort.isConnected() || !masterPort.isConnected()) + if (!cpuSidePort.isConnected() || !memSidePort.isConnected()) fatal("Both ports of a bridge must be connected.\n"); - // notify the master side of our address ranges - slavePort.sendRangeChange(); + // notify the request side of our address ranges + cpuSidePort.sendRangeChange(); } bool -Bridge::BridgeSlavePort::respQueueFull() const +Bridge::BridgeResponsePort::respQueueFull() const { return outstandingResponses == respQueueLimit; } bool -Bridge::BridgeMasterPort::reqQueueFull() const +Bridge::BridgeRequestPort::reqQueueFull() const { return transmitList.size() == reqQueueLimit; } bool -Bridge::BridgeMasterPort::recvTimingResp(PacketPtr pkt) +Bridge::BridgeRequestPort::recvTimingResp(PacketPtr pkt) { - // all checks are done when the request is accepted on the slave + // all checks are done when the request is accepted on the response // side, so we are guaranteed to have space for the response DPRINTF(Bridge, "recvTimingResp: %s addr 0x%x\n", pkt->cmdString(), pkt->getAddr()); @@ -132,14 +134,14 @@ Bridge::BridgeMasterPort::recvTimingResp(PacketPtr pkt) Tick receive_delay = pkt->headerDelay + pkt->payloadDelay; pkt->headerDelay = pkt->payloadDelay = 0; - slavePort.schedTimingResp(pkt, bridge.clockEdge(delay) + + 
cpuSidePort.schedTimingResp(pkt, bridge.clockEdge(delay) + receive_delay); return true; } bool -Bridge::BridgeSlavePort::recvTimingReq(PacketPtr pkt) +Bridge::BridgeResponsePort::recvTimingReq(PacketPtr pkt) { DPRINTF(Bridge, "recvTimingReq: %s addr 0x%x\n", pkt->cmdString(), pkt->getAddr()); @@ -157,7 +159,7 @@ Bridge::BridgeSlavePort::recvTimingReq(PacketPtr pkt) transmitList.size(), outstandingResponses); // if the request queue is full then there is no hope - if (masterPort.reqQueueFull()) { + if (memSidePort.reqQueueFull()) { DPRINTF(Bridge, "Request queue full\n"); retryReq = true; } else { @@ -186,20 +188,20 @@ Bridge::BridgeSlavePort::recvTimingReq(PacketPtr pkt) Tick receive_delay = pkt->headerDelay + pkt->payloadDelay; pkt->headerDelay = pkt->payloadDelay = 0; - masterPort.schedTimingReq(pkt, bridge.clockEdge(delay) + + memSidePort.schedTimingReq(pkt, bridge.clockEdge(delay) + receive_delay); } } // remember that we are now stalling a packet and that we have to - // tell the sending master to retry once space becomes available, + // tell the sending requestor to retry once space becomes available, // we make no distinction whether the stalling is due to the // request queue or response queue being full return !retryReq; } void -Bridge::BridgeSlavePort::retryStalledReq() +Bridge::BridgeResponsePort::retryStalledReq() { if (retryReq) { DPRINTF(Bridge, "Request waiting for retry, now retrying\n"); @@ -209,7 +211,7 @@ Bridge::BridgeSlavePort::retryStalledReq() } void -Bridge::BridgeMasterPort::schedTimingReq(PacketPtr pkt, Tick when) +Bridge::BridgeRequestPort::schedTimingReq(PacketPtr pkt, Tick when) { // If we're about to put this packet at the head of the queue, we // need to schedule an event to do the transmit. Otherwise there @@ -226,7 +228,7 @@ Bridge::BridgeMasterPort::schedTimingReq(PacketPtr pkt, Tick when) void -Bridge::BridgeSlavePort::schedTimingResp(PacketPtr pkt, Tick when) +Bridge::BridgeResponsePort::schedTimingResp(PacketPtr pkt, Tick when) { // If we're about to put this packet at the head of the queue, we // need to schedule an event to do the transmit. 
Otherwise there @@ -240,7 +242,7 @@ Bridge::BridgeSlavePort::schedTimingResp(PacketPtr pkt, Tick when) } void -Bridge::BridgeMasterPort::trySendTiming() +Bridge::BridgeRequestPort::trySendTiming() { assert(!transmitList.empty()); @@ -270,7 +272,7 @@ Bridge::BridgeMasterPort::trySendTiming() // then send a retry at this point, also note that if the // request we stalled was waiting for the response queue // rather than the request queue we might stall it again - slavePort.retryStalledReq(); + cpuSidePort.retryStalledReq(); } // if the send failed, then we try again once we receive a retry, @@ -278,7 +280,7 @@ Bridge::BridgeMasterPort::trySendTiming() } void -Bridge::BridgeSlavePort::trySendTiming() +Bridge::BridgeResponsePort::trySendTiming() { assert(!transmitList.empty()); @@ -310,7 +312,7 @@ Bridge::BridgeSlavePort::trySendTiming() // if there is space in the request queue and we were stalling // a request, it will definitely be possible to accept it now // since there is guaranteed space in the response queue - if (!masterPort.reqQueueFull() && retryReq) { + if (!memSidePort.reqQueueFull() && retryReq) { DPRINTF(Bridge, "Request waiting for retry, now retrying\n"); retryReq = false; sendRetryReq(); @@ -322,28 +324,28 @@ Bridge::BridgeSlavePort::trySendTiming() } void -Bridge::BridgeMasterPort::recvReqRetry() +Bridge::BridgeRequestPort::recvReqRetry() { trySendTiming(); } void -Bridge::BridgeSlavePort::recvRespRetry() +Bridge::BridgeResponsePort::recvRespRetry() { trySendTiming(); } Tick -Bridge::BridgeSlavePort::recvAtomic(PacketPtr pkt) +Bridge::BridgeResponsePort::recvAtomic(PacketPtr pkt) { panic_if(pkt->cacheResponding(), "Should not see packets where cache " "is responding"); - return delay * bridge.clockPeriod() + masterPort.sendAtomic(pkt); + return delay * bridge.clockPeriod() + memSidePort.sendAtomic(pkt); } void -Bridge::BridgeSlavePort::recvFunctional(PacketPtr pkt) +Bridge::BridgeResponsePort::recvFunctional(PacketPtr pkt) { pkt->pushLabel(name()); @@ -355,19 +357,19 @@ Bridge::BridgeSlavePort::recvFunctional(PacketPtr pkt) } } - // also check the master port's request queue - if (masterPort.trySatisfyFunctional(pkt)) { + // also check the request port's request queue + if (memSidePort.trySatisfyFunctional(pkt)) { return; } pkt->popLabel(); // fall through if pkt still not satisfied - masterPort.sendFunctional(pkt); + memSidePort.sendFunctional(pkt); } bool -Bridge::BridgeMasterPort::trySatisfyFunctional(PacketPtr pkt) +Bridge::BridgeRequestPort::trySatisfyFunctional(PacketPtr pkt) { bool found = false; auto i = transmitList.begin(); @@ -384,7 +386,7 @@ Bridge::BridgeMasterPort::trySatisfyFunctional(PacketPtr pkt) } AddrRangeList -Bridge::BridgeSlavePort::getAddrRanges() const +Bridge::BridgeResponsePort::getAddrRanges() const { return ranges; } diff --git a/src/mem/bridge.hh b/src/mem/bridge.hh index dca863daa..2b03e136d 100644 --- a/src/mem/bridge.hh +++ b/src/mem/bridge.hh @@ -40,8 +40,8 @@ /** * @file - * Declaration of a memory-mapped bridge that connects a master - * and a slave through a request and response queue. + * Declaration of a memory-mapped bridge that connects a requestor + * and a responder through a request and response queue. */ #ifndef __MEM_BRIDGE_HH__ @@ -56,11 +56,11 @@ /** * A bridge is used to interface two different crossbars (or in general a - * memory-mapped master and slave), with buffering for requests and + * memory-mapped requestor and responder), with buffering for requests and * responses. 
The bridge has a fixed delay for packets passing through * it and responds to a fixed set of address ranges. * - * The bridge comprises a slave port and a master port, that buffer + * The bridge comprises a response port and a request port, that buffer * outgoing responses and requests respectively. Buffer space is * reserved when a request arrives, also reserving response space * before forwarding the request. If there is no space present, then @@ -87,16 +87,16 @@ class Bridge : public ClockedObject { } }; - // Forward declaration to allow the slave port to have a pointer - class BridgeMasterPort; + // Forward declaration to allow the response port to have a pointer + class BridgeRequestPort; /** * The port on the side that receives requests and sends - * responses. The slave port has a set of address ranges that it - * is responsible for. The slave port also has a buffer for the + * responses. The response port has a set of address ranges that it + * is responsible for. The response port also has a buffer for the * responses not yet sent. */ - class BridgeSlavePort : public ResponsePort + class BridgeResponsePort : public ResponsePort { private: @@ -105,9 +105,9 @@ class Bridge : public ClockedObject Bridge& bridge; /** - * Master port on the other side of the bridge. + * Request port on the other side of the bridge. */ - BridgeMasterPort& masterPort; + BridgeRequestPort& memSidePort; /** Minimum request delay though this bridge. */ const Cycles delay; @@ -158,17 +158,18 @@ class Bridge : public ClockedObject public: /** - * Constructor for the BridgeSlavePort. + * Constructor for the BridgeResponsePort. * * @param _name the port name including the owner * @param _bridge the structural owner - * @param _masterPort the master port on the other side of the bridge + * @param _memSidePort the request port on the other + * side of the bridge * @param _delay the delay in cycles from receiving to sending * @param _resp_limit the size of the response queue * @param _ranges a number of address ranges to forward */ - BridgeSlavePort(const std::string& _name, Bridge& _bridge, - BridgeMasterPort& _masterPort, Cycles _delay, + BridgeResponsePort(const std::string& _name, Bridge& _bridge, + BridgeRequestPort& _memSidePort, Cycles _delay, int _resp_limit, std::vector _ranges); /** @@ -213,10 +214,10 @@ class Bridge : public ClockedObject /** * Port on the side that forwards requests and receives - * responses. The master port has a buffer for the requests not + * responses. The request port has a buffer for the requests not * yet sent. */ - class BridgeMasterPort : public RequestPort + class BridgeRequestPort : public RequestPort { private: @@ -225,9 +226,9 @@ class Bridge : public ClockedObject Bridge& bridge; /** - * The slave port on the other side of the bridge. + * The response port on the other side of the bridge. */ - BridgeSlavePort& slavePort; + BridgeResponsePort& cpuSidePort; /** Minimum delay though this bridge. */ const Cycles delay; @@ -256,16 +257,17 @@ class Bridge : public ClockedObject public: /** - * Constructor for the BridgeMasterPort. + * Constructor for the BridgeRequestPort. 
* * @param _name the port name including the owner * @param _bridge the structural owner - * @param _slavePort the slave port on the other side of the bridge + * @param _cpuSidePort the response port on the other side of + * the bridge * @param _delay the delay in cycles from receiving to sending * @param _req_limit the size of the request queue */ - BridgeMasterPort(const std::string& _name, Bridge& _bridge, - BridgeSlavePort& _slavePort, Cycles _delay, + BridgeRequestPort(const std::string& _name, Bridge& _bridge, + BridgeResponsePort& _cpuSidePort, Cycles _delay, int _req_limit); /** @@ -305,11 +307,11 @@ class Bridge : public ClockedObject void recvReqRetry(); }; - /** Slave port of the bridge. */ - BridgeSlavePort slavePort; + /** Response port of the bridge. */ + BridgeResponsePort cpuSidePort; - /** Master port of the bridge. */ - BridgeMasterPort masterPort; + /** Request port of the bridge. */ + BridgeRequestPort memSidePort; public: diff --git a/src/mem/cache/base.cc b/src/mem/cache/base.cc index a1fd15129..c42071453 100644 --- a/src/mem/cache/base.cc +++ b/src/mem/cache/base.cc @@ -63,10 +63,10 @@ using namespace std; -BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name, +BaseCache::CacheResponsePort::CacheResponsePort(const std::string &_name, BaseCache *_cache, const std::string &_label) - : QueuedSlavePort(_name, _cache, queue), + : QueuedResponsePort(_name, _cache, queue), queue(*_cache, *this, true, _label), blocked(false), mustSendRetry(false), sendRetryEvent([this]{ processSendRetry(); }, _name) @@ -75,8 +75,8 @@ BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name, BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size) : ClockedObject(p), - cpuSidePort (p->name + ".cpu_side", this, "CpuSidePort"), - memSidePort(p->name + ".mem_side", this, "MemSidePort"), + cpuSidePort (p->name + ".cpu_side_port", this, "CpuSidePort"), + memSidePort(p->name + ".mem_side_port", this, "MemSidePort"), mshrQueue("MSHRs", p->mshrs, 0, p->demand_mshr_reserve), // see below writeBuffer("write buffer", p->write_buffers, p->mshrs), // see below tags(p->tags), @@ -114,7 +114,7 @@ BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size) // buffer before committing to an MSHR // forward snoops is overridden in init() once we can query - // whether the connected master is actually snooping or not + // whether the connected requestor is actually snooping or not tempBlock = new TempCacheBlk(blkSize); @@ -129,7 +129,7 @@ BaseCache::~BaseCache() } void -BaseCache::CacheSlavePort::setBlocked() +BaseCache::CacheResponsePort::setBlocked() { assert(!blocked); DPRINTF(CachePort, "Port is blocking new requests\n"); @@ -144,7 +144,7 @@ BaseCache::CacheSlavePort::setBlocked() } void -BaseCache::CacheSlavePort::clearBlocked() +BaseCache::CacheResponsePort::clearBlocked() { assert(blocked); DPRINTF(CachePort, "Port is accepting new requests\n"); @@ -156,7 +156,7 @@ BaseCache::CacheSlavePort::clearBlocked() } void -BaseCache::CacheSlavePort::processSendRetry() +BaseCache::CacheResponsePort::processSendRetry() { DPRINTF(CachePort, "Port is sending retry\n"); @@ -270,8 +270,8 @@ BaseCache::handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk, DPRINTF(Cache, "%s coalescing MSHR for %s\n", __func__, pkt->print()); - assert(pkt->req->masterId() < system->maxMasters()); - stats.cmdStats(pkt).mshr_hits[pkt->req->masterId()]++; + assert(pkt->req->requestorId() < system->maxRequestors()); + stats.cmdStats(pkt).mshr_hits[pkt->req->requestorId()]++; // We use 
forward_time here because it is the same // considering new targets. We have multiple @@ -294,8 +294,8 @@ BaseCache::handleTimingReqMiss(PacketPtr pkt, MSHR *mshr, CacheBlk *blk, } } else { // no MSHR - assert(pkt->req->masterId() < system->maxMasters()); - stats.cmdStats(pkt).mshr_misses[pkt->req->masterId()]++; + assert(pkt->req->requestorId() < system->maxRequestors()); + stats.cmdStats(pkt).mshr_misses[pkt->req->requestorId()]++; if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean) { // We use forward_time here because there is an @@ -441,13 +441,13 @@ BaseCache::recvTimingResp(PacketPtr pkt) const QueueEntry::Target *initial_tgt = mshr->getTarget(); const Tick miss_latency = curTick() - initial_tgt->recvTime; if (pkt->req->isUncacheable()) { - assert(pkt->req->masterId() < system->maxMasters()); + assert(pkt->req->requestorId() < system->maxRequestors()); stats.cmdStats(initial_tgt->pkt) - .mshr_uncacheable_lat[pkt->req->masterId()] += miss_latency; + .mshr_uncacheable_lat[pkt->req->requestorId()] += miss_latency; } else { - assert(pkt->req->masterId() < system->maxMasters()); + assert(pkt->req->requestorId() < system->maxRequestors()); stats.cmdStats(initial_tgt->pkt) - .mshr_miss_latency[pkt->req->masterId()] += miss_latency; + .mshr_miss_latency[pkt->req->requestorId()] += miss_latency; } PacketList writebacks; @@ -774,8 +774,8 @@ BaseCache::getNextQueueEntry() !writeBuffer.findMatch(pf_addr, pkt->isSecure())) { // Update statistic on number of prefetches issued // (hwpf_mshr_misses) - assert(pkt->req->masterId() < system->maxMasters()); - stats.cmdStats(pkt).mshr_misses[pkt->req->masterId()]++; + assert(pkt->req->requestorId() < system->maxRequestors()); + stats.cmdStats(pkt).mshr_misses[pkt->req->requestorId()]++; // allocate an MSHR and return it, note // that we send the packet straight away, so do not @@ -910,7 +910,7 @@ BaseCache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, bool, bool) // satisfying a string of Read and ReadEx requests from // upper-level caches, a Read will mark the block as shared but we // can satisfy a following ReadEx anyway since we can rely on the - // Read requester(s) to have buffered the ReadEx snoop and to + // Read requestor(s) to have buffered the ReadEx snoop and to // invalidate their blocks after receiving them. 
// assert(!pkt->needsWritable() || blk->isWritable()); assert(pkt->getOffset(blkSize) + pkt->getSize() <= blkSize); @@ -1489,10 +1489,10 @@ BaseCache::writebackBlk(CacheBlk *blk) "Writeback from read-only cache"); assert(blk && blk->isValid() && (blk->isDirty() || writebackClean)); - stats.writebacks[Request::wbMasterId]++; + stats.writebacks[Request::wbRequestorId]++; RequestPtr req = std::make_shared( - regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId); + regenerateBlkAddr(blk), blkSize, 0, Request::wbRequestorId); if (blk->isSecure()) req->setFlags(Request::SECURE); @@ -1534,7 +1534,7 @@ PacketPtr BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id) { RequestPtr req = std::make_shared( - regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId); + regenerateBlkAddr(blk), blkSize, 0, Request::wbRequestorId); if (blk->isSecure()) { req->setFlags(Request::SECURE); @@ -1607,7 +1607,7 @@ BaseCache::writebackVisitor(CacheBlk &blk) assert(blk.isValid()); RequestPtr request = std::make_shared( - regenerateBlkAddr(&blk), blkSize, 0, Request::funcMasterId); + regenerateBlkAddr(&blk), blkSize, 0, Request::funcRequestorId); request->taskId(blk.task_id); if (blk.isSecure()) { @@ -1869,121 +1869,121 @@ BaseCache::CacheCmdStats::regStatsFromParent() Stats::Group::regStats(); System *system = cache.system; - const auto max_masters = system->maxMasters(); + const auto max_requestors = system->maxRequestors(); hits - .init(max_masters) + .init(max_requestors) .flags(total | nozero | nonan) ; - for (int i = 0; i < max_masters; i++) { - hits.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + hits.subname(i, system->getRequestorName(i)); } // Miss statistics misses - .init(max_masters) + .init(max_requestors) .flags(total | nozero | nonan) ; - for (int i = 0; i < max_masters; i++) { - misses.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + misses.subname(i, system->getRequestorName(i)); } // Miss latency statistics missLatency - .init(max_masters) + .init(max_requestors) .flags(total | nozero | nonan) ; - for (int i = 0; i < max_masters; i++) { - missLatency.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + missLatency.subname(i, system->getRequestorName(i)); } // access formulas accesses.flags(total | nozero | nonan); accesses = hits + misses; - for (int i = 0; i < max_masters; i++) { - accesses.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + accesses.subname(i, system->getRequestorName(i)); } // miss rate formulas missRate.flags(total | nozero | nonan); missRate = misses / accesses; - for (int i = 0; i < max_masters; i++) { - missRate.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + missRate.subname(i, system->getRequestorName(i)); } // miss latency formulas avgMissLatency.flags(total | nozero | nonan); avgMissLatency = missLatency / misses; - for (int i = 0; i < max_masters; i++) { - avgMissLatency.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + avgMissLatency.subname(i, system->getRequestorName(i)); } // MSHR statistics // MSHR hit statistics mshr_hits - .init(max_masters) + .init(max_requestors) .flags(total | nozero | nonan) ; - for (int i = 0; i < max_masters; i++) { - mshr_hits.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + mshr_hits.subname(i, system->getRequestorName(i)); } // MSHR miss statistics mshr_misses - 
.init(max_masters) + .init(max_requestors) .flags(total | nozero | nonan) ; - for (int i = 0; i < max_masters; i++) { - mshr_misses.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + mshr_misses.subname(i, system->getRequestorName(i)); } // MSHR miss latency statistics mshr_miss_latency - .init(max_masters) + .init(max_requestors) .flags(total | nozero | nonan) ; - for (int i = 0; i < max_masters; i++) { - mshr_miss_latency.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + mshr_miss_latency.subname(i, system->getRequestorName(i)); } // MSHR uncacheable statistics mshr_uncacheable - .init(max_masters) + .init(max_requestors) .flags(total | nozero | nonan) ; - for (int i = 0; i < max_masters; i++) { - mshr_uncacheable.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + mshr_uncacheable.subname(i, system->getRequestorName(i)); } // MSHR miss latency statistics mshr_uncacheable_lat - .init(max_masters) + .init(max_requestors) .flags(total | nozero | nonan) ; - for (int i = 0; i < max_masters; i++) { - mshr_uncacheable_lat.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + mshr_uncacheable_lat.subname(i, system->getRequestorName(i)); } // MSHR miss rate formulas mshrMissRate.flags(total | nozero | nonan); mshrMissRate = mshr_misses / accesses; - for (int i = 0; i < max_masters; i++) { - mshrMissRate.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + mshrMissRate.subname(i, system->getRequestorName(i)); } // mshrMiss latency formulas avgMshrMissLatency.flags(total | nozero | nonan); avgMshrMissLatency = mshr_miss_latency / mshr_misses; - for (int i = 0; i < max_masters; i++) { - avgMshrMissLatency.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + avgMshrMissLatency.subname(i, system->getRequestorName(i)); } // mshrUncacheable latency formulas avgMshrUncacheableLatency.flags(total | nozero | nonan); avgMshrUncacheableLatency = mshr_uncacheable_lat / mshr_uncacheable; - for (int i = 0; i < max_masters; i++) { - avgMshrUncacheableLatency.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + avgMshrUncacheableLatency.subname(i, system->getRequestorName(i)); } } @@ -2064,7 +2064,7 @@ BaseCache::CacheStats::regStats() Stats::Group::regStats(); System *system = cache.system; - const auto max_masters = system->maxMasters(); + const auto max_requestors = system->maxRequestors(); for (auto &cs : cmd) cs->regStatsFromParent(); @@ -2084,74 +2084,74 @@ BaseCache::CacheStats::regStats() demandHits.flags(total | nozero | nonan); demandHits = SUM_DEMAND(hits); - for (int i = 0; i < max_masters; i++) { - demandHits.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + demandHits.subname(i, system->getRequestorName(i)); } overallHits.flags(total | nozero | nonan); overallHits = demandHits + SUM_NON_DEMAND(hits); - for (int i = 0; i < max_masters; i++) { - overallHits.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + overallHits.subname(i, system->getRequestorName(i)); } demandMisses.flags(total | nozero | nonan); demandMisses = SUM_DEMAND(misses); - for (int i = 0; i < max_masters; i++) { - demandMisses.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + demandMisses.subname(i, system->getRequestorName(i)); } overallMisses.flags(total | nozero | nonan); overallMisses = demandMisses + 
SUM_NON_DEMAND(misses); - for (int i = 0; i < max_masters; i++) { - overallMisses.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + overallMisses.subname(i, system->getRequestorName(i)); } demandMissLatency.flags(total | nozero | nonan); demandMissLatency = SUM_DEMAND(missLatency); - for (int i = 0; i < max_masters; i++) { - demandMissLatency.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + demandMissLatency.subname(i, system->getRequestorName(i)); } overallMissLatency.flags(total | nozero | nonan); overallMissLatency = demandMissLatency + SUM_NON_DEMAND(missLatency); - for (int i = 0; i < max_masters; i++) { - overallMissLatency.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + overallMissLatency.subname(i, system->getRequestorName(i)); } demandAccesses.flags(total | nozero | nonan); demandAccesses = demandHits + demandMisses; - for (int i = 0; i < max_masters; i++) { - demandAccesses.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + demandAccesses.subname(i, system->getRequestorName(i)); } overallAccesses.flags(total | nozero | nonan); overallAccesses = overallHits + overallMisses; - for (int i = 0; i < max_masters; i++) { - overallAccesses.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + overallAccesses.subname(i, system->getRequestorName(i)); } demandMissRate.flags(total | nozero | nonan); demandMissRate = demandMisses / demandAccesses; - for (int i = 0; i < max_masters; i++) { - demandMissRate.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + demandMissRate.subname(i, system->getRequestorName(i)); } overallMissRate.flags(total | nozero | nonan); overallMissRate = overallMisses / overallAccesses; - for (int i = 0; i < max_masters; i++) { - overallMissRate.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + overallMissRate.subname(i, system->getRequestorName(i)); } demandAvgMissLatency.flags(total | nozero | nonan); demandAvgMissLatency = demandMissLatency / demandMisses; - for (int i = 0; i < max_masters; i++) { - demandAvgMissLatency.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + demandAvgMissLatency.subname(i, system->getRequestorName(i)); } overallAvgMissLatency.flags(total | nozero | nonan); overallAvgMissLatency = overallMissLatency / overallMisses; - for (int i = 0; i < max_masters; i++) { - overallAvgMissLatency.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + overallAvgMissLatency.subname(i, system->getRequestorName(i)); } blocked_cycles.init(NUM_BLOCKED_CAUSES); @@ -2176,55 +2176,55 @@ BaseCache::CacheStats::regStats() unusedPrefetches.flags(nozero); writebacks - .init(max_masters) + .init(max_requestors) .flags(total | nozero | nonan) ; - for (int i = 0; i < max_masters; i++) { - writebacks.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + writebacks.subname(i, system->getRequestorName(i)); } demandMshrHits.flags(total | nozero | nonan); demandMshrHits = SUM_DEMAND(mshr_hits); - for (int i = 0; i < max_masters; i++) { - demandMshrHits.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + demandMshrHits.subname(i, system->getRequestorName(i)); } overallMshrHits.flags(total | nozero | nonan); overallMshrHits = demandMshrHits + SUM_NON_DEMAND(mshr_hits); - for (int i = 0; i < max_masters; i++) { - 
overallMshrHits.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + overallMshrHits.subname(i, system->getRequestorName(i)); } demandMshrMisses.flags(total | nozero | nonan); demandMshrMisses = SUM_DEMAND(mshr_misses); - for (int i = 0; i < max_masters; i++) { - demandMshrMisses.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + demandMshrMisses.subname(i, system->getRequestorName(i)); } overallMshrMisses.flags(total | nozero | nonan); overallMshrMisses = demandMshrMisses + SUM_NON_DEMAND(mshr_misses); - for (int i = 0; i < max_masters; i++) { - overallMshrMisses.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + overallMshrMisses.subname(i, system->getRequestorName(i)); } demandMshrMissLatency.flags(total | nozero | nonan); demandMshrMissLatency = SUM_DEMAND(mshr_miss_latency); - for (int i = 0; i < max_masters; i++) { - demandMshrMissLatency.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + demandMshrMissLatency.subname(i, system->getRequestorName(i)); } overallMshrMissLatency.flags(total | nozero | nonan); overallMshrMissLatency = demandMshrMissLatency + SUM_NON_DEMAND(mshr_miss_latency); - for (int i = 0; i < max_masters; i++) { - overallMshrMissLatency.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + overallMshrMissLatency.subname(i, system->getRequestorName(i)); } overallMshrUncacheable.flags(total | nozero | nonan); overallMshrUncacheable = SUM_DEMAND(mshr_uncacheable) + SUM_NON_DEMAND(mshr_uncacheable); - for (int i = 0; i < max_masters; i++) { - overallMshrUncacheable.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + overallMshrUncacheable.subname(i, system->getRequestorName(i)); } @@ -2232,39 +2232,39 @@ BaseCache::CacheStats::regStats() overallMshrUncacheableLatency = SUM_DEMAND(mshr_uncacheable_lat) + SUM_NON_DEMAND(mshr_uncacheable_lat); - for (int i = 0; i < max_masters; i++) { - overallMshrUncacheableLatency.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + overallMshrUncacheableLatency.subname(i, system->getRequestorName(i)); } demandMshrMissRate.flags(total | nozero | nonan); demandMshrMissRate = demandMshrMisses / demandAccesses; - for (int i = 0; i < max_masters; i++) { - demandMshrMissRate.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + demandMshrMissRate.subname(i, system->getRequestorName(i)); } overallMshrMissRate.flags(total | nozero | nonan); overallMshrMissRate = overallMshrMisses / overallAccesses; - for (int i = 0; i < max_masters; i++) { - overallMshrMissRate.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + overallMshrMissRate.subname(i, system->getRequestorName(i)); } demandAvgMshrMissLatency.flags(total | nozero | nonan); demandAvgMshrMissLatency = demandMshrMissLatency / demandMshrMisses; - for (int i = 0; i < max_masters; i++) { - demandAvgMshrMissLatency.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + demandAvgMshrMissLatency.subname(i, system->getRequestorName(i)); } overallAvgMshrMissLatency.flags(total | nozero | nonan); overallAvgMshrMissLatency = overallMshrMissLatency / overallMshrMisses; - for (int i = 0; i < max_masters; i++) { - overallAvgMshrMissLatency.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + overallAvgMshrMissLatency.subname(i, 
system->getRequestorName(i)); } overallAvgMshrUncacheableLatency.flags(total | nozero | nonan); overallAvgMshrUncacheableLatency = overallMshrUncacheableLatency / overallMshrUncacheable; - for (int i = 0; i < max_masters; i++) { - overallAvgMshrUncacheableLatency.subname(i, system->getMasterName(i)); + for (int i = 0; i < max_requestors; i++) { + overallAvgMshrUncacheableLatency.subname(i, system->getRequestorName(i)); } dataExpansions.flags(nozero | nonan); @@ -2291,7 +2291,7 @@ BaseCache::CpuSidePort::recvTimingSnoopResp(PacketPtr pkt) assert(pkt->isResponse()); - // Express snoop responses from master to slave, e.g., from L1 to L2 + // Express snoop responses from requestor to responder, e.g., from L1 to L2 cache->recvTimingSnoopResp(pkt); return true; } @@ -2365,7 +2365,7 @@ BaseCache::CpuSidePort::getAddrRanges() const BaseCache:: CpuSidePort::CpuSidePort(const std::string &_name, BaseCache *_cache, const std::string &_label) - : CacheSlavePort(_name, _cache, _label), cache(_cache) + : CacheResponsePort(_name, _cache, _label), cache(_cache) { } @@ -2452,7 +2452,7 @@ BaseCache::CacheReqPacketQueue::sendDeferredPacket() BaseCache::MemSidePort::MemSidePort(const std::string &_name, BaseCache *_cache, const std::string &_label) - : CacheMasterPort(_name, _cache, _reqQueue, _snoopRespQueue), + : CacheRequestPort(_name, _cache, _reqQueue, _snoopRespQueue), _reqQueue(*_cache, *this, _snoopRespQueue, _label), _snoopRespQueue(*_cache, *this, true, _label), cache(_cache) { diff --git a/src/mem/cache/base.hh b/src/mem/cache/base.hh index c129661a1..998648492 100644 --- a/src/mem/cache/base.hh +++ b/src/mem/cache/base.hh @@ -111,7 +111,7 @@ class BaseCache : public ClockedObject protected: /** - * A cache master port is used for the memory-side port of the + * A cache request port is used for the memory-side port of the * cache, and in addition to the basic timing port that only sends * response packets through a transmit list, it also offers the * ability to schedule and send request packets (requests & @@ -119,7 +119,7 @@ class BaseCache : public ClockedObject * and the sendDeferredPacket of the timing port is modified to * consider both the transmit list and the requests from the MSHR. */ - class CacheMasterPort : public QueuedMasterPort + class CacheRequestPort : public QueuedRequestPort { public: @@ -136,10 +136,10 @@ class BaseCache : public ClockedObject protected: - CacheMasterPort(const std::string &_name, BaseCache *_cache, + CacheRequestPort(const std::string &_name, BaseCache *_cache, ReqPacketQueue &_reqQueue, SnoopRespPacketQueue &_snoopRespQueue) : - QueuedMasterPort(_name, _cache, _reqQueue, _snoopRespQueue) + QueuedRequestPort(_name, _cache, _reqQueue, _snoopRespQueue) { } /** @@ -202,10 +202,10 @@ class BaseCache : public ClockedObject /** - * The memory-side port extends the base cache master port with + * The memory-side port extends the base cache request port with * access functions for functional, atomic and timing snoops. */ - class MemSidePort : public CacheMasterPort + class MemSidePort : public CacheRequestPort { private: @@ -234,14 +234,14 @@ class BaseCache : public ClockedObject }; /** - * A cache slave port is used for the CPU-side port of the cache, + * A cache response port is used for the CPU-side port of the cache, * and it is basically a simple timing port that uses a transmit - * list for responses to the CPU (or connected master). In + * list for responses to the CPU (or connected requestor). 
In * addition, it has the functionality to block the port for * incoming requests. If blocked, the port will issue a retry once * unblocked. */ - class CacheSlavePort : public QueuedSlavePort + class CacheResponsePort : public QueuedResponsePort { public: @@ -256,7 +256,7 @@ class BaseCache : public ClockedObject protected: - CacheSlavePort(const std::string &_name, BaseCache *_cache, + CacheResponsePort(const std::string &_name, BaseCache *_cache, const std::string &_label); /** A normal packet queue used to store responses. */ @@ -275,10 +275,10 @@ class BaseCache : public ClockedObject }; /** - * The CPU-side port extends the base cache slave port with access + * The CPU-side port extends the base cache response port with access * functions for functional, atomic and timing requests. */ - class CpuSidePort : public CacheSlavePort + class CpuSidePort : public CacheResponsePort { private: @@ -1154,7 +1154,7 @@ class BaseCache : public ClockedObject /** * Marks the access path of the cache as blocked for the given cause. This - * also sets the blocked flag in the slave interface. + * also sets the blocked flag in the response interface. * @param cause The reason for the cache blocking. */ void setBlocked(BlockedCause cause) @@ -1219,8 +1219,8 @@ class BaseCache : public ClockedObject void incMissCount(PacketPtr pkt) { - assert(pkt->req->masterId() < system->maxMasters()); - stats.cmdStats(pkt).misses[pkt->req->masterId()]++; + assert(pkt->req->requestorId() < system->maxRequestors()); + stats.cmdStats(pkt).misses[pkt->req->requestorId()]++; pkt->req->incAccessDepth(); if (missCount) { --missCount; @@ -1230,8 +1230,8 @@ class BaseCache : public ClockedObject } void incHitCount(PacketPtr pkt) { - assert(pkt->req->masterId() < system->maxMasters()); - stats.cmdStats(pkt).hits[pkt->req->masterId()]++; + assert(pkt->req->requestorId() < system->maxRequestors()); + stats.cmdStats(pkt).hits[pkt->req->requestorId()]++; } /** diff --git a/src/mem/cache/cache.cc b/src/mem/cache/cache.cc index 6fb6f1123..b4f4238ec 100644 --- a/src/mem/cache/cache.cc +++ b/src/mem/cache/cache.cc @@ -96,7 +96,7 @@ Cache::satisfyRequest(PacketPtr pkt, CacheBlk *blk, } else if (blk->isWritable() && !pending_downgrade && !pkt->hasSharers() && pkt->cmd != MemCmd::ReadCleanReq) { - // we can give the requester a writable copy on a read + // we can give the requestor a writable copy on a read // request if: // - we have a writable copy at this level (& below) // - we don't have a pending snoop from below @@ -326,7 +326,7 @@ Cache::handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time, // should have flushed and have no valid block assert(!blk || !blk->isValid()); - stats.cmdStats(pkt).mshr_uncacheable[pkt->req->masterId()]++; + stats.cmdStats(pkt).mshr_uncacheable[pkt->req->requestorId()]++; if (pkt->isWrite()) { allocateWriteBuffer(pkt, forward_time); @@ -371,9 +371,9 @@ Cache::handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time, if (!mshr) { // copy the request and create a new SoftPFReq packet RequestPtr req = std::make_shared(pkt->req->getPaddr(), - pkt->req->getSize(), - pkt->req->getFlags(), - pkt->req->masterId()); + pkt->req->getSize(), + pkt->req->getFlags(), + pkt->req->requestorId()); pf = new Packet(req, pkt->cmd); pf->allocate(); assert(pf->matchAddr(pkt)); @@ -774,9 +774,9 @@ Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk) assert(!tgt_pkt->req->isUncacheable()); - assert(tgt_pkt->req->masterId() < system->maxMasters()); + assert(tgt_pkt->req->requestorId() 
< system->maxRequestors()); stats.cmdStats(tgt_pkt) - .missLatency[tgt_pkt->req->masterId()] += + .missLatency[tgt_pkt->req->requestorId()] += completion_time - target.recvTime; } else if (pkt->cmd == MemCmd::UpgradeFailResp) { // failed StoreCond upgrade @@ -912,7 +912,7 @@ Cache::cleanEvictBlk(CacheBlk *blk) // Creating a zero sized write, a message to the snoop filter RequestPtr req = std::make_shared( - regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId); + regenerateBlkAddr(blk), blkSize, 0, Request::wbRequestorId); if (blk->isSecure()) req->setFlags(Request::SECURE); @@ -1006,7 +1006,7 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing, if (forwardSnoops) { // first propagate snoop upward to see if anyone above us wants to // handle it. save & restore packet src since it will get - // rewritten to be relative to cpu-side bus (if any) + // rewritten to be relative to CPU-side bus (if any) if (is_timing) { // copy the packet so that we can clear any flags before // forwarding it upwards, we also allocate data (passing @@ -1026,7 +1026,7 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing, // If this request is a prefetch or clean evict and an upper level // signals block present, make sure to propagate the block - // presence to the requester. + // presence to the requestor. if (snoopPkt.isBlockCached()) { pkt->setBlockCached(); } @@ -1044,7 +1044,7 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing, cpuSidePort.sendAtomicSnoop(pkt); if (!already_responded && pkt->cacheResponding()) { // cache-to-cache response from some upper cache: - // forward response to original requester + // forward response to original requestor assert(pkt->isResponse()); } } diff --git a/src/mem/cache/cache_blk.cc b/src/mem/cache/cache_blk.cc index c4730caeb..4d7e4086b 100644 --- a/src/mem/cache/cache_blk.cc +++ b/src/mem/cache/cache_blk.cc @@ -44,7 +44,7 @@ void CacheBlk::insert(const Addr tag, const bool is_secure, - const int src_master_ID, const uint32_t task_ID) + const int src_requestor_ID, const uint32_t task_ID) { // Make sure that the block has been properly invalidated assert(status == 0); @@ -53,7 +53,7 @@ CacheBlk::insert(const Addr tag, const bool is_secure, this->tag = tag; // Set source requestor ID - srcMasterId = src_master_ID; + srcRequestorId = src_requestor_ID; // Set task ID task_id = task_ID; diff --git a/src/mem/cache/cache_blk.hh b/src/mem/cache/cache_blk.hh index 427d7bbfe..99f854594 100644 --- a/src/mem/cache/cache_blk.hh +++ b/src/mem/cache/cache_blk.hh @@ -114,7 +114,7 @@ class CacheBlk : public ReplaceableEntry unsigned refCount; /** holds the source requestor ID for this block. */ - int srcMasterId; + int srcRequestorId; /** * Tick on which the block was inserted in the cache. Its value is only @@ -215,7 +215,7 @@ class CacheBlk : public ReplaceableEntry status = 0; whenReady = MaxTick; refCount = 0; - srcMasterId = Request::invldMasterId; + srcRequestorId = Request::invldRequestorId; lockList.clear(); } @@ -296,11 +296,11 @@ class CacheBlk : public ReplaceableEntry * * @param tag Block address tag. * @param is_secure Whether the block is in secure space or not. - * @param src_master_ID The source requestor ID. + * @param src_requestor_ID The source requestor ID. * @param task_ID The new task ID. 
*/ virtual void insert(const Addr tag, const bool is_secure, - const int src_master_ID, const uint32_t task_ID); + const int src_requestor_ID, const uint32_t task_ID); /** * Track the fact that a local locked was issued to the @@ -469,7 +469,8 @@ class TempCacheBlk final : public CacheBlk } void insert(const Addr addr, const bool is_secure, - const int src_master_ID=0, const uint32_t task_ID=0) override + const int src_requestor_ID=0, const uint32_t task_ID=0) + override { // Make sure that the block has been properly invalidated assert(status == 0); diff --git a/src/mem/cache/noncoherent_cache.cc b/src/mem/cache/noncoherent_cache.cc index 01c70723d..5ca1da0cf 100644 --- a/src/mem/cache/noncoherent_cache.cc +++ b/src/mem/cache/noncoherent_cache.cc @@ -228,7 +228,7 @@ void NoncoherentCache::functionalAccess(PacketPtr pkt, bool from_cpu_side) { panic_if(!from_cpu_side, "Non-coherent cache received functional snoop" - " request\n"); + " request\n"); BaseCache::functionalAccess(pkt, from_cpu_side); } @@ -269,8 +269,8 @@ NoncoherentCache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, completion_time += clockEdge(responseLatency) + (transfer_offset ? pkt->payloadDelay : 0); - assert(tgt_pkt->req->masterId() < system->maxMasters()); - stats.cmdStats(tgt_pkt).missLatency[tgt_pkt->req->masterId()] += + assert(tgt_pkt->req->requestorId() < system->maxRequestors()); + stats.cmdStats(tgt_pkt).missLatency[tgt_pkt->req->requestorId()] += completion_time - target.recvTime; tgt_pkt->makeTimingResponse(); diff --git a/src/mem/cache/prefetch/Prefetcher.py b/src/mem/cache/prefetch/Prefetcher.py index c16302832..758803fbc 100644 --- a/src/mem/cache/prefetch/Prefetcher.py +++ b/src/mem/cache/prefetch/Prefetcher.py @@ -163,7 +163,7 @@ class StridePrefetcher(QueuedPrefetcher): confidence_threshold = Param.Percent(50, "Prefetch generation confidence threshold") - use_master_id = Param.Bool(True, "Use master id based history") + use_requestor_id = Param.Bool(True, "Use requestor id based history") degree = Param.Int(4, "Number of prefetches to generate") diff --git a/src/mem/cache/prefetch/base.cc b/src/mem/cache/prefetch/base.cc index 4e484e5e6..a35be335a 100644 --- a/src/mem/cache/prefetch/base.cc +++ b/src/mem/cache/prefetch/base.cc @@ -57,7 +57,7 @@ namespace Prefetcher { Base::PrefetchInfo::PrefetchInfo(PacketPtr pkt, Addr addr, bool miss) : address(addr), pc(pkt->req->hasPC() ? 
pkt->req->getPC() : 0), - masterId(pkt->req->masterId()), validPC(pkt->req->hasPC()), + requestorId(pkt->req->requestorId()), validPC(pkt->req->hasPC()), secure(pkt->isSecure()), size(pkt->req->getSize()), write(pkt->isWrite()), paddress(pkt->req->getPaddr()), cacheMiss(miss) { @@ -72,9 +72,10 @@ Base::PrefetchInfo::PrefetchInfo(PacketPtr pkt, Addr addr, bool miss) } Base::PrefetchInfo::PrefetchInfo(PrefetchInfo const &pfi, Addr addr) - : address(addr), pc(pfi.pc), masterId(pfi.masterId), validPC(pfi.validPC), - secure(pfi.secure), size(pfi.size), write(pfi.write), - paddress(pfi.paddress), cacheMiss(pfi.cacheMiss), data(nullptr) + : address(addr), pc(pfi.pc), requestorId(pfi.requestorId), + validPC(pfi.validPC), secure(pfi.secure), size(pfi.size), + write(pfi.write), paddress(pfi.paddress), cacheMiss(pfi.cacheMiss), + data(nullptr) { } @@ -92,7 +93,8 @@ Base::Base(const BasePrefetcherParams *p) : ClockedObject(p), listeners(), cache(nullptr), blkSize(p->block_size), lBlkSize(floorLog2(blkSize)), onMiss(p->on_miss), onRead(p->on_read), onWrite(p->on_write), onData(p->on_data), onInst(p->on_inst), - masterId(p->sys->getMasterId(this)), pageBytes(p->sys->getPageBytes()), + requestorId(p->sys->getRequestorId(this)), + pageBytes(p->sys->getPageBytes()), prefetchOnAccess(p->prefetch_on_access), useVirtualAddresses(p->use_virtual_addresses), prefetchStats(this), issuedPrefetches(0), diff --git a/src/mem/cache/prefetch/base.hh b/src/mem/cache/prefetch/base.hh index 4afacf0d7..cb52b5731 100644 --- a/src/mem/cache/prefetch/base.hh +++ b/src/mem/cache/prefetch/base.hh @@ -93,7 +93,7 @@ class Base : public ClockedObject /** The program counter that generated this address. */ Addr pc; /** The requestor ID that generated this address. */ - MasterID masterId; + RequestorID requestorId; /** Validity bit for the PC of this address. */ bool validPC; /** Whether this address targets the secure memory space. 
*/ @@ -151,9 +151,9 @@ class Base : public ClockedObject * Gets the requestor ID that generated this address * @return the requestor ID that generated this address */ - MasterID getMasterId() const + RequestorID getRequestorId() const { - return masterId; + return requestorId; } /** @@ -280,7 +280,7 @@ class Base : public ClockedObject const bool onInst; /** Request id for prefetches */ - const MasterID masterId; + const RequestorID requestorId; const Addr pageBytes; diff --git a/src/mem/cache/prefetch/queued.cc b/src/mem/cache/prefetch/queued.cc index 6b89c2b16..90491a7e7 100644 --- a/src/mem/cache/prefetch/queued.cc +++ b/src/mem/cache/prefetch/queued.cc @@ -51,10 +51,12 @@ namespace Prefetcher { void Queued::DeferredPacket::createPkt(Addr paddr, unsigned blk_size, - MasterID mid, bool tag_prefetch, + RequestorID requestor_id, + bool tag_prefetch, Tick t) { /* Create a prefetch memory request */ - RequestPtr req = std::make_shared(paddr, blk_size, 0, mid); + RequestPtr req = std::make_shared(paddr, blk_size, + 0, requestor_id); if (pfInfo.isSecure()) { req->setFlags(Request::SECURE); @@ -277,7 +279,7 @@ Queued::translationComplete(DeferredPacket *dp, bool failed) } else { Tick pf_time = curTick() + clockPeriod() * latency; it->createPkt(it->translationRequest->getPaddr(), blkSize, - masterId, tagPrefetch, pf_time); + requestorId, tagPrefetch, pf_time); addToQueue(pfq, *it); } } else { @@ -328,7 +330,7 @@ Queued::createPrefetchRequest(Addr addr, PrefetchInfo const &pfi, PacketPtr pkt) { RequestPtr translation_req = std::make_shared( - addr, blkSize, pkt->req->getFlags(), masterId, pfi.getPC(), + addr, blkSize, pkt->req->getFlags(), requestorId, pfi.getPC(), pkt->req->contextId()); translation_req->setFlags(Request::PREFETCH); return translation_req; @@ -417,7 +419,8 @@ Queued::insert(const PacketPtr &pkt, PrefetchInfo &new_pfi, DeferredPacket dpp(this, new_pfi, 0, priority); if (has_target_pa) { Tick pf_time = curTick() + clockPeriod() * latency; - dpp.createPkt(target_paddr, blkSize, masterId, tagPrefetch, pf_time); + dpp.createPkt(target_paddr, blkSize, requestorId, tagPrefetch, + pf_time); DPRINTF(HWPrefetch, "Prefetch queued. 
" "addr:%#x priority: %3d tick:%lld.\n", new_pfi.getAddr(), priority, pf_time); diff --git a/src/mem/cache/prefetch/queued.hh b/src/mem/cache/prefetch/queued.hh index 96cf311c8..0627c5c04 100644 --- a/src/mem/cache/prefetch/queued.hh +++ b/src/mem/cache/prefetch/queued.hh @@ -101,12 +101,13 @@ class Queued : public Base * Create the associated memory packet * @param paddr physical address of this packet * @param blk_size block size used by the prefetcher - * @param mid Requester ID of the access that generated this prefetch + * @param requestor_id Requestor ID of the access that generated + * this prefetch * @param tag_prefetch flag to indicate if the packet needs to be * tagged * @param t time when the prefetch becomes ready */ - void createPkt(Addr paddr, unsigned blk_size, MasterID mid, + void createPkt(Addr paddr, unsigned blk_size, RequestorID requestor_id, bool tag_prefetch, Tick t); /** diff --git a/src/mem/cache/prefetch/stride.cc b/src/mem/cache/prefetch/stride.cc index 36773c6aa..9b5894370 100644 --- a/src/mem/cache/prefetch/stride.cc +++ b/src/mem/cache/prefetch/stride.cc @@ -77,7 +77,7 @@ Stride::Stride(const StridePrefetcherParams *p) : Queued(p), initConfidence(p->confidence_counter_bits, p->initial_confidence), threshConf(p->confidence_threshold/100.0), - useMasterId(p->use_master_id), + useRequestorId(p->use_requestor_id), degree(p->degree), pcTableInfo(p->table_assoc, p->table_entries, p->table_indexing_policy, p->table_replacement_policy) @@ -124,10 +124,10 @@ Stride::calculatePrefetch(const PrefetchInfo &pfi, Addr pf_addr = pfi.getAddr(); Addr pc = pfi.getPC(); bool is_secure = pfi.isSecure(); - MasterID master_id = useMasterId ? pfi.getMasterId() : 0; + RequestorID requestor_id = useRequestorId ? pfi.getRequestorId() : 0; // Get corresponding pc table - PCTable* pcTable = findTable(master_id); + PCTable* pcTable = findTable(requestor_id); // Search for entry in the pc table StrideEntry *entry = pcTable->findEntry(pc, is_secure); diff --git a/src/mem/cache/prefetch/stride.hh b/src/mem/cache/prefetch/stride.hh index 72e335b2d..13215c340 100644 --- a/src/mem/cache/prefetch/stride.hh +++ b/src/mem/cache/prefetch/stride.hh @@ -94,7 +94,7 @@ class Stride : public Queued /** Confidence threshold for prefetch generation. 
*/ const double threshConf; - const bool useMasterId; + const bool useRequestorId; const int degree; diff --git a/src/mem/cache/tags/base.cc b/src/mem/cache/tags/base.cc index faad7be05..32c6d2951 100644 --- a/src/mem/cache/tags/base.cc +++ b/src/mem/cache/tags/base.cc @@ -105,12 +105,12 @@ BaseTags::insertBlock(const PacketPtr pkt, CacheBlk *blk) // to insert the new one // Deal with what we are bringing in - MasterID master_id = pkt->req->masterId(); - assert(master_id < system->maxMasters()); - stats.occupancies[master_id]++; + RequestorID requestor_id = pkt->req->requestorId(); + assert(requestor_id < system->maxRequestors()); + stats.occupancies[requestor_id]++; - // Insert block with tag, src master id and task id - blk->insert(extractTag(pkt->getAddr()), pkt->isSecure(), master_id, + // Insert block with tag, src requestor id and task id + blk->insert(extractTag(pkt->getAddr()), pkt->isSecure(), requestor_id, pkt->req->taskId()); // Check if cache warm up is done @@ -240,16 +240,16 @@ BaseTags::BaseTagStats::regStats() avgRefs = totalRefs / sampledRefs; occupancies - .init(system->maxMasters()) + .init(system->maxRequestors()) .flags(nozero | nonan) ; - for (int i = 0; i < system->maxMasters(); i++) { - occupancies.subname(i, system->getMasterName(i)); + for (int i = 0; i < system->maxRequestors(); i++) { + occupancies.subname(i, system->getRequestorName(i)); } avgOccs.flags(nozero | total); - for (int i = 0; i < system->maxMasters(); i++) { - avgOccs.subname(i, system->getMasterName(i)); + for (int i = 0; i < system->maxRequestors(); i++) { + avgOccs.subname(i, system->getRequestorName(i)); } avgOccs = occupancies / Stats::constant(tags.numBlocks); diff --git a/src/mem/cache/tags/base.hh b/src/mem/cache/tags/base.hh index 5f0246205..5e0af20bd 100644 --- a/src/mem/cache/tags/base.hh +++ b/src/mem/cache/tags/base.hh @@ -253,7 +253,7 @@ class BaseTags : public ClockedObject assert(blk); assert(blk->isValid()); - stats.occupancies[blk->srcMasterId]--; + stats.occupancies[blk->srcRequestorId]--; stats.totalRefs += blk->refCount; stats.sampledRefs++; diff --git a/src/mem/cache/tags/sector_blk.cc b/src/mem/cache/tags/sector_blk.cc index cea3d6529..e914cef1a 100644 --- a/src/mem/cache/tags/sector_blk.cc +++ b/src/mem/cache/tags/sector_blk.cc @@ -92,7 +92,7 @@ SectorSubBlk::invalidate() void SectorSubBlk::insert(const Addr tag, const bool is_secure, - const int src_master_ID, const uint32_t task_ID) + const int src_requestor_ID, const uint32_t task_ID) { // Make sure it is not overwriting another sector panic_if((_sectorBlk && _sectorBlk->isValid()) && @@ -100,7 +100,7 @@ SectorSubBlk::insert(const Addr tag, const bool is_secure, (_sectorBlk->isSecure() != is_secure)), "Overwriting valid sector!"); - CacheBlk::insert(tag, is_secure, src_master_ID, task_ID); + CacheBlk::insert(tag, is_secure, src_requestor_ID, task_ID); // Set sector tag _sectorBlk->setTag(tag); diff --git a/src/mem/cache/tags/sector_blk.hh b/src/mem/cache/tags/sector_blk.hh index 0a9087eee..5538aa117 100644 --- a/src/mem/cache/tags/sector_blk.hh +++ b/src/mem/cache/tags/sector_blk.hh @@ -122,11 +122,11 @@ class SectorSubBlk : public CacheBlk * * @param tag Block address tag. * @param is_secure Whether the block is in secure space or not. - * @param src_master_ID The source requestor ID. + * @param src_requestor_ID The source requestor ID. * @param task_ID The new task ID. 
*/ - void insert(const Addr tag, const bool is_secure, const int src_master_ID, - const uint32_t task_ID) override; + void insert(const Addr tag, const bool is_secure, const int + src_requestor_ID, const uint32_t task_ID) override; /** * Pretty-print sector offset and other CacheBlk information. diff --git a/src/mem/coherent_xbar.cc b/src/mem/coherent_xbar.cc index c5778fb6e..037bd32b4 100644 --- a/src/mem/coherent_xbar.cc +++ b/src/mem/coherent_xbar.cc @@ -63,27 +63,27 @@ CoherentXBar::CoherentXBar(const CoherentXBarParams *p) snoopTraffic(this, "snoopTraffic", "Total snoop traffic (bytes)"), snoopFanout(this, "snoop_fanout", "Request fanout histogram") { - // create the ports based on the size of the master and slave - // vector ports, and the presence of the default port, the ports - // are enumerated starting from zero - for (int i = 0; i < p->port_master_connection_count; ++i) { - std::string portName = csprintf("%s.master[%d]", name(), i); - RequestPort* bp = new CoherentXBarMasterPort(portName, *this, i); - masterPorts.push_back(bp); + // create the ports based on the size of the memory-side port and + // CPU-side port vector ports, and the presence of the default port, + // the ports are enumerated starting from zero + for (int i = 0; i < p->port_mem_side_ports_connection_count; ++i) { + std::string portName = csprintf("%s.mem_side_port[%d]", name(), i); + RequestPort* bp = new CoherentXBarRequestPort(portName, *this, i); + memSidePorts.push_back(bp); reqLayers.push_back(new ReqLayer(*bp, *this, csprintf("reqLayer%d", i))); snoopLayers.push_back( new SnoopRespLayer(*bp, *this, csprintf("snoopLayer%d", i))); } - // see if we have a default slave device connected and if so add - // our corresponding master port + // see if we have a default CPU-side-port device connected and if so add + // our corresponding memory-side port if (p->port_default_connection_count) { - defaultPortID = masterPorts.size(); + defaultPortID = memSidePorts.size(); std::string portName = name() + ".default"; - RequestPort* bp = new CoherentXBarMasterPort(portName, *this, + RequestPort* bp = new CoherentXBarRequestPort(portName, *this, defaultPortID); - masterPorts.push_back(bp); + memSidePorts.push_back(bp); reqLayers.push_back(new ReqLayer(*bp, *this, csprintf("reqLayer%d", defaultPortID))); snoopLayers.push_back(new SnoopRespLayer(*bp, *this, @@ -91,11 +91,12 @@ CoherentXBar::CoherentXBar(const CoherentXBarParams *p) defaultPortID))); } - // create the slave ports, once again starting at zero - for (int i = 0; i < p->port_slave_connection_count; ++i) { - std::string portName = csprintf("%s.slave[%d]", name(), i); - QueuedSlavePort* bp = new CoherentXBarSlavePort(portName, *this, i); - slavePorts.push_back(bp); + // create the CPU-side ports, once again starting at zero + for (int i = 0; i < p->port_cpu_side_ports_connection_count; ++i) { + std::string portName = csprintf("%s.cpu_side_port[%d]", name(), i); + QueuedResponsePort* bp = new CoherentXBarResponsePort(portName, + *this, i); + cpuSidePorts.push_back(bp); respLayers.push_back(new RespLayer(*bp, *this, csprintf("respLayer%d", i))); snoopRespPorts.push_back(new SnoopRespPort(*bp, *this)); @@ -119,12 +120,13 @@ CoherentXBar::init() { BaseXBar::init(); - // iterate over our slave ports and determine which of our - // neighbouring master ports are snooping and add them as snoopers - for (const auto& p: slavePorts) { - // check if the connected master port is snooping + // iterate over our CPU-side ports and determine which of our + // neighbouring 
memory-side ports are snooping and add them as snoopers + for (const auto& p: cpuSidePorts) { + // check if the connected memory-side port is snooping if (p->isSnooping()) { - DPRINTF(AddrRanges, "Adding snooping master %s\n", p->getPeer()); + DPRINTF(AddrRanges, "Adding snooping requestor %s\n", + p->getPeer()); snoopPorts.push_back(p); } } @@ -132,17 +134,17 @@ CoherentXBar::init() if (snoopPorts.empty()) warn("CoherentXBar %s has no snooping ports attached!\n", name()); - // inform the snoop filter about the slave ports so it can create + // inform the snoop filter about the CPU-side ports so it can create // its own internal representation if (snoopFilter) - snoopFilter->setSlavePorts(slavePorts); + snoopFilter->setCPUSidePorts(cpuSidePorts); } bool -CoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id) +CoherentXBar::recvTimingReq(PacketPtr pkt, PortID cpu_side_port_id) { // determine the source port based on the id - ResponsePort *src_port = slavePorts[slave_port_id]; + ResponsePort *src_port = cpuSidePorts[cpu_side_port_id]; // remember if the packet is an express snoop bool is_express_snoop = pkt->isExpressSnoop(); @@ -152,11 +154,12 @@ CoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id) assert(is_express_snoop == cache_responding); // determine the destination based on the destination address range - PortID master_port_id = findPort(pkt->getAddrRange()); + PortID mem_side_port_id = findPort(pkt->getAddrRange()); // test if the crossbar should be considered occupied for the current // port, and exclude express snoops from the check - if (!is_express_snoop && !reqLayers[master_port_id]->tryTiming(src_port)) { + if (!is_express_snoop && + !reqLayers[mem_side_port_id]->tryTiming(src_port)) { DPRINTF(CoherentXBar, "%s: src %s packet %s BUSY\n", __func__, src_port->name(), pkt->print()); return false; @@ -197,12 +200,12 @@ CoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id) // before snooping we need to make sure that the memory // below is not busy and the cache clean request can be // forwarded to it - if (!masterPorts[master_port_id]->tryTiming(pkt)) { + if (!memSidePorts[mem_side_port_id]->tryTiming(pkt)) { DPRINTF(CoherentXBar, "%s: src %s packet %s RETRY\n", __func__, src_port->name(), pkt->print()); // update the layer state and schedule an idle event - reqLayers[master_port_id]->failedTiming(src_port, + reqLayers[mem_side_port_id]->failedTiming(src_port, clockEdge(Cycles(1))); return false; } @@ -231,10 +234,10 @@ CoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id) if (!sf_res.first.empty()) pkt->setBlockCached(); } else { - forwardTiming(pkt, slave_port_id, sf_res.first); + forwardTiming(pkt, cpu_side_port_id, sf_res.first); } } else { - forwardTiming(pkt, slave_port_id); + forwardTiming(pkt, cpu_side_port_id); } // add the snoop delay to our header delay, and then reset it @@ -282,7 +285,7 @@ CoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id) } // since it is a normal request, attempt to send the packet - success = masterPorts[master_port_id]->sendTimingReq(pkt); + success = memSidePorts[mem_side_port_id]->sendTimingReq(pkt); } else { // no need to forward, turn this packet around and respond // directly @@ -311,7 +314,7 @@ CoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id) src_port->name(), pkt->print()); // update the layer state and schedule an idle event - reqLayers[master_port_id]->failedTiming(src_port, + reqLayers[mem_side_port_id]->failedTiming(src_port, clockEdge(Cycles(1))); } else { // 
express snoops currently bypass the crossbar state entirely @@ -333,7 +336,7 @@ CoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id) // remember where to route the normal response to if (expect_response || expect_snoop_resp) { assert(routeTo.find(pkt->req) == routeTo.end()); - routeTo[pkt->req] = slave_port_id; + routeTo[pkt->req] = cpu_side_port_id; panic_if(routeTo.size() > maxRoutingTableSizeCheck, "%s: Routing table exceeds %d packets\n", @@ -341,12 +344,12 @@ CoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id) } // update the layer state and schedule an idle event - reqLayers[master_port_id]->succeededTiming(packetFinishTime); + reqLayers[mem_side_port_id]->succeededTiming(packetFinishTime); } // stats updates only consider packets that were successfully sent - pktCount[slave_port_id][master_port_id]++; - pktSize[slave_port_id][master_port_id] += pkt_size; + pktCount[cpu_side_port_id][mem_side_port_id]++; + pktSize[cpu_side_port_id][mem_side_port_id] += pkt_size; transDist[pkt_cmd]++; if (is_express_snoop) { @@ -361,7 +364,7 @@ CoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id) // normally we respond to the packet we just received if we need to PacketPtr rsp_pkt = pkt; - PortID rsp_port_id = slave_port_id; + PortID rsp_port_id = cpu_side_port_id; // If this is the destination of the cache clean operation the // crossbar is responsible for responding. This crossbar will @@ -401,7 +404,7 @@ CoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id) outstandingCMO.emplace(pkt->id, deferred_rsp); if (!pkt->isWrite()) { assert(routeTo.find(pkt->req) == routeTo.end()); - routeTo[pkt->req] = slave_port_id; + routeTo[pkt->req] = cpu_side_port_id; panic_if(routeTo.size() > maxRoutingTableSizeCheck, "%s: Routing table exceeds %d packets\n", @@ -419,7 +422,7 @@ CoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id) if (snoopFilter && !system->bypassCaches()) { // let the snoop filter inspect the response and update its state - snoopFilter->updateResponse(rsp_pkt, *slavePorts[rsp_port_id]); + snoopFilter->updateResponse(rsp_pkt, *cpuSidePorts[rsp_port_id]); } // we send the response after the current packet, even if the @@ -429,28 +432,28 @@ CoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id) Tick response_time = clockEdge() + pkt->headerDelay; rsp_pkt->headerDelay = 0; - slavePorts[rsp_port_id]->schedTimingResp(rsp_pkt, response_time); + cpuSidePorts[rsp_port_id]->schedTimingResp(rsp_pkt, response_time); } return success; } bool -CoherentXBar::recvTimingResp(PacketPtr pkt, PortID master_port_id) +CoherentXBar::recvTimingResp(PacketPtr pkt, PortID mem_side_port_id) { // determine the source port based on the id - RequestPort *src_port = masterPorts[master_port_id]; + RequestPort *src_port = memSidePorts[mem_side_port_id]; // determine the destination const auto route_lookup = routeTo.find(pkt->req); assert(route_lookup != routeTo.end()); - const PortID slave_port_id = route_lookup->second; - assert(slave_port_id != InvalidPortID); - assert(slave_port_id < respLayers.size()); + const PortID cpu_side_port_id = route_lookup->second; + assert(cpu_side_port_id != InvalidPortID); + assert(cpu_side_port_id < respLayers.size()); // test if the crossbar should be considered occupied for the // current port - if (!respLayers[slave_port_id]->tryTiming(src_port)) { + if (!respLayers[cpu_side_port_id]->tryTiming(src_port)) { DPRINTF(CoherentXBar, "%s: src %s packet %s BUSY\n", __func__, src_port->name(), pkt->print()); return false; @@ 
-475,33 +478,34 @@ CoherentXBar::recvTimingResp(PacketPtr pkt, PortID master_port_id) if (snoopFilter && !system->bypassCaches()) { // let the snoop filter inspect the response and update its state - snoopFilter->updateResponse(pkt, *slavePorts[slave_port_id]); + snoopFilter->updateResponse(pkt, *cpuSidePorts[cpu_side_port_id]); } - // send the packet through the destination slave port and pay for + // send the packet through the destination CPU-side port and pay for // any outstanding header delay Tick latency = pkt->headerDelay; pkt->headerDelay = 0; - slavePorts[slave_port_id]->schedTimingResp(pkt, curTick() + latency); + cpuSidePorts[cpu_side_port_id]->schedTimingResp(pkt, curTick() + + latency); // remove the request from the routing table routeTo.erase(route_lookup); - respLayers[slave_port_id]->succeededTiming(packetFinishTime); + respLayers[cpu_side_port_id]->succeededTiming(packetFinishTime); // stats updates - pktCount[slave_port_id][master_port_id]++; - pktSize[slave_port_id][master_port_id] += pkt_size; + pktCount[cpu_side_port_id][mem_side_port_id]++; + pktSize[cpu_side_port_id][mem_side_port_id] += pkt_size; transDist[pkt_cmd]++; return true; } void -CoherentXBar::recvTimingSnoopReq(PacketPtr pkt, PortID master_port_id) +CoherentXBar::recvTimingSnoopReq(PacketPtr pkt, PortID mem_side_port_id) { DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__, - masterPorts[master_port_id]->name(), pkt->print()); + memSidePorts[mem_side_port_id]->name(), pkt->print()); // update stats here as we know the forwarding will succeed unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0; @@ -530,8 +534,8 @@ CoherentXBar::recvTimingSnoopReq(PacketPtr pkt, PortID master_port_id) // of the snoop filter pkt->headerDelay += sf_res.second * clockPeriod(); DPRINTF(CoherentXBar, "%s: src %s packet %s SF size: %i lat: %i\n", - __func__, masterPorts[master_port_id]->name(), pkt->print(), - sf_res.first.size(), sf_res.second); + __func__, memSidePorts[mem_side_port_id]->name(), + pkt->print(), sf_res.first.size(), sf_res.second); // forward to all snoopers forwardTiming(pkt, InvalidPortID, sf_res.first); @@ -546,22 +550,22 @@ CoherentXBar::recvTimingSnoopReq(PacketPtr pkt, PortID master_port_id) // if we can expect a response, remember how to route it if (!cache_responding && pkt->cacheResponding()) { assert(routeTo.find(pkt->req) == routeTo.end()); - routeTo[pkt->req] = master_port_id; + routeTo[pkt->req] = mem_side_port_id; } - // a snoop request came from a connected slave device (one of - // our master ports), and if it is not coming from the slave + // a snoop request came from a connected CPU-side-port device (one of + // our memory-side ports), and if it is not coming from the CPU-side-port // device responsible for the address range something is // wrong, hence there is nothing further to do as the packet // would be going back to where it came from - assert(findPort(pkt->getAddrRange()) == master_port_id); + assert(findPort(pkt->getAddrRange()) == mem_side_port_id); } bool -CoherentXBar::recvTimingSnoopResp(PacketPtr pkt, PortID slave_port_id) +CoherentXBar::recvTimingSnoopResp(PacketPtr pkt, PortID cpu_side_port_id) { // determine the source port based on the id - ResponsePort* src_port = slavePorts[slave_port_id]; + ResponsePort* src_port = cpuSidePorts[cpu_side_port_id]; // get the destination const auto route_lookup = routeTo.find(pkt->req); @@ -588,8 +592,8 @@ CoherentXBar::recvTimingSnoopResp(PacketPtr pkt, PortID slave_port_id) return false; } } else { - // get the master port 
that mirrors this slave port internally - RequestPort* snoop_port = snoopRespPorts[slave_port_id]; + // get the memory-side port that mirrors this CPU-side port internally + RequestPort* snoop_port = snoopRespPorts[cpu_side_port_id]; assert(dest_port_id < respLayers.size()); if (!respLayers[dest_port_id]->tryTiming(snoop_port)) { DPRINTF(CoherentXBar, "%s: src %s packet %s BUSY\n", __func__, @@ -629,44 +633,48 @@ CoherentXBar::recvTimingSnoopResp(PacketPtr pkt, PortID slave_port_id) if (snoopFilter) { // update the probe filter so that it can properly track the line - snoopFilter->updateSnoopForward(pkt, *slavePorts[slave_port_id], - *masterPorts[dest_port_id]); + snoopFilter->updateSnoopForward(pkt, + *cpuSidePorts[cpu_side_port_id], + *memSidePorts[dest_port_id]); } bool success M5_VAR_USED = - masterPorts[dest_port_id]->sendTimingSnoopResp(pkt); - pktCount[slave_port_id][dest_port_id]++; - pktSize[slave_port_id][dest_port_id] += pkt_size; + memSidePorts[dest_port_id]->sendTimingSnoopResp(pkt); + pktCount[cpu_side_port_id][dest_port_id]++; + pktSize[cpu_side_port_id][dest_port_id] += pkt_size; assert(success); snoopLayers[dest_port_id]->succeededTiming(packetFinishTime); } else { - // we got a snoop response on one of our slave ports, - // i.e. from a coherent master connected to the crossbar, and + // we got a snoop response on one of our CPU-side ports, + // i.e. from a coherent requestor connected to the crossbar, and // since we created the snoop request as part of recvTiming, // this should now be a normal response again outstandingSnoop.erase(pkt->req); - // this is a snoop response from a coherent master, hence it + // this is a snoop response from a coherent requestor, hence it // should never go back to where the snoop response came from, // but instead to where the original request came from - assert(slave_port_id != dest_port_id); + assert(cpu_side_port_id != dest_port_id); if (snoopFilter) { - // update the probe filter so that it can properly track the line - snoopFilter->updateSnoopResponse(pkt, *slavePorts[slave_port_id], - *slavePorts[dest_port_id]); + // update the probe filter so that it can properly track + // the line + snoopFilter->updateSnoopResponse(pkt, + *cpuSidePorts[cpu_side_port_id], + *cpuSidePorts[dest_port_id]); } DPRINTF(CoherentXBar, "%s: src %s packet %s FWD RESP\n", __func__, src_port->name(), pkt->print()); - // as a normal response, it should go back to a master through - // one of our slave ports, we also pay for any outstanding + // as a normal response, it should go back to a requestor through + // one of our CPU-side ports, we also pay for any outstanding // header latency Tick latency = pkt->headerDelay; pkt->headerDelay = 0; - slavePorts[dest_port_id]->schedTimingResp(pkt, curTick() + latency); + cpuSidePorts[dest_port_id]->schedTimingResp(pkt, + curTick() + latency); respLayers[dest_port_id]->succeededTiming(packetFinishTime); } @@ -684,8 +692,8 @@ CoherentXBar::recvTimingSnoopResp(PacketPtr pkt, PortID slave_port_id) void -CoherentXBar::forwardTiming(PacketPtr pkt, PortID exclude_slave_port_id, - const std::vector& dests) +CoherentXBar::forwardTiming(PacketPtr pkt, PortID exclude_cpu_side_port_id, + const std::vector& dests) { DPRINTF(CoherentXBar, "%s for %s\n", __func__, pkt->print()); @@ -695,12 +703,12 @@ CoherentXBar::forwardTiming(PacketPtr pkt, PortID exclude_slave_port_id, unsigned fanout = 0; for (const auto& p: dests) { - // we could have gotten this request from a snooping master - // (corresponding to our own slave port that is 
also in + // we could have gotten this request from a snooping requestor + // (corresponding to our own CPU-side port that is also in // snoopPorts) and should not send it back to where it came // from - if (exclude_slave_port_id == InvalidPortID || - p->getId() != exclude_slave_port_id) { + if (exclude_cpu_side_port_id == InvalidPortID || + p->getId() != exclude_cpu_side_port_id) { // cache is not allowed to refuse snoop p->sendTimingSnoopReq(pkt); fanout++; @@ -712,20 +720,20 @@ CoherentXBar::forwardTiming(PacketPtr pkt, PortID exclude_slave_port_id, } void -CoherentXBar::recvReqRetry(PortID master_port_id) +CoherentXBar::recvReqRetry(PortID mem_side_port_id) { // responses and snoop responses never block on forwarding them, // so the retry will always be coming from a port to which we // tried to forward a request - reqLayers[master_port_id]->recvRetry(); + reqLayers[mem_side_port_id]->recvRetry(); } Tick -CoherentXBar::recvAtomicBackdoor(PacketPtr pkt, PortID slave_port_id, +CoherentXBar::recvAtomicBackdoor(PacketPtr pkt, PortID cpu_side_port_id, MemBackdoorPtr *backdoor) { DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__, - slavePorts[slave_port_id]->name(), pkt->print()); + cpuSidePorts[cpu_side_port_id]->name(), pkt->print()); unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0; unsigned int pkt_cmd = pkt->cmdToIndex(); @@ -747,11 +755,12 @@ CoherentXBar::recvAtomicBackdoor(PacketPtr pkt, PortID slave_port_id, if (snoopFilter) { // check with the snoop filter where to forward this packet auto sf_res = - snoopFilter->lookupRequest(pkt, *slavePorts[slave_port_id]); + snoopFilter->lookupRequest(pkt, + *cpuSidePorts [cpu_side_port_id]); snoop_response_latency += sf_res.second * clockPeriod(); DPRINTF(CoherentXBar, "%s: src %s packet %s SF size: %i lat: %i\n", - __func__, slavePorts[slave_port_id]->name(), pkt->print(), - sf_res.first.size(), sf_res.second); + __func__, cpuSidePorts[cpu_side_port_id]->name(), + pkt->print(), sf_res.first.size(), sf_res.second); // let the snoop filter know about the success of the send // operation, and do it even before sending it onwards to @@ -768,11 +777,11 @@ CoherentXBar::recvAtomicBackdoor(PacketPtr pkt, PortID slave_port_id, if (!sf_res.first.empty()) pkt->setBlockCached(); } else { - snoop_result = forwardAtomic(pkt, slave_port_id, InvalidPortID, - sf_res.first); + snoop_result = forwardAtomic(pkt, cpu_side_port_id, + InvalidPortID, sf_res.first); } } else { - snoop_result = forwardAtomic(pkt, slave_port_id); + snoop_result = forwardAtomic(pkt, cpu_side_port_id); } snoop_response_cmd = snoop_result.first; snoop_response_latency += snoop_result.second; @@ -785,7 +794,7 @@ CoherentXBar::recvAtomicBackdoor(PacketPtr pkt, PortID slave_port_id, // even if we had a snoop response, we must continue and also // perform the actual request at the destination - PortID master_port_id = findPort(pkt->getAddrRange()); + PortID mem_side_port_id = findPort(pkt->getAddrRange()); if (sink_packet) { DPRINTF(CoherentXBar, "%s: Not forwarding %s\n", __func__, @@ -800,10 +809,10 @@ CoherentXBar::recvAtomicBackdoor(PacketPtr pkt, PortID slave_port_id, } // forward the request to the appropriate destination - auto master = masterPorts[master_port_id]; + auto mem_side_port = memSidePorts[mem_side_port_id]; response_latency = backdoor ? 
- master->sendAtomicBackdoor(pkt, *backdoor) : - master->sendAtomic(pkt); + mem_side_port->sendAtomicBackdoor(pkt, *backdoor) : + mem_side_port->sendAtomic(pkt); } else { // if it does not need a response we sink the packet above assert(pkt->needsResponse()); @@ -813,14 +822,14 @@ CoherentXBar::recvAtomicBackdoor(PacketPtr pkt, PortID slave_port_id, } // stats updates for the request - pktCount[slave_port_id][master_port_id]++; - pktSize[slave_port_id][master_port_id] += pkt_size; + pktCount[cpu_side_port_id][mem_side_port_id]++; + pktSize[cpu_side_port_id][mem_side_port_id] += pkt_size; transDist[pkt_cmd]++; // if lower levels have replied, tell the snoop filter if (!system->bypassCaches() && snoopFilter && pkt->isResponse()) { - snoopFilter->updateResponse(pkt, *slavePorts[slave_port_id]); + snoopFilter->updateResponse(pkt, *cpuSidePorts[cpu_side_port_id]); } // if we got a response from a snooper, restore it here @@ -861,8 +870,8 @@ CoherentXBar::recvAtomicBackdoor(PacketPtr pkt, PortID slave_port_id, pkt_cmd = pkt->cmdToIndex(); // stats updates - pktCount[slave_port_id][master_port_id]++; - pktSize[slave_port_id][master_port_id] += pkt_size; + pktCount[cpu_side_port_id][mem_side_port_id]++; + pktSize[cpu_side_port_id][mem_side_port_id] += pkt_size; transDist[pkt_cmd]++; } @@ -872,10 +881,10 @@ CoherentXBar::recvAtomicBackdoor(PacketPtr pkt, PortID slave_port_id, } Tick -CoherentXBar::recvAtomicSnoop(PacketPtr pkt, PortID master_port_id) +CoherentXBar::recvAtomicSnoop(PacketPtr pkt, PortID mem_side_port_id) { DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__, - masterPorts[master_port_id]->name(), pkt->print()); + memSidePorts[mem_side_port_id]->name(), pkt->print()); // add the request snoop data unsigned int pkt_size = pkt->hasData() ? 
pkt->getSize() : 0; @@ -889,9 +898,9 @@ CoherentXBar::recvAtomicSnoop(PacketPtr pkt, PortID master_port_id) auto sf_res = snoopFilter->lookupSnoop(pkt); snoop_response_latency += sf_res.second * clockPeriod(); DPRINTF(CoherentXBar, "%s: src %s packet %s SF size: %i lat: %i\n", - __func__, masterPorts[master_port_id]->name(), pkt->print(), - sf_res.first.size(), sf_res.second); - snoop_result = forwardAtomic(pkt, InvalidPortID, master_port_id, + __func__, memSidePorts[mem_side_port_id]->name(), + pkt->print(), sf_res.first.size(), sf_res.second); + snoop_result = forwardAtomic(pkt, InvalidPortID, mem_side_port_id, sf_res.first); } else { snoop_result = forwardAtomic(pkt, InvalidPortID); @@ -913,9 +922,9 @@ CoherentXBar::recvAtomicSnoop(PacketPtr pkt, PortID master_port_id) } std::pair -CoherentXBar::forwardAtomic(PacketPtr pkt, PortID exclude_slave_port_id, - PortID source_master_port_id, - const std::vector& dests) +CoherentXBar::forwardAtomic(PacketPtr pkt, PortID exclude_cpu_side_port_id, + PortID source_mem_side_port_id, + const std::vector& dests) { // the packet may be changed on snoops, record the original // command to enable us to restore it between snoops so that @@ -930,12 +939,12 @@ CoherentXBar::forwardAtomic(PacketPtr pkt, PortID exclude_slave_port_id, unsigned fanout = 0; for (const auto& p: dests) { - // we could have gotten this request from a snooping master - // (corresponding to our own slave port that is also in + // we could have gotten this request from a snooping memory-side port + // (corresponding to our own CPU-side port that is also in // snoopPorts) and should not send it back to where it came // from - if (exclude_slave_port_id != InvalidPortID && - p->getId() == exclude_slave_port_id) + if (exclude_cpu_side_port_id != InvalidPortID && + p->getId() == exclude_cpu_side_port_id) continue; Tick latency = p->sendAtomicSnoop(pkt); @@ -959,16 +968,16 @@ CoherentXBar::forwardAtomic(PacketPtr pkt, PortID exclude_slave_port_id, if (snoopFilter) { // Handle responses by the snoopers and differentiate between // responses to requests from above and snoops from below - if (source_master_port_id != InvalidPortID) { + if (source_mem_side_port_id != InvalidPortID) { // Getting a response for a snoop from below - assert(exclude_slave_port_id == InvalidPortID); + assert(exclude_cpu_side_port_id == InvalidPortID); snoopFilter->updateSnoopForward(pkt, *p, - *masterPorts[source_master_port_id]); + *memSidePorts[source_mem_side_port_id]); } else { // Getting a response for a request from above - assert(source_master_port_id == InvalidPortID); + assert(source_mem_side_port_id == InvalidPortID); snoopFilter->updateSnoopResponse(pkt, *p, - *slavePorts[exclude_slave_port_id]); + *cpuSidePorts[exclude_cpu_side_port_id]); } } // restore original packet state for remaining snoopers @@ -984,24 +993,25 @@ CoherentXBar::forwardAtomic(PacketPtr pkt, PortID exclude_slave_port_id, } void -CoherentXBar::recvFunctional(PacketPtr pkt, PortID slave_port_id) +CoherentXBar::recvFunctional(PacketPtr pkt, PortID cpu_side_port_id) { if (!pkt->isPrint()) { // don't do DPRINTFs on PrintReq as it clutters up the output DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__, - slavePorts[slave_port_id]->name(), pkt->print()); + cpuSidePorts[cpu_side_port_id]->name(), pkt->print()); } if (!system->bypassCaches()) { // forward to all snoopers but the source - forwardFunctional(pkt, slave_port_id); + forwardFunctional(pkt, cpu_side_port_id); } // there is no need to continue if the snooping has found what we 
// were looking for and the packet is already a response if (!pkt->isResponse()) { - // since our slave ports are queued ports we need to check them as well - for (const auto& p : slavePorts) { + // since our CPU-side ports are queued ports we need to check + // them as well + for (const auto& p : cpuSidePorts) { // if we find a response that has the data, then the // downstream caches/memories may be out of date, so simply stop // here @@ -1014,20 +1024,20 @@ CoherentXBar::recvFunctional(PacketPtr pkt, PortID slave_port_id) PortID dest_id = findPort(pkt->getAddrRange()); - masterPorts[dest_id]->sendFunctional(pkt); + memSidePorts[dest_id]->sendFunctional(pkt); } } void -CoherentXBar::recvFunctionalSnoop(PacketPtr pkt, PortID master_port_id) +CoherentXBar::recvFunctionalSnoop(PacketPtr pkt, PortID mem_side_port_id) { if (!pkt->isPrint()) { // don't do DPRINTFs on PrintReq as it clutters up the output DPRINTF(CoherentXBar, "%s: src %s packet %s\n", __func__, - masterPorts[master_port_id]->name(), pkt->print()); + memSidePorts[mem_side_port_id]->name(), pkt->print()); } - for (const auto& p : slavePorts) { + for (const auto& p : cpuSidePorts) { if (p->trySatisfyFunctional(pkt)) { if (pkt->needsResponse()) pkt->makeResponse(); @@ -1040,18 +1050,18 @@ CoherentXBar::recvFunctionalSnoop(PacketPtr pkt, PortID master_port_id) } void -CoherentXBar::forwardFunctional(PacketPtr pkt, PortID exclude_slave_port_id) +CoherentXBar::forwardFunctional(PacketPtr pkt, PortID exclude_cpu_side_port_id) { // snoops should only happen if the system isn't bypassing caches assert(!system->bypassCaches()); for (const auto& p: snoopPorts) { - // we could have gotten this request from a snooping master - // (corresponding to our own slave port that is also in + // we could have gotten this request from a snooping requestor + // (corresponding to our own CPU-side port that is also in // snoopPorts) and should not send it back to where it came // from - if (exclude_slave_port_id == InvalidPortID || - p->getId() != exclude_slave_port_id) + if (exclude_cpu_side_port_id == InvalidPortID || + p->getId() != exclude_cpu_side_port_id) p->sendFunctionalSnoop(pkt); // if we get a response we are done diff --git a/src/mem/coherent_xbar.hh b/src/mem/coherent_xbar.hh index 300fc0c40..81e2dc4cd 100644 --- a/src/mem/coherent_xbar.hh +++ b/src/mem/coherent_xbar.hh @@ -55,7 +55,7 @@ /** * A coherent crossbar connects a number of (potentially) snooping - * masters and slaves, and routes the request and response packets + * requestors and responders, and routes the request and response packets * based on the address, and also forwards all requests to the * snoopers and deals with the snoop responses. * @@ -78,11 +78,11 @@ class CoherentXBar : public BaseXBar std::vector snoopLayers; /** - * Declaration of the coherent crossbar slave port type, one will - * be instantiated for each of the master ports connecting to the + * Declaration of the coherent crossbar CPU-side port type, one will + * be instantiated for each of the mem_side_ports connecting to the * crossbar. 
*/ - class CoherentXBarSlavePort : public QueuedSlavePort + class CoherentXBarResponsePort : public QueuedResponsePort { private: @@ -95,9 +95,9 @@ class CoherentXBar : public BaseXBar public: - CoherentXBarSlavePort(const std::string &_name, + CoherentXBarResponsePort(const std::string &_name, CoherentXBar &_xbar, PortID _id) - : QueuedSlavePort(_name, &_xbar, queue, _id), xbar(_xbar), + : QueuedResponsePort(_name, &_xbar, queue, _id), xbar(_xbar), queue(_xbar, *this) { } @@ -142,11 +142,11 @@ class CoherentXBar : public BaseXBar }; /** - * Declaration of the coherent crossbar master port type, one will be - * instantiated for each of the slave interfaces connecting to the + * Declaration of the coherent crossbar memory-side port type, one will be + * instantiated for each of the CPU-side-port interfaces connecting to the * crossbar. */ - class CoherentXBarMasterPort : public RequestPort + class CoherentXBarRequestPort : public RequestPort { private: /** A reference to the crossbar to which this port belongs. */ @@ -154,7 +154,7 @@ class CoherentXBar : public BaseXBar public: - CoherentXBarMasterPort(const std::string &_name, + CoherentXBarRequestPort(const std::string &_name, CoherentXBar &_xbar, PortID _id) : RequestPort(_name, &_xbar, _id), xbar(_xbar) { } @@ -163,7 +163,7 @@ class CoherentXBar : public BaseXBar /** * Determine if this port should be considered a snooper. For - * a coherent crossbar master port this is always true. + * a coherent crossbar memory-side port this is always true. * * @return a boolean that is true if this port is snooping */ @@ -200,8 +200,8 @@ class CoherentXBar : public BaseXBar /** * Internal class to bridge between an incoming snoop response - * from a slave port and forwarding it through an outgoing slave - * port. It is effectively a dangling master port. + * from a CPU-side port and forwarding it through an outgoing + * CPU-side port. It is effectively a dangling memory-side port. */ class SnoopRespPort : public RequestPort { @@ -209,26 +209,27 @@ class CoherentXBar : public BaseXBar private: /** The port which we mirror internally. */ - QueuedSlavePort& slavePort; + QueuedResponsePort& cpuSidePort; public: /** - * Create a snoop response port that mirrors a given slave port. + * Create a snoop response port that mirrors a given CPU-side port. */ - SnoopRespPort(QueuedSlavePort& slave_port, CoherentXBar& _xbar) : - RequestPort(slave_port.name() + ".snoopRespPort", &_xbar), - slavePort(slave_port) { } + SnoopRespPort(QueuedResponsePort& cpu_side_port, + CoherentXBar& _xbar) : + RequestPort(cpu_side_port.name() + ".snoopRespPort", &_xbar), + cpuSidePort(cpu_side_port) { } /** * Override the sending of retries and pass them on through - * the mirrored slave port. + * the mirrored CPU-side port. 
*/ void sendRetryResp() override { // forward it as a snoop response retry - slavePort.sendRetrySnoopResp(); + cpuSidePort.sendRetrySnoopResp(); } void @@ -247,7 +248,7 @@ class CoherentXBar : public BaseXBar std::vector snoopRespPorts; - std::vector snoopPorts; + std::vector snoopPorts; /** * Store the outstanding requests that we are expecting snoop @@ -294,94 +295,95 @@ class CoherentXBar : public BaseXBar */ std::unique_ptr pendingDelete; - bool recvTimingReq(PacketPtr pkt, PortID slave_port_id); - bool recvTimingResp(PacketPtr pkt, PortID master_port_id); - void recvTimingSnoopReq(PacketPtr pkt, PortID master_port_id); - bool recvTimingSnoopResp(PacketPtr pkt, PortID slave_port_id); - void recvReqRetry(PortID master_port_id); + bool recvTimingReq(PacketPtr pkt, PortID cpu_side_port_id); + bool recvTimingResp(PacketPtr pkt, PortID mem_side_port_id); + void recvTimingSnoopReq(PacketPtr pkt, PortID mem_side_port_id); + bool recvTimingSnoopResp(PacketPtr pkt, PortID cpu_side_port_id); + void recvReqRetry(PortID mem_side_port_id); /** * Forward a timing packet to our snoopers, potentially excluding - * one of the connected coherent masters to avoid sending a packet + * one of the connected coherent requestors to avoid sending a packet * back to where it came from. * * @param pkt Packet to forward - * @param exclude_slave_port_id Id of slave port to exclude + * @param exclude_cpu_side_port_id Id of CPU-side port to exclude */ void - forwardTiming(PacketPtr pkt, PortID exclude_slave_port_id) + forwardTiming(PacketPtr pkt, PortID exclude_cpu_side_port_id) { - forwardTiming(pkt, exclude_slave_port_id, snoopPorts); + forwardTiming(pkt, exclude_cpu_side_port_id, snoopPorts); } /** * Forward a timing packet to a selected list of snoopers, potentially - * excluding one of the connected coherent masters to avoid sending a packet - * back to where it came from. + * excluding one of the connected coherent requestors to avoid sending + * a packet back to where it came from. * * @param pkt Packet to forward - * @param exclude_slave_port_id Id of slave port to exclude + * @param exclude_cpu_side_port_id Id of CPU-side port to exclude * @param dests Vector of destination ports for the forwarded pkt */ - void forwardTiming(PacketPtr pkt, PortID exclude_slave_port_id, - const std::vector& dests); + void forwardTiming(PacketPtr pkt, PortID exclude_cpu_side_port_id, + const std::vector& dests); - Tick recvAtomicBackdoor(PacketPtr pkt, PortID slave_port_id, + Tick recvAtomicBackdoor(PacketPtr pkt, PortID cpu_side_port_id, MemBackdoorPtr *backdoor=nullptr); - Tick recvAtomicSnoop(PacketPtr pkt, PortID master_port_id); + Tick recvAtomicSnoop(PacketPtr pkt, PortID mem_side_port_id); /** * Forward an atomic packet to our snoopers, potentially excluding - * one of the connected coherent masters to avoid sending a packet + * one of the connected coherent requestors to avoid sending a packet * back to where it came from. 
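By way of illustration (this sketch is not part of the committed patch): the renamed forwardTiming() declared in this class follows the same exclude-the-source pattern as the functional forwarding loop earlier in the file. A minimal sketch, with the crossbar's layer arbitration, latency and statistics bookkeeping omitted; the class name CoherentXBarSketch is invented here purely for illustration.

    void
    CoherentXBarSketch::forwardTiming(PacketPtr pkt,
                                      PortID exclude_cpu_side_port_id,
                                      const std::vector<QueuedResponsePort*>& dests)
    {
        for (const auto& p : dests) {
            // never send a snoop back to the CPU-side port it came from
            if (exclude_cpu_side_port_id == InvalidPortID ||
                p->getId() != exclude_cpu_side_port_id)
                p->sendTimingSnoopReq(pkt);
        }
    }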
* * @param pkt Packet to forward - * @param exclude_slave_port_id Id of slave port to exclude + * @param exclude_cpu_side_port_id Id of CPU-side port to exclude * * @return a pair containing the snoop response and snoop latency */ std::pair - forwardAtomic(PacketPtr pkt, PortID exclude_slave_port_id) + forwardAtomic(PacketPtr pkt, PortID exclude_cpu_side_port_id) { - return forwardAtomic(pkt, exclude_slave_port_id, InvalidPortID, + return forwardAtomic(pkt, exclude_cpu_side_port_id, InvalidPortID, snoopPorts); } /** * Forward an atomic packet to a selected list of snoopers, potentially - * excluding one of the connected coherent masters to avoid sending a packet - * back to where it came from. + * excluding one of the connected coherent requestors to avoid sending a + * packet back to where it came from. * * @param pkt Packet to forward - * @param exclude_slave_port_id Id of slave port to exclude - * @param source_master_port_id Id of the master port for snoops from below + * @param exclude_cpu_side_port_id Id of CPU-side port to exclude + * @param source_mem_side_port_id Id of the memory-side port for + * snoops from below * @param dests Vector of destination ports for the forwarded pkt * * @return a pair containing the snoop response and snoop latency */ std::pair forwardAtomic(PacketPtr pkt, - PortID exclude_slave_port_id, - PortID source_master_port_id, - const std::vector& + PortID exclude_cpu_side_port_id, + PortID source_mem_side_port_id, + const std::vector& dests); - /** Function called by the port when the crossbar is recieving a Functional + /** Function called by the port when the crossbar is receiving a Functional transaction.*/ - void recvFunctional(PacketPtr pkt, PortID slave_port_id); + void recvFunctional(PacketPtr pkt, PortID cpu_side_port_id); - /** Function called by the port when the crossbar is recieving a functional + /** Function called by the port when the crossbar is receiving a functional snoop transaction.*/ - void recvFunctionalSnoop(PacketPtr pkt, PortID master_port_id); + void recvFunctionalSnoop(PacketPtr pkt, PortID mem_side_port_id); /** * Forward a functional packet to our snoopers, potentially - * excluding one of the connected coherent masters to avoid + * excluding one of the connected coherent requestors to avoid * sending a packet back to where it came from. 
* * @param pkt Packet to forward - * @param exclude_slave_port_id Id of slave port to exclude + * @param exclude_cpu_side_port_id Id of CPU-side port to exclude */ - void forwardFunctional(PacketPtr pkt, PortID exclude_slave_port_id); + void forwardFunctional(PacketPtr pkt, PortID exclude_cpu_side_port_id); /** * Determine if the crossbar should sink the packet, as opposed to diff --git a/src/mem/comm_monitor.cc b/src/mem/comm_monitor.cc index e9a5d2c57..14df955c1 100644 --- a/src/mem/comm_monitor.cc +++ b/src/mem/comm_monitor.cc @@ -45,8 +45,8 @@ CommMonitor::CommMonitor(Params* params) : SimObject(params), - masterPort(name() + "-master", *this), - slavePort(name() + "-slave", *this), + memSidePort(name() + "-mem_side_port", *this), + cpuSidePort(name() + "-cpu_side_port", *this), samplePeriodicEvent([this]{ samplePeriodic(); }, name()), samplePeriodTicks(params->sample_period), samplePeriod(params->sample_period / SimClock::Float::s), @@ -67,7 +67,7 @@ void CommMonitor::init() { // make sure both sides of the monitor are connected - if (!slavePort.isConnected() || !masterPort.isConnected()) + if (!cpuSidePort.isConnected() || !memSidePort.isConnected()) fatal("Communication monitor is not connected on both sides.\n"); } @@ -81,10 +81,10 @@ CommMonitor::regProbePoints() Port & CommMonitor::getPort(const std::string &if_name, PortID idx) { - if (if_name == "master") { - return masterPort; - } else if (if_name == "slave") { - return slavePort; + if (if_name == "mem_side_port") { + return memSidePort; + } else if (if_name == "cpu_side_port") { + return cpuSidePort; } else { return SimObject::getPort(if_name, idx); } @@ -93,13 +93,13 @@ CommMonitor::getPort(const std::string &if_name, PortID idx) void CommMonitor::recvFunctional(PacketPtr pkt) { - masterPort.sendFunctional(pkt); + memSidePort.sendFunctional(pkt); } void CommMonitor::recvFunctionalSnoop(PacketPtr pkt) { - slavePort.sendFunctionalSnoop(pkt); + cpuSidePort.sendFunctionalSnoop(pkt); } CommMonitor::MonitorStats::MonitorStats(Stats::Group *parent, @@ -344,7 +344,7 @@ CommMonitor::recvAtomic(PacketPtr pkt) ProbePoints::PacketInfo req_pkt_info(pkt); ppPktReq->notify(req_pkt_info); - const Tick delay(masterPort.sendAtomic(pkt)); + const Tick delay(memSidePort.sendAtomic(pkt)); stats.updateReqStats(req_pkt_info, true, expects_response); if (expects_response) @@ -360,7 +360,7 @@ CommMonitor::recvAtomic(PacketPtr pkt) Tick CommMonitor::recvAtomicSnoop(PacketPtr pkt) { - return slavePort.sendAtomicSnoop(pkt); + return cpuSidePort.sendAtomicSnoop(pkt); } bool @@ -385,7 +385,7 @@ CommMonitor::recvTimingReq(PacketPtr pkt) } // Attempt to send the packet - bool successful = masterPort.sendTimingReq(pkt); + bool successful = memSidePort.sendTimingReq(pkt); // If not successful, restore the sender state if (!successful && expects_response && !stats.disableLatencyHists) { @@ -428,7 +428,7 @@ CommMonitor::recvTimingResp(PacketPtr pkt) } // Attempt to send the packet - bool successful = slavePort.sendTimingResp(pkt); + bool successful = cpuSidePort.sendTimingResp(pkt); if (!stats.disableLatencyHists) { // If packet successfully send, sample value of latency, @@ -456,57 +456,57 @@ CommMonitor::recvTimingResp(PacketPtr pkt) void CommMonitor::recvTimingSnoopReq(PacketPtr pkt) { - slavePort.sendTimingSnoopReq(pkt); + cpuSidePort.sendTimingSnoopReq(pkt); } bool CommMonitor::recvTimingSnoopResp(PacketPtr pkt) { - return masterPort.sendTimingSnoopResp(pkt); + return memSidePort.sendTimingSnoopResp(pkt); } void CommMonitor::recvRetrySnoopResp() { - 
slavePort.sendRetrySnoopResp(); + cpuSidePort.sendRetrySnoopResp(); } bool CommMonitor::isSnooping() const { - // check if the connected master port is snooping - return slavePort.isSnooping(); + // check if the connected request port is snooping + return cpuSidePort.isSnooping(); } AddrRangeList CommMonitor::getAddrRanges() const { - // get the address ranges of the connected slave port - return masterPort.getAddrRanges(); + // get the address ranges of the connected CPU-side port + return memSidePort.getAddrRanges(); } void CommMonitor::recvReqRetry() { - slavePort.sendRetryReq(); + cpuSidePort.sendRetryReq(); } void CommMonitor::recvRespRetry() { - masterPort.sendRetryResp(); + memSidePort.sendRetryResp(); } bool CommMonitor::tryTiming(PacketPtr pkt) { - return masterPort.tryTiming(pkt); + return memSidePort.tryTiming(pkt); } void CommMonitor::recvRangeChange() { - slavePort.sendRangeChange(); + cpuSidePort.sendRangeChange(); } void diff --git a/src/mem/comm_monitor.hh b/src/mem/comm_monitor.hh index 33cf4d8ec..ed6936246 100644 --- a/src/mem/comm_monitor.hh +++ b/src/mem/comm_monitor.hh @@ -112,17 +112,17 @@ class CommMonitor : public SimObject }; /** - * This is the master port of the communication monitor. All recv + * This is the request port of the communication monitor. All recv * functions call a function in CommMonitor, where the - * send function of the slave port is called. Besides this, these + * send function of the CPU-side port is called. Besides this, these * functions can also perform actions for capturing statistics. */ - class MonitorMasterPort : public RequestPort + class MonitorRequestPort : public RequestPort { public: - MonitorMasterPort(const std::string& _name, CommMonitor& _mon) + MonitorRequestPort(const std::string& _name, CommMonitor& _mon) : RequestPort(_name, &_mon), mon(_mon) { } @@ -174,21 +174,21 @@ class CommMonitor : public SimObject }; - /** Instance of master port, facing the memory side */ - MonitorMasterPort masterPort; + /** Instance of request port, facing the memory side */ + MonitorRequestPort memSidePort; /** - * This is the slave port of the communication monitor. All recv + * This is the CPU-side port of the communication monitor. All recv * functions call a function in CommMonitor, where the - * send function of the master port is called. Besides this, these + * send function of the request port is called. Besides this, these * functions can also perform actions for capturing statistics. */ - class MonitorSlavePort : public ResponsePort + class MonitorResponsePort : public ResponsePort { public: - MonitorSlavePort(const std::string& _name, CommMonitor& _mon) + MonitorResponsePort(const std::string& _name, CommMonitor& _mon) : ResponsePort(_name, &_mon), mon(_mon) { } @@ -235,8 +235,8 @@ class CommMonitor : public SimObject }; - /** Instance of slave port, i.e. on the CPU side */ - MonitorSlavePort slavePort; + /** Instance of response port, i.e. 
on the CPU side */ + MonitorResponsePort cpuSidePort; void recvFunctional(PacketPtr pkt); diff --git a/src/mem/dramsim2.cc b/src/mem/dramsim2.cc index 2307df061..b09138cef 100644 --- a/src/mem/dramsim2.cc +++ b/src/mem/dramsim2.cc @@ -254,7 +254,7 @@ DRAMSim2::accessAndRespond(PacketPtr pkt) // response access(pkt); - // turn packet around to go back to requester if response expected + // turn packet around to go back to requestor if response expected if (needsResponse) { // access already turned the packet into a response assert(pkt->isResponse()); @@ -352,7 +352,7 @@ DRAMSim2::drain() DRAMSim2::MemoryPort::MemoryPort(const std::string& _name, DRAMSim2& _memory) - : SlavePort(_name, &_memory), memory(_memory) + : ResponsePort(_name, &_memory), memory(_memory) { } AddrRangeList diff --git a/src/mem/dramsim2.hh b/src/mem/dramsim2.hh index 9503d9b41..3259e8513 100644 --- a/src/mem/dramsim2.hh +++ b/src/mem/dramsim2.hh @@ -59,7 +59,7 @@ class DRAMSim2 : public AbstractMemory * having unbounded storage that is implicitly created in the port * itself. */ - class MemoryPort : public SlavePort + class MemoryPort : public ResponsePort { private: diff --git a/src/mem/dramsim3.cc b/src/mem/dramsim3.cc index fc2eaca21..4b2f79676 100644 --- a/src/mem/dramsim3.cc +++ b/src/mem/dramsim3.cc @@ -258,7 +258,7 @@ DRAMsim3::accessAndRespond(PacketPtr pkt) // response access(pkt); - // turn packet around to go back to requester if response expected + // turn packet around to go back to requestor if response expected if (needsResponse) { // access already turned the packet into a response assert(pkt->isResponse()); @@ -352,7 +352,7 @@ DRAMsim3::drain() DRAMsim3::MemoryPort::MemoryPort(const std::string& _name, DRAMsim3& _memory) - : SlavePort(_name, &_memory), memory(_memory) + : ResponsePort(_name, &_memory), memory(_memory) { } AddrRangeList diff --git a/src/mem/dramsim3.hh b/src/mem/dramsim3.hh index 1b4a8a6f5..fc3cd1a28 100644 --- a/src/mem/dramsim3.hh +++ b/src/mem/dramsim3.hh @@ -61,7 +61,7 @@ class DRAMsim3 : public AbstractMemory * having unbounded storage that is implicitly created in the port * itself. */ - class MemoryPort : public SlavePort + class MemoryPort : public ResponsePort { private: diff --git a/src/mem/external_master.cc b/src/mem/external_master.cc index 1a1ec4092..81b7a52c3 100644 --- a/src/mem/external_master.cc +++ b/src/mem/external_master.cc @@ -53,7 +53,7 @@ ExternalMaster::ExternalMaster(ExternalMasterParams *params) : portName(params->name + ".port"), portType(params->port_type), portData(params->port_data), - masterId(params->system->getMasterId(this)) + id(params->system->getRequestorId(this)) {} Port & diff --git a/src/mem/external_master.hh b/src/mem/external_master.hh index d7aec112f..a4d5b031c 100644 --- a/src/mem/external_master.hh +++ b/src/mem/external_master.hh @@ -39,7 +39,7 @@ * @file * * ExternalMaster is a memory object representing a binding from - * a gem5 slave to a master port in a system external to gem5. + * a gem5 responder to a request port in a system external to gem5. 
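By way of illustration (this sketch is not part of the committed patch): the ExternalMaster change above shows the general recipe for any requestor-side object after the rename: fetch a RequestorID from the System once at construction, then tag every Request with it. The class MyDeviceSketch, its requestorId member and its buildReadPacket() helper are invented for illustration, and its params are assumed to expose the System as ExternalMaster's do.

    MyDeviceSketch::MyDeviceSketch(const Params *p)
        : SimObject(p),
          // formerly p->system->getMasterId(this)
          requestorId(p->system->getRequestorId(this))
    {}

    PacketPtr
    MyDeviceSketch::buildReadPacket(Addr addr, unsigned size)
    {
        // every Request carries the RequestorID of the issuing object
        auto req = std::make_shared<Request>(addr, size, 0 /* flags */,
                                             requestorId);
        return Packet::createRead(req);
    }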
* * During initialisation, a `handler' for the port type specified in the * port's port_type parameter is found from the registered port handlers @@ -128,7 +128,7 @@ class ExternalMaster : public SimObject void init() override; - const MasterID masterId; + const RequestorID id; }; diff --git a/src/mem/external_slave.hh b/src/mem/external_slave.hh index 0a5e01a48..c0f01f85e 100644 --- a/src/mem/external_slave.hh +++ b/src/mem/external_slave.hh @@ -39,7 +39,7 @@ * @file * * ExternalSlave is a memory object representing a binding from - * a gem5 master to a slave port in a system external to gem5. + * a gem5 requestor to a response port in a system external to gem5. * * During initialisation, a `handler' for the port type specified in the * port's port_type parameter is found from the registered port handlers diff --git a/src/mem/hmc_controller.cc b/src/mem/hmc_controller.cc index e7b3f6a4f..20f672535 100644 --- a/src/mem/hmc_controller.cc +++ b/src/mem/hmc_controller.cc @@ -6,10 +6,10 @@ HMCController::HMCController(const HMCControllerParams* p) : NoncoherentXBar(p), - n_master_ports(p->port_master_connection_count), + numMemSidePorts(p->port_mem_side_ports_connection_count), rr_counter(0) { - assert(p->port_slave_connection_count == 1); + assert(p->port_cpu_side_ports_connection_count == 1); } HMCController* @@ -18,43 +18,43 @@ HMCControllerParams::create() return new HMCController(this); } -// Since this module is a load distributor, all its master ports have the same +// Since this module is a load distributor, all its request ports have the same // range so we should keep only one of the ranges and ignore the others -void HMCController::recvRangeChange(PortID master_port_id) +void HMCController::recvRangeChange(PortID mem_side_port_id) { - if (master_port_id == 0) + if (mem_side_port_id == 0) { gotAllAddrRanges = true; - BaseXBar::recvRangeChange(master_port_id); + BaseXBar::recvRangeChange(mem_side_port_id); } else - gotAddrRanges[master_port_id] = true; + gotAddrRanges[mem_side_port_id] = true; } int HMCController::rotate_counter() { int current_value = rr_counter; rr_counter++; - if (rr_counter == n_master_ports) + if (rr_counter == numMemSidePorts) rr_counter = 0; return current_value; } -bool HMCController::recvTimingReq(PacketPtr pkt, PortID slave_port_id) +bool HMCController::recvTimingReq(PacketPtr pkt, PortID cpu_side_port_id) { // determine the source port based on the id - ResponsePort *src_port = slavePorts[slave_port_id]; + ResponsePort *src_port = cpuSidePorts[cpu_side_port_id]; // we should never see express snoops on a non-coherent component assert(!pkt->isExpressSnoop()); // For now, this is a simple round robin counter, for distribution the // load among the serial links - PortID master_port_id = rotate_counter(); + PortID mem_side_port_id = rotate_counter(); // test if the layer should be considered occupied for the current // port - if (!reqLayers[master_port_id]->tryTiming(src_port)) { + if (!reqLayers[mem_side_port_id]->tryTiming(src_port)) { DPRINTF(HMCController, "recvTimingReq: src %s %s 0x%x BUSY\n", src_port->name(), pkt->cmdString(), pkt->getAddr()); return false; @@ -86,7 +86,7 @@ bool HMCController::recvTimingReq(PacketPtr pkt, PortID slave_port_id) !pkt->cacheResponding(); // since it is a normal request, attempt to send the packet - bool success = masterPorts[master_port_id]->sendTimingReq(pkt); + bool success = memSidePorts[mem_side_port_id]->sendTimingReq(pkt); if (!success) { DPRINTF(HMCController, "recvTimingReq: src %s %s 0x%x RETRY\n", @@ -96,7 +96,7 
@@ bool HMCController::recvTimingReq(PacketPtr pkt, PortID slave_port_id) pkt->headerDelay = old_header_delay; // occupy until the header is sent - reqLayers[master_port_id]->failedTiming(src_port, + reqLayers[mem_side_port_id]->failedTiming(src_port, clockEdge(Cycles(1))); return false; @@ -105,14 +105,14 @@ bool HMCController::recvTimingReq(PacketPtr pkt, PortID slave_port_id) // remember where to route the response to if (expect_response) { assert(routeTo.find(pkt->req) == routeTo.end()); - routeTo[pkt->req] = slave_port_id; + routeTo[pkt->req] = cpu_side_port_id; } - reqLayers[master_port_id]->succeededTiming(packetFinishTime); + reqLayers[mem_side_port_id]->succeededTiming(packetFinishTime); // stats updates - pktCount[slave_port_id][master_port_id]++; - pktSize[slave_port_id][master_port_id] += pkt_size; + pktCount[cpu_side_port_id][mem_side_port_id]++; + pktSize[cpu_side_port_id][mem_side_port_id] += pkt_size; transDist[pkt_cmd]++; return true; diff --git a/src/mem/hmc_controller.hh b/src/mem/hmc_controller.hh index 2ab3101aa..8206ee007 100644 --- a/src/mem/hmc_controller.hh +++ b/src/mem/hmc_controller.hh @@ -80,14 +80,14 @@ private: // Receive range change only on one of the ports (because they all have // the same range) - virtual void recvRangeChange(PortID master_port_id); + virtual void recvRangeChange(PortID mem_side_port_id); - // Receive a request and distribute it among slave ports + // Receive a request and distribute it among response ports // Simply forwards the packet to the next serial link based on a // Round-robin counter - virtual bool recvTimingReq(PacketPtr pkt, PortID slave_port_id); + virtual bool recvTimingReq(PacketPtr pkt, PortID cpu_side_port_id); - int n_master_ports; + int numMemSidePorts; // The round-robin counter int rr_counter; diff --git a/src/mem/mem_checker_monitor.cc b/src/mem/mem_checker_monitor.cc index f5800cfb1..82ca83b09 100644 --- a/src/mem/mem_checker_monitor.cc +++ b/src/mem/mem_checker_monitor.cc @@ -46,8 +46,8 @@ MemCheckerMonitor::MemCheckerMonitor(Params* params) : SimObject(params), - masterPort(name() + "-master", *this), - slavePort(name() + "-slave", *this), + memSidePort(name() + "-memSidePort", *this), + cpuSidePort(name() + "-cpuSidePort", *this), warnOnly(params->warn_only), memchecker(params->memchecker) {} @@ -65,17 +65,17 @@ void MemCheckerMonitor::init() { // make sure both sides of the monitor are connected - if (!slavePort.isConnected() || !masterPort.isConnected()) + if (!cpuSidePort.isConnected() || !memSidePort.isConnected()) fatal("Communication monitor is not connected on both sides.\n"); } Port & MemCheckerMonitor::getPort(const std::string &if_name, PortID idx) { - if (if_name == "master" || if_name == "mem_side") { - return masterPort; - } else if (if_name == "slave" || if_name == "cpu_side") { - return slavePort; + if (if_name == "request" || if_name == "mem_side_port") { + return memSidePort; + } else if (if_name == "response" || if_name == "cpu_side_port") { + return cpuSidePort; } else { return SimObject::getPort(if_name, idx); } @@ -92,7 +92,7 @@ MemCheckerMonitor::recvFunctional(PacketPtr pkt) // reads/writes to these location from other devices we do not see. memchecker->reset(addr, size); - masterPort.sendFunctional(pkt); + memSidePort.sendFunctional(pkt); DPRINTF(MemCheckerMonitor, "Forwarded functional access: addr = %#llx, size = %d\n", @@ -108,7 +108,7 @@ MemCheckerMonitor::recvFunctionalSnoop(PacketPtr pkt) // See above. 
memchecker->reset(addr, size); - slavePort.sendFunctionalSnoop(pkt); + cpuSidePort.sendFunctionalSnoop(pkt); DPRINTF(MemCheckerMonitor, "Received functional snoop: addr = %#llx, size = %d\n", @@ -164,7 +164,7 @@ MemCheckerMonitor::recvTimingReq(PacketPtr pkt) } // Attempt to send the packet - bool successful = masterPort.sendTimingReq(pkt); + bool successful = memSidePort.sendTimingReq(pkt); // If not successful, restore the sender state if (!successful && expects_response && (is_read || is_write)) { @@ -180,9 +180,9 @@ MemCheckerMonitor::recvTimingReq(PacketPtr pkt) // At the time where we push the sender-state, we do not yet know // the serial the MemChecker class will assign to this request. We // cannot call startRead at the time we push the sender-state, as - // the masterPort may not be successful in executing sendTimingReq, - // and in case of a failure, we must not modify the state of the - // MemChecker. + // the memSidePort may not be successful in executing + // sendTimingReq, and in case of a failure, we must not + // modify the state of the MemChecker. // // Once we know that sendTimingReq was successful, we can set the // serial of the newly constructed sender-state. This is legal, as @@ -256,7 +256,7 @@ MemCheckerMonitor::recvTimingResp(PacketPtr pkt) } // Attempt to send the packet - bool successful = slavePort.sendTimingResp(pkt); + bool successful = cpuSidePort.sendTimingResp(pkt); // If packet successfully send, complete transaction in MemChecker // instance, and delete sender state, otherwise restore state. @@ -318,43 +318,43 @@ MemCheckerMonitor::recvTimingResp(PacketPtr pkt) void MemCheckerMonitor::recvTimingSnoopReq(PacketPtr pkt) { - slavePort.sendTimingSnoopReq(pkt); + cpuSidePort.sendTimingSnoopReq(pkt); } bool MemCheckerMonitor::recvTimingSnoopResp(PacketPtr pkt) { - return masterPort.sendTimingSnoopResp(pkt); + return memSidePort.sendTimingSnoopResp(pkt); } bool MemCheckerMonitor::isSnooping() const { - // check if the connected master port is snooping - return slavePort.isSnooping(); + // check if the connected memSidePort is snooping + return cpuSidePort.isSnooping(); } AddrRangeList MemCheckerMonitor::getAddrRanges() const { - // get the address ranges of the connected slave port - return masterPort.getAddrRanges(); + // get the address ranges of the connected cpuSidePort + return memSidePort.getAddrRanges(); } void MemCheckerMonitor::recvReqRetry() { - slavePort.sendRetryReq(); + cpuSidePort.sendRetryReq(); } void MemCheckerMonitor::recvRespRetry() { - masterPort.sendRetryResp(); + memSidePort.sendRetryResp(); } void MemCheckerMonitor::recvRangeChange() { - slavePort.sendRangeChange(); + cpuSidePort.sendRangeChange(); } diff --git a/src/mem/mem_checker_monitor.hh b/src/mem/mem_checker_monitor.hh index c2fb80d59..8e5dab1a2 100644 --- a/src/mem/mem_checker_monitor.hh +++ b/src/mem/mem_checker_monitor.hh @@ -83,17 +83,17 @@ class MemCheckerMonitor : public SimObject }; /** - * This is the master port of the communication monitor. All recv + * This is the request port of the communication monitor. All recv * functions call a function in MemCheckerMonitor, where the - * send function of the slave port is called. Besides this, these + * send function of the response port is called. Besides this, these * functions can also perform actions for capturing statistics. 
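By way of illustration (this sketch is not part of the committed patch): the doc comment above describes the monitors' pass-through rule, which after the rename reads naturally as "received on the CPU-side port, sent on the memory-side port", and the reverse for responses. A minimal sketch with all checking and statistics stripped out; MonitorSketch is an invented name.

    bool
    MonitorSketch::recvTimingReq(PacketPtr pkt)
    {
        // arrived on cpuSidePort, leaves through memSidePort
        return memSidePort.sendTimingReq(pkt);
    }

    bool
    MonitorSketch::recvTimingResp(PacketPtr pkt)
    {
        // arrived on memSidePort, returns through cpuSidePort
        return cpuSidePort.sendTimingResp(pkt);
    }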
*/ - class MonitorMasterPort : public RequestPort + class MonitorRequestPort : public RequestPort { public: - MonitorMasterPort(const std::string& _name, MemCheckerMonitor& _mon) + MonitorRequestPort(const std::string& _name, MemCheckerMonitor& _mon) : RequestPort(_name, &_mon), mon(_mon) { } @@ -140,21 +140,21 @@ class MemCheckerMonitor : public SimObject }; - /** Instance of master port, facing the memory side */ - MonitorMasterPort masterPort; + /** Instance of request port, facing the memory side */ + MonitorRequestPort memSidePort; /** - * This is the slave port of the communication monitor. All recv + * This is the response port of the communication monitor. All recv * functions call a function in MemCheckerMonitor, where the - * send function of the master port is called. Besides this, these + * send function of the request port is called. Besides this, these * functions can also perform actions for capturing statistics. */ - class MonitorSlavePort : public ResponsePort + class MonitorResponsePort : public ResponsePort { public: - MonitorSlavePort(const std::string& _name, MemCheckerMonitor& _mon) + MonitorResponsePort(const std::string& _name, MemCheckerMonitor& _mon) : ResponsePort(_name, &_mon), mon(_mon) { } @@ -196,8 +196,8 @@ class MemCheckerMonitor : public SimObject }; - /** Instance of slave port, i.e. on the CPU side */ - MonitorSlavePort slavePort; + /** Instance of response port, i.e. on the CPU side */ + MonitorResponsePort cpuSidePort; void recvFunctional(PacketPtr pkt); diff --git a/src/mem/mem_ctrl.cc b/src/mem/mem_ctrl.cc index 66d3c2ab9..1c0d4b1b6 100644 --- a/src/mem/mem_ctrl.cc +++ b/src/mem/mem_ctrl.cc @@ -204,7 +204,7 @@ MemCtrl::addToReadQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram) base_addr + pkt->getSize()) - addr; stats.readPktSize[ceilLog2(size)]++; stats.readBursts++; - stats.masterReadAccesses[pkt->masterId()]++; + stats.requestorReadAccesses[pkt->requestorId()]++; // First check write buffer to see if the data is already at // the controller @@ -267,7 +267,7 @@ MemCtrl::addToReadQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram) readQueue[mem_pkt->qosValue()].push_back(mem_pkt); // log packet - logRequest(MemCtrl::READ, pkt->masterId(), pkt->qosValue(), + logRequest(MemCtrl::READ, pkt->requestorId(), pkt->qosValue(), mem_pkt->addr, 1); // Update stats @@ -314,7 +314,7 @@ MemCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram) base_addr + pkt->getSize()) - addr; stats.writePktSize[ceilLog2(size)]++; stats.writeBursts++; - stats.masterWriteAccesses[pkt->masterId()]++; + stats.requestorWriteAccesses[pkt->requestorId()]++; // see if we can merge with an existing item in the write // queue and keep track of whether we have merged or not @@ -341,7 +341,7 @@ MemCtrl::addToWriteQueue(PacketPtr pkt, unsigned int pkt_count, bool is_dram) isInWriteQueue.insert(burstAlign(addr, is_dram)); // log packet - logRequest(MemCtrl::WRITE, pkt->masterId(), pkt->qosValue(), + logRequest(MemCtrl::WRITE, pkt->requestorId(), pkt->qosValue(), mem_pkt->addr, 1); assert(totalWriteQueueSize == isInWriteQueue.size()); @@ -498,7 +498,7 @@ MemCtrl::processRespondEvent() if (mem_pkt->burstHelper->burstsServiced == mem_pkt->burstHelper->burstCount) { // we have now serviced all children packets of a system packet - // so we can now respond to the requester + // so we can now respond to the requestor // @todo we probably want to have a different front end and back // end latency for split packets accessAndRespond(mem_pkt->pkt, frontendLatency + 
backendLatency); @@ -635,7 +635,7 @@ MemCtrl::accessAndRespond(PacketPtr pkt, Tick static_latency) pkt->print()); } - // turn packet around to go back to requester if response expected + // turn packet around to go back to requestor if response expected if (needsResponse) { // access already turned the packet into a response assert(pkt->isResponse()); @@ -842,13 +842,13 @@ MemCtrl::doBurstAccess(MemPacket* mem_pkt) if (mem_pkt->isRead()) { ++readsThisTime; // Update latency stats - stats.masterReadTotalLat[mem_pkt->masterId()] += + stats.requestorReadTotalLat[mem_pkt->requestorId()] += mem_pkt->readyTime - mem_pkt->entryTime; - stats.masterReadBytes[mem_pkt->masterId()] += mem_pkt->size; + stats.requestorReadBytes[mem_pkt->requestorId()] += mem_pkt->size; } else { ++writesThisTime; - stats.masterWriteBytes[mem_pkt->masterId()] += mem_pkt->size; - stats.masterWriteTotalLat[mem_pkt->masterId()] += + stats.requestorWriteBytes[mem_pkt->requestorId()] += mem_pkt->size; + stats.requestorWriteTotalLat[mem_pkt->requestorId()] += mem_pkt->readyTime - mem_pkt->entryTime; } } @@ -1005,13 +1005,13 @@ MemCtrl::processNextReqEvent() assert(mem_pkt->readyTime >= curTick()); // log the response - logResponse(MemCtrl::READ, (*to_read)->masterId(), + logResponse(MemCtrl::READ, (*to_read)->requestorId(), mem_pkt->qosValue(), mem_pkt->getAddr(), 1, mem_pkt->readyTime - mem_pkt->entryTime); // Insert into response queue. It will be sent back to the - // requester at its readyTime + // requestor at its readyTime if (respQueue.empty()) { assert(!respondEvent.scheduled()); schedule(respondEvent, mem_pkt->readyTime); @@ -1090,7 +1090,7 @@ MemCtrl::processNextReqEvent() isInWriteQueue.erase(burstAlign(mem_pkt->addr, mem_pkt->isDram())); // log the response - logResponse(MemCtrl::WRITE, mem_pkt->masterId(), + logResponse(MemCtrl::WRITE, mem_pkt->requestorId(), mem_pkt->qosValue(), mem_pkt->getAddr(), 1, mem_pkt->readyTime - mem_pkt->entryTime); @@ -1220,24 +1220,24 @@ MemCtrl::CtrlStats::CtrlStats(MemCtrl &_ctrl) ADD_STAT(totGap, "Total gap between requests"), ADD_STAT(avgGap, "Average gap between requests"), - ADD_STAT(masterReadBytes, "Per-master bytes read from memory"), - ADD_STAT(masterWriteBytes, "Per-master bytes write to memory"), - ADD_STAT(masterReadRate, - "Per-master bytes read from memory rate (Bytes/sec)"), - ADD_STAT(masterWriteRate, - "Per-master bytes write to memory rate (Bytes/sec)"), - ADD_STAT(masterReadAccesses, - "Per-master read serviced memory accesses"), - ADD_STAT(masterWriteAccesses, - "Per-master write serviced memory accesses"), - ADD_STAT(masterReadTotalLat, - "Per-master read total memory access latency"), - ADD_STAT(masterWriteTotalLat, - "Per-master write total memory access latency"), - ADD_STAT(masterReadAvgLat, - "Per-master read average memory access latency"), - ADD_STAT(masterWriteAvgLat, - "Per-master write average memory access latency") + ADD_STAT(requestorReadBytes, "Per-requestor bytes read from memory"), + ADD_STAT(requestorWriteBytes, "Per-requestor bytes write to memory"), + ADD_STAT(requestorReadRate, + "Per-requestor bytes read from memory rate (Bytes/sec)"), + ADD_STAT(requestorWriteRate, + "Per-requestor bytes write to memory rate (Bytes/sec)"), + ADD_STAT(requestorReadAccesses, + "Per-requestor read serviced memory accesses"), + ADD_STAT(requestorWriteAccesses, + "Per-requestor write serviced memory accesses"), + ADD_STAT(requestorReadTotalLat, + "Per-requestor read total memory access latency"), + ADD_STAT(requestorWriteTotalLat, + "Per-requestor write total 
memory access latency"), + ADD_STAT(requestorReadAvgLat, + "Per-requestor read average memory access latency"), + ADD_STAT(requestorWriteAvgLat, + "Per-requestor write average memory access latency") { } @@ -1248,7 +1248,7 @@ MemCtrl::CtrlStats::regStats() using namespace Stats; assert(ctrl.system()); - const auto max_masters = ctrl.system()->maxMasters(); + const auto max_requestors = ctrl.system()->maxRequestors(); avgRdQLen.precision(2); avgWrQLen.precision(2); @@ -1270,60 +1270,60 @@ MemCtrl::CtrlStats::regStats() avgWrBWSys.precision(2); avgGap.precision(2); - // per-master bytes read and written to memory - masterReadBytes - .init(max_masters) + // per-requestor bytes read and written to memory + requestorReadBytes + .init(max_requestors) .flags(nozero | nonan); - masterWriteBytes - .init(max_masters) + requestorWriteBytes + .init(max_requestors) .flags(nozero | nonan); - // per-master bytes read and written to memory rate - masterReadRate + // per-requestor bytes read and written to memory rate + requestorReadRate .flags(nozero | nonan) .precision(12); - masterReadAccesses - .init(max_masters) + requestorReadAccesses + .init(max_requestors) .flags(nozero); - masterWriteAccesses - .init(max_masters) + requestorWriteAccesses + .init(max_requestors) .flags(nozero); - masterReadTotalLat - .init(max_masters) + requestorReadTotalLat + .init(max_requestors) .flags(nozero | nonan); - masterReadAvgLat + requestorReadAvgLat .flags(nonan) .precision(2); - masterWriteRate + requestorWriteRate .flags(nozero | nonan) .precision(12); - masterWriteTotalLat - .init(max_masters) + requestorWriteTotalLat + .init(max_requestors) .flags(nozero | nonan); - masterWriteAvgLat + requestorWriteAvgLat .flags(nonan) .precision(2); - for (int i = 0; i < max_masters; i++) { - const std::string master = ctrl.system()->getMasterName(i); - masterReadBytes.subname(i, master); - masterReadRate.subname(i, master); - masterWriteBytes.subname(i, master); - masterWriteRate.subname(i, master); - masterReadAccesses.subname(i, master); - masterWriteAccesses.subname(i, master); - masterReadTotalLat.subname(i, master); - masterReadAvgLat.subname(i, master); - masterWriteTotalLat.subname(i, master); - masterWriteAvgLat.subname(i, master); + for (int i = 0; i < max_requestors; i++) { + const std::string requestor = ctrl.system()->getRequestorName(i); + requestorReadBytes.subname(i, requestor); + requestorReadRate.subname(i, requestor); + requestorWriteBytes.subname(i, requestor); + requestorWriteRate.subname(i, requestor); + requestorReadAccesses.subname(i, requestor); + requestorWriteAccesses.subname(i, requestor); + requestorReadTotalLat.subname(i, requestor); + requestorReadAvgLat.subname(i, requestor); + requestorWriteTotalLat.subname(i, requestor); + requestorWriteAvgLat.subname(i, requestor); } // Formula stats @@ -1332,10 +1332,10 @@ MemCtrl::CtrlStats::regStats() avgGap = totGap / (readReqs + writeReqs); - masterReadRate = masterReadBytes / simSeconds; - masterWriteRate = masterWriteBytes / simSeconds; - masterReadAvgLat = masterReadTotalLat / masterReadAccesses; - masterWriteAvgLat = masterWriteTotalLat / masterWriteAccesses; + requestorReadRate = requestorReadBytes / simSeconds; + requestorWriteRate = requestorWriteBytes / simSeconds; + requestorReadAvgLat = requestorReadTotalLat / requestorReadAccesses; + requestorWriteAvgLat = requestorWriteTotalLat / requestorWriteAccesses; } void @@ -1421,7 +1421,7 @@ MemCtrl::drainResume() } MemCtrl::MemoryPort::MemoryPort(const std::string& name, MemCtrl& _ctrl) - : 
QueuedSlavePort(name, &_ctrl, queue), queue(_ctrl, *this, true), + : QueuedResponsePort(name, &_ctrl, queue), queue(_ctrl, *this, true), ctrl(_ctrl) { } diff --git a/src/mem/mem_ctrl.hh b/src/mem/mem_ctrl.hh index 834cb5c5e..2e3cf8c6a 100644 --- a/src/mem/mem_ctrl.hh +++ b/src/mem/mem_ctrl.hh @@ -101,8 +101,8 @@ class MemPacket /** This comes from the outside world */ const PacketPtr pkt; - /** MasterID associated with the packet */ - const MasterID _masterId; + /** RequestorID associated with the packet */ + const RequestorID _requestorId; const bool read; @@ -159,10 +159,10 @@ class MemPacket inline uint8_t qosValue() const { return _qosValue; } /** - * Get the packet MasterID + * Get the packet RequestorID * (interface compatibility with Packet) */ - inline MasterID masterId() const { return _masterId; } + inline RequestorID requestorId() const { return _requestorId; } /** * Get the packet size @@ -197,7 +197,7 @@ class MemPacket uint8_t _bank, uint32_t _row, uint16_t bank_id, Addr _addr, unsigned int _size) : entryTime(curTick()), readyTime(curTick()), pkt(_pkt), - _masterId(pkt->masterId()), + _requestorId(pkt->requestorId()), read(is_read), dram(is_dram), rank(_rank), bank(_bank), row(_row), bankId(bank_id), addr(_addr), size(_size), burstHelper(NULL), _qosValue(_pkt->qosValue()) @@ -237,9 +237,9 @@ class MemCtrl : public QoS::MemCtrl { private: - // For now, make use of a queued slave port to avoid dealing with + // For now, make use of a queued response port to avoid dealing with // flow control for the responses being sent back - class MemoryPort : public QueuedSlavePort + class MemoryPort : public QueuedResponsePort { RespPacketQueue queue; @@ -562,25 +562,25 @@ class MemCtrl : public QoS::MemCtrl Stats::Scalar totGap; Stats::Formula avgGap; - // per-master bytes read and written to memory - Stats::Vector masterReadBytes; - Stats::Vector masterWriteBytes; + // per-requestor bytes read and written to memory + Stats::Vector requestorReadBytes; + Stats::Vector requestorWriteBytes; - // per-master bytes read and written to memory rate - Stats::Formula masterReadRate; - Stats::Formula masterWriteRate; + // per-requestor bytes read and written to memory rate + Stats::Formula requestorReadRate; + Stats::Formula requestorWriteRate; - // per-master read and write serviced memory accesses - Stats::Vector masterReadAccesses; - Stats::Vector masterWriteAccesses; + // per-requestor read and write serviced memory accesses + Stats::Vector requestorReadAccesses; + Stats::Vector requestorWriteAccesses; - // per-master read and write total memory access latency - Stats::Vector masterReadTotalLat; - Stats::Vector masterWriteTotalLat; + // per-requestor read and write total memory access latency + Stats::Vector requestorReadTotalLat; + Stats::Vector requestorWriteTotalLat; - // per-master raed and write average memory access latency - Stats::Formula masterReadAvgLat; - Stats::Formula masterWriteAvgLat; + // per-requestor raed and write average memory access latency + Stats::Formula requestorReadAvgLat; + Stats::Formula requestorWriteAvgLat; }; CtrlStats stats; diff --git a/src/mem/mem_delay.cc b/src/mem/mem_delay.cc index 9b4e7b63e..8120a99d6 100644 --- a/src/mem/mem_delay.cc +++ b/src/mem/mem_delay.cc @@ -42,18 +42,18 @@ MemDelay::MemDelay(const MemDelayParams *p) : ClockedObject(p), - masterPort(name() + "-master", *this), - slavePort(name() + "-slave", *this), - reqQueue(*this, masterPort), - respQueue(*this, slavePort), - snoopRespQueue(*this, masterPort) + requestPort(name() + "-mem_side_port", 
*this), + responsePort(name() + "-cpu_side_port", *this), + reqQueue(*this, requestPort), + respQueue(*this, responsePort), + snoopRespQueue(*this, requestPort) { } void MemDelay::init() { - if (!slavePort.isConnected() || !masterPort.isConnected()) + if (!responsePort.isConnected() || !requestPort.isConnected()) fatal("Memory delay is not connected on both sides.\n"); } @@ -61,10 +61,10 @@ MemDelay::init() Port & MemDelay::getPort(const std::string &if_name, PortID idx) { - if (if_name == "master") { - return masterPort; - } else if (if_name == "slave") { - return slavePort; + if (if_name == "mem_side_port") { + return requestPort; + } else if (if_name == "cpu_side_port") { + return responsePort; } else { return ClockedObject::getPort(if_name, idx); } @@ -73,12 +73,12 @@ MemDelay::getPort(const std::string &if_name, PortID idx) bool MemDelay::trySatisfyFunctional(PacketPtr pkt) { - return slavePort.trySatisfyFunctional(pkt) || - masterPort.trySatisfyFunctional(pkt); + return responsePort.trySatisfyFunctional(pkt) || + requestPort.trySatisfyFunctional(pkt); } MemDelay::RequestPort::RequestPort(const std::string &_name, MemDelay &_parent) - : QueuedMasterPort(_name, &_parent, + : QueuedRequestPort(_name, &_parent, _parent.reqQueue, _parent.snoopRespQueue), parent(_parent) { @@ -94,7 +94,7 @@ MemDelay::RequestPort::recvTimingResp(PacketPtr pkt) const Tick when = curTick() + parent.delayResp(pkt) + receive_delay; - parent.slavePort.schedTimingResp(pkt, when); + parent.responsePort.schedTimingResp(pkt, when); return true; } @@ -105,7 +105,7 @@ MemDelay::RequestPort::recvFunctionalSnoop(PacketPtr pkt) if (parent.trySatisfyFunctional(pkt)) { pkt->makeResponse(); } else { - parent.slavePort.sendFunctionalSnoop(pkt); + parent.responsePort.sendFunctionalSnoop(pkt); } } @@ -114,19 +114,19 @@ MemDelay::RequestPort::recvAtomicSnoop(PacketPtr pkt) { const Tick delay = parent.delaySnoopResp(pkt); - return delay + parent.slavePort.sendAtomicSnoop(pkt); + return delay + parent.responsePort.sendAtomicSnoop(pkt); } void MemDelay::RequestPort::recvTimingSnoopReq(PacketPtr pkt) { - parent.slavePort.sendTimingSnoopReq(pkt); + parent.responsePort.sendTimingSnoopReq(pkt); } MemDelay::ResponsePort:: ResponsePort(const std::string &_name, MemDelay &_parent) - : QueuedSlavePort(_name, &_parent, _parent.respQueue), + : QueuedResponsePort(_name, &_parent, _parent.respQueue), parent(_parent) { } @@ -136,7 +136,7 @@ MemDelay::ResponsePort::recvAtomic(PacketPtr pkt) { const Tick delay = parent.delayReq(pkt) + parent.delayResp(pkt); - return delay + parent.masterPort.sendAtomic(pkt); + return delay + parent.requestPort.sendAtomic(pkt); } bool @@ -150,7 +150,7 @@ MemDelay::ResponsePort::recvTimingReq(PacketPtr pkt) const Tick when = curTick() + parent.delayReq(pkt) + receive_delay; - parent.masterPort.schedTimingReq(pkt, when); + parent.requestPort.schedTimingReq(pkt, when); return true; } @@ -161,7 +161,7 @@ MemDelay::ResponsePort::recvFunctional(PacketPtr pkt) if (parent.trySatisfyFunctional(pkt)) { pkt->makeResponse(); } else { - parent.masterPort.sendFunctional(pkt); + parent.requestPort.sendFunctional(pkt); } } @@ -170,7 +170,7 @@ MemDelay::ResponsePort::recvTimingSnoopResp(PacketPtr pkt) { const Tick when = curTick() + parent.delaySnoopResp(pkt); - parent.masterPort.schedTimingSnoopResp(pkt, when); + parent.requestPort.schedTimingSnoopResp(pkt, when); return true; } diff --git a/src/mem/mem_delay.hh b/src/mem/mem_delay.hh index d337b3df6..50929eaf5 100644 --- a/src/mem/mem_delay.hh +++ b/src/mem/mem_delay.hh @@ 
-71,7 +71,7 @@ class MemDelay : public ClockedObject Port &getPort(const std::string &if_name, PortID idx=InvalidPortID) override; - class RequestPort : public QueuedMasterPort + class RequestPort : public QueuedRequestPort { public: RequestPort(const std::string &_name, MemDelay &_parent); @@ -86,18 +86,18 @@ class MemDelay : public ClockedObject void recvTimingSnoopReq(PacketPtr pkt) override; void recvRangeChange() override { - parent.slavePort.sendRangeChange(); + parent.responsePort.sendRangeChange(); } bool isSnooping() const override { - return parent.slavePort.isSnooping(); + return parent.responsePort.isSnooping(); } private: MemDelay& parent; }; - class ResponsePort : public QueuedSlavePort + class ResponsePort : public QueuedResponsePort { public: ResponsePort(const std::string &_name, MemDelay &_parent); @@ -109,7 +109,7 @@ class MemDelay : public ClockedObject bool recvTimingSnoopResp(PacketPtr pkt) override; AddrRangeList getAddrRanges() const override { - return parent.masterPort.getAddrRanges(); + return parent.requestPort.getAddrRanges(); } bool tryTiming(PacketPtr pkt) override { return true; } @@ -122,8 +122,8 @@ class MemDelay : public ClockedObject bool trySatisfyFunctional(PacketPtr pkt); - RequestPort masterPort; - ResponsePort slavePort; + RequestPort requestPort; + ResponsePort responsePort; ReqPacketQueue reqQueue; RespPacketQueue respQueue; diff --git a/src/mem/mem_interface.cc b/src/mem/mem_interface.cc index 307cf922c..d65c5d9db 100644 --- a/src/mem/mem_interface.cc +++ b/src/mem/mem_interface.cc @@ -2265,7 +2265,6 @@ NVMInterface::processReadReadyEvent() } } - bool NVMInterface::burstReady(MemPacket* pkt) const { bool read_rdy = pkt->isRead() && (ctrl->inReadBusState(true)) && diff --git a/src/mem/mem_interface.hh b/src/mem/mem_interface.hh index b67cdb07d..f150f775e 100644 --- a/src/mem/mem_interface.hh +++ b/src/mem/mem_interface.hh @@ -83,7 +83,6 @@ class MemInterface : public AbstractMemory { public: - static const uint32_t NO_ROW = -1; uint32_t openRow; @@ -751,6 +750,7 @@ class DRAMInterface : public MemInterface const Tick wrToRdDlySameBG; const Tick rdToWrDlySameBG; + Enums::PageManage pageMgmt; /** * Max column accesses (read and write) per row, before forefully @@ -1098,7 +1098,6 @@ class NVMInterface : public MemInterface Stats::Histogram pendingWrites; Stats::Histogram bytesPerBank; }; - NVMStats stats; void processWriteRespondEvent(); diff --git a/src/mem/mem_master.hh b/src/mem/mem_master.hh deleted file mode 100644 index 468cd11f1..000000000 --- a/src/mem/mem_master.hh +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Copyright (c) 2018 ARM Limited - * All rights reserved - * - * The license below extends only to copyright in the software and shall - * not be construed as granting a license to any other intellectual - * property including but not limited to intellectual property relating - * to a hardware implementation of the functionality of the software - * licensed hereunder. You may use the software subject to the license - * terms below provided that you ensure that this notice is replicated - * unmodified and in its entirety in all distributions of the software, - * modified or unmodified, in source code or in binary form. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions are - * met: redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer; - * redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution; - * neither the name of the copyright holders nor the names of its - * contributors may be used to endorse or promote products derived from - * this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/** - * @file - * MasterInfo declaration. - */ - -#ifndef __MEM_MEM_MASTER_HH__ -#define __MEM_MEM_MASTER_HH__ - -#include "mem/request.hh" -#include "sim/sim_object.hh" - -/** - * The MasterInfo class contains data about a specific master. - */ -struct MasterInfo -{ - MasterInfo(const SimObject* _obj, - std::string master_name, - MasterID master_id) - : obj(_obj), masterName(master_name), masterId(master_id) - {} - - /** SimObject related to the Master */ - const SimObject* obj; - - /** Master Name */ - std::string masterName; - - /** Master ID */ - MasterID masterId; -}; - -#endif // __MEM_MEM_MASTER_HH__ diff --git a/src/mem/mem_requestor.hh b/src/mem/mem_requestor.hh new file mode 100644 index 000000000..9a52d875d --- /dev/null +++ b/src/mem/mem_requestor.hh @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2018 ARM Limited + * All rights reserved + * + * The license below extends only to copyright in the software and shall + * not be construed as granting a license to any other intellectual + * property including but not limited to intellectual property relating + * to a hardware implementation of the functionality of the software + * licensed hereunder. You may use the software subject to the license + * terms below provided that you ensure that this notice is replicated + * unmodified and in its entirety in all distributions of the software, + * modified or unmodified, in source code or in binary form. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * @file + * RequestorInfo declaration. + */ + +#ifndef __MEM_MEM_REQUESTOR_HH__ +#define __MEM_MEM_REQUESTOR_HH__ + +#include "mem/request.hh" +#include "sim/sim_object.hh" + +/** + * The RequestorInfo class contains data about a specific requestor. + */ +struct RequestorInfo +{ + RequestorInfo(const SimObject* _obj, + std::string requestor_name, + RequestorID requestor_id) + : obj(_obj), req_name(requestor_name), id(requestor_id) + {} + + /** SimObject related to the Requestor */ + const SimObject* obj; + + /** Requestor Name */ + std::string req_name; + + /** Requestor id */ + RequestorID id; +}; + +#endif // __MEM_MEM_REQUESTOR_HH__ diff --git a/src/mem/noncoherent_xbar.cc b/src/mem/noncoherent_xbar.cc index 47be02324..72d894fbb 100644 --- a/src/mem/noncoherent_xbar.cc +++ b/src/mem/noncoherent_xbar.cc @@ -53,34 +53,35 @@ NoncoherentXBar::NoncoherentXBar(const NoncoherentXBarParams *p) : BaseXBar(p) { - // create the ports based on the size of the master and slave - // vector ports, and the presence of the default port, the ports - // are enumerated starting from zero - for (int i = 0; i < p->port_master_connection_count; ++i) { - std::string portName = csprintf("%s.master[%d]", name(), i); - RequestPort* bp = new NoncoherentXBarMasterPort(portName, *this, i); - masterPorts.push_back(bp); + // create the ports based on the size of the memory-side port and + // CPU-side port vector ports, and the presence of the default port, + // the ports are enumerated starting from zero + for (int i = 0; i < p->port_mem_side_ports_connection_count; ++i) { + std::string portName = csprintf("%s.mem_side_port[%d]", name(), i); + RequestPort* bp = new NoncoherentXBarRequestPort(portName, *this, i); + memSidePorts.push_back(bp); reqLayers.push_back(new ReqLayer(*bp, *this, csprintf("reqLayer%d", i))); } - // see if we have a default slave device connected and if so add - // our corresponding master port + // see if we have a default CPU-side-port device connected and if so add + // our corresponding memory-side 
port if (p->port_default_connection_count) { - defaultPortID = masterPorts.size(); + defaultPortID = memSidePorts.size(); std::string portName = name() + ".default"; - RequestPort* bp = new NoncoherentXBarMasterPort(portName, *this, + RequestPort* bp = new NoncoherentXBarRequestPort(portName, *this, defaultPortID); - masterPorts.push_back(bp); + memSidePorts.push_back(bp); reqLayers.push_back(new ReqLayer(*bp, *this, csprintf("reqLayer%d", defaultPortID))); } - // create the slave ports, once again starting at zero - for (int i = 0; i < p->port_slave_connection_count; ++i) { - std::string portName = csprintf("%s.slave[%d]", name(), i); - QueuedSlavePort* bp = new NoncoherentXBarSlavePort(portName, *this, i); - slavePorts.push_back(bp); + // create the CPU-side ports, once again starting at zero + for (int i = 0; i < p->port_cpu_side_ports_connection_count; ++i) { + std::string portName = csprintf("%s.cpu_side_ports[%d]", name(), i); + QueuedResponsePort* bp = new NoncoherentXBarResponsePort(portName, + *this, i); + cpuSidePorts.push_back(bp); respLayers.push_back(new RespLayer(*bp, *this, csprintf("respLayer%d", i))); } @@ -95,20 +96,20 @@ NoncoherentXBar::~NoncoherentXBar() } bool -NoncoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id) +NoncoherentXBar::recvTimingReq(PacketPtr pkt, PortID cpu_side_port_id) { // determine the source port based on the id - ResponsePort *src_port = slavePorts[slave_port_id]; + ResponsePort *src_port = cpuSidePorts[cpu_side_port_id]; // we should never see express snoops on a non-coherent crossbar assert(!pkt->isExpressSnoop()); // determine the destination based on the address - PortID master_port_id = findPort(pkt->getAddrRange()); + PortID mem_side_port_id = findPort(pkt->getAddrRange()); // test if the layer should be considered occupied for the current // port - if (!reqLayers[master_port_id]->tryTiming(src_port)) { + if (!reqLayers[mem_side_port_id]->tryTiming(src_port)) { DPRINTF(NoncoherentXBar, "recvTimingReq: src %s %s 0x%x BUSY\n", src_port->name(), pkt->cmdString(), pkt->getAddr()); return false; @@ -140,7 +141,7 @@ NoncoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id) !pkt->cacheResponding(); // since it is a normal request, attempt to send the packet - bool success = masterPorts[master_port_id]->sendTimingReq(pkt); + bool success = memSidePorts[mem_side_port_id]->sendTimingReq(pkt); if (!success) { DPRINTF(NoncoherentXBar, "recvTimingReq: src %s %s 0x%x RETRY\n", @@ -150,7 +151,7 @@ NoncoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id) pkt->headerDelay = old_header_delay; // occupy until the header is sent - reqLayers[master_port_id]->failedTiming(src_port, + reqLayers[mem_side_port_id]->failedTiming(src_port, clockEdge(Cycles(1))); return false; @@ -159,35 +160,35 @@ NoncoherentXBar::recvTimingReq(PacketPtr pkt, PortID slave_port_id) // remember where to route the response to if (expect_response) { assert(routeTo.find(pkt->req) == routeTo.end()); - routeTo[pkt->req] = slave_port_id; + routeTo[pkt->req] = cpu_side_port_id; } - reqLayers[master_port_id]->succeededTiming(packetFinishTime); + reqLayers[mem_side_port_id]->succeededTiming(packetFinishTime); // stats updates - pktCount[slave_port_id][master_port_id]++; - pktSize[slave_port_id][master_port_id] += pkt_size; + pktCount[cpu_side_port_id][mem_side_port_id]++; + pktSize[cpu_side_port_id][mem_side_port_id] += pkt_size; transDist[pkt_cmd]++; return true; } bool -NoncoherentXBar::recvTimingResp(PacketPtr pkt, PortID master_port_id) 
+NoncoherentXBar::recvTimingResp(PacketPtr pkt, PortID mem_side_port_id) { // determine the source port based on the id - RequestPort *src_port = masterPorts[master_port_id]; + RequestPort *src_port = memSidePorts[mem_side_port_id]; // determine the destination const auto route_lookup = routeTo.find(pkt->req); assert(route_lookup != routeTo.end()); - const PortID slave_port_id = route_lookup->second; - assert(slave_port_id != InvalidPortID); - assert(slave_port_id < respLayers.size()); + const PortID cpu_side_port_id = route_lookup->second; + assert(cpu_side_port_id != InvalidPortID); + assert(cpu_side_port_id < respLayers.size()); // test if the layer should be considered occupied for the current // port - if (!respLayers[slave_port_id]->tryTiming(src_port)) { + if (!respLayers[cpu_side_port_id]->tryTiming(src_port)) { DPRINTF(NoncoherentXBar, "recvTimingResp: src %s %s 0x%x BUSY\n", src_port->name(), pkt->cmdString(), pkt->getAddr()); return false; @@ -210,57 +211,59 @@ NoncoherentXBar::recvTimingResp(PacketPtr pkt, PortID master_port_id) // determine how long to be crossbar layer is busy Tick packetFinishTime = clockEdge(Cycles(1)) + pkt->payloadDelay; - // send the packet through the destination slave port, and pay for + // send the packet through the destination CPU-side port, and pay for // any outstanding latency Tick latency = pkt->headerDelay; pkt->headerDelay = 0; - slavePorts[slave_port_id]->schedTimingResp(pkt, curTick() + latency); + cpuSidePorts[cpu_side_port_id]->schedTimingResp(pkt, + curTick() + latency); // remove the request from the routing table routeTo.erase(route_lookup); - respLayers[slave_port_id]->succeededTiming(packetFinishTime); + respLayers[cpu_side_port_id]->succeededTiming(packetFinishTime); // stats updates - pktCount[slave_port_id][master_port_id]++; - pktSize[slave_port_id][master_port_id] += pkt_size; + pktCount[cpu_side_port_id][mem_side_port_id]++; + pktSize[cpu_side_port_id][mem_side_port_id] += pkt_size; transDist[pkt_cmd]++; return true; } void -NoncoherentXBar::recvReqRetry(PortID master_port_id) +NoncoherentXBar::recvReqRetry(PortID mem_side_port_id) { // responses never block on forwarding them, so the retry will // always be coming from a port to which we tried to forward a // request - reqLayers[master_port_id]->recvRetry(); + reqLayers[mem_side_port_id]->recvRetry(); } Tick -NoncoherentXBar::recvAtomicBackdoor(PacketPtr pkt, PortID slave_port_id, +NoncoherentXBar::recvAtomicBackdoor(PacketPtr pkt, PortID cpu_side_port_id, MemBackdoorPtr *backdoor) { DPRINTF(NoncoherentXBar, "recvAtomic: packet src %s addr 0x%x cmd %s\n", - slavePorts[slave_port_id]->name(), pkt->getAddr(), + cpuSidePorts[cpu_side_port_id]->name(), pkt->getAddr(), pkt->cmdString()); unsigned int pkt_size = pkt->hasData() ? pkt->getSize() : 0; unsigned int pkt_cmd = pkt->cmdToIndex(); // determine the destination port - PortID master_port_id = findPort(pkt->getAddrRange()); + PortID mem_side_port_id = findPort(pkt->getAddrRange()); // stats updates for the request - pktCount[slave_port_id][master_port_id]++; - pktSize[slave_port_id][master_port_id] += pkt_size; + pktCount[cpu_side_port_id][mem_side_port_id]++; + pktSize[cpu_side_port_id][mem_side_port_id] += pkt_size; transDist[pkt_cmd]++; // forward the request to the appropriate destination - auto master = masterPorts[master_port_id]; + auto mem_side_port = memSidePorts[mem_side_port_id]; Tick response_latency = backdoor ? 
- master->sendAtomicBackdoor(pkt, *backdoor) : master->sendAtomic(pkt); + mem_side_port->sendAtomicBackdoor(pkt, *backdoor) : + mem_side_port->sendAtomic(pkt); // add the response data if (pkt->isResponse()) { @@ -268,8 +271,8 @@ NoncoherentXBar::recvAtomicBackdoor(PacketPtr pkt, PortID slave_port_id, pkt_cmd = pkt->cmdToIndex(); // stats updates - pktCount[slave_port_id][master_port_id]++; - pktSize[slave_port_id][master_port_id] += pkt_size; + pktCount[cpu_side_port_id][mem_side_port_id]++; + pktSize[cpu_side_port_id][mem_side_port_id] += pkt_size; transDist[pkt_cmd]++; } @@ -279,18 +282,18 @@ NoncoherentXBar::recvAtomicBackdoor(PacketPtr pkt, PortID slave_port_id, } void -NoncoherentXBar::recvFunctional(PacketPtr pkt, PortID slave_port_id) +NoncoherentXBar::recvFunctional(PacketPtr pkt, PortID cpu_side_port_id) { if (!pkt->isPrint()) { // don't do DPRINTFs on PrintReq as it clutters up the output DPRINTF(NoncoherentXBar, "recvFunctional: packet src %s addr 0x%x cmd %s\n", - slavePorts[slave_port_id]->name(), pkt->getAddr(), + cpuSidePorts[cpu_side_port_id]->name(), pkt->getAddr(), pkt->cmdString()); } - // since our slave ports are queued ports we need to check them as well - for (const auto& p : slavePorts) { + // since our CPU-side ports are queued ports we need to check them as well + for (const auto& p : cpuSidePorts) { // if we find a response that has the data, then the // downstream caches/memories may be out of date, so simply stop // here @@ -305,7 +308,7 @@ NoncoherentXBar::recvFunctional(PacketPtr pkt, PortID slave_port_id) PortID dest_id = findPort(pkt->getAddrRange()); // forward the request to the appropriate destination - masterPorts[dest_id]->sendFunctional(pkt); + memSidePorts[dest_id]->sendFunctional(pkt); } NoncoherentXBar* diff --git a/src/mem/noncoherent_xbar.hh b/src/mem/noncoherent_xbar.hh index 1d3541ca1..c2fd95ae3 100644 --- a/src/mem/noncoherent_xbar.hh +++ b/src/mem/noncoherent_xbar.hh @@ -50,13 +50,13 @@ #include "params/NoncoherentXBar.hh" /** - * A non-coherent crossbar connects a number of non-snooping masters - * and slaves, and routes the request and response packets based on - * the address. The request packets issued by the master connected to + * A non-coherent crossbar connects a number of non-snooping memory-side ports + * and cpu_sides, and routes the request and response packets based on + * the address. The request packets issued by the memory-side port connected to * a non-coherent crossbar could still snoop in caches attached to a * coherent crossbar, as is the case with the I/O bus and memory bus * in most system configurations. No snoops will, however, reach any - * master on the non-coherent crossbar itself. + * memory-side port on the non-coherent crossbar itself. * * The non-coherent crossbar can be used as a template for modelling * PCIe, and non-coherent AMBA and OCP buses, and is typically used @@ -75,11 +75,11 @@ class NoncoherentXBar : public BaseXBar std::vector respLayers; /** - * Declaration of the non-coherent crossbar slave port type, one - * will be instantiated for each of the master ports connecting to + * Declaration of the non-coherent crossbar CPU-side port type, one + * will be instantiated for each of the memory-side ports connecting to * the crossbar. 
*/ - class NoncoherentXBarSlavePort : public QueuedSlavePort + class NoncoherentXBarResponsePort : public QueuedResponsePort { private: @@ -91,9 +91,9 @@ class NoncoherentXBar : public BaseXBar public: - NoncoherentXBarSlavePort(const std::string &_name, + NoncoherentXBarResponsePort(const std::string &_name, NoncoherentXBar &_xbar, PortID _id) - : QueuedSlavePort(_name, &_xbar, queue, _id), xbar(_xbar), + : QueuedResponsePort(_name, &_xbar, queue, _id), xbar(_xbar), queue(_xbar, *this) { } @@ -131,11 +131,11 @@ class NoncoherentXBar : public BaseXBar }; /** - * Declaration of the crossbar master port type, one will be - * instantiated for each of the slave ports connecting to the + * Declaration of the crossbar memory-side port type, one will be + * instantiated for each of the CPU-side ports connecting to the * crossbar. */ - class NoncoherentXBarMasterPort : public RequestPort + class NoncoherentXBarRequestPort : public RequestPort { private: @@ -144,7 +144,7 @@ class NoncoherentXBar : public BaseXBar public: - NoncoherentXBarMasterPort(const std::string &_name, + NoncoherentXBarRequestPort(const std::string &_name, NoncoherentXBar &_xbar, PortID _id) : RequestPort(_name, &_xbar, _id), xbar(_xbar) { } @@ -170,12 +170,12 @@ class NoncoherentXBar : public BaseXBar } }; - virtual bool recvTimingReq(PacketPtr pkt, PortID slave_port_id); - virtual bool recvTimingResp(PacketPtr pkt, PortID master_port_id); - void recvReqRetry(PortID master_port_id); - Tick recvAtomicBackdoor(PacketPtr pkt, PortID slave_port_id, + virtual bool recvTimingReq(PacketPtr pkt, PortID cpu_side_port_id); + virtual bool recvTimingResp(PacketPtr pkt, PortID mem_side_port_id); + void recvReqRetry(PortID mem_side_port_id); + Tick recvAtomicBackdoor(PacketPtr pkt, PortID cpu_side_port_id, MemBackdoorPtr *backdoor=nullptr); - void recvFunctional(PacketPtr pkt, PortID slave_port_id); + void recvFunctional(PacketPtr pkt, PortID cpu_side_port_id); public: diff --git a/src/mem/packet.hh b/src/mem/packet.hh index 4ded3b360..0f14816dd 100644 --- a/src/mem/packet.hh +++ b/src/mem/packet.hh @@ -250,7 +250,7 @@ class MemCmd /** * A Packet is used to encapsulate a transfer between two objects in * the memory system (e.g., the L1 and L2 cache). (In contrast, a - * single Request travels all the way from the requester to the + * single Request travels all the way from the requestor to the * ultimate destination and back, possibly being conveyed by several * different Packets along the way.) */ @@ -737,7 +737,7 @@ class Packet : public Printable inline void qosValue(const uint8_t qos_value) { _qosValue = qos_value; } - inline MasterID masterId() const { return req->masterId(); } + inline RequestorID requestorId() const { return req->requestorId(); } // Network error conditions... 
encapsulate them as methods since // their encoding keeps changing (from result field to command diff --git a/src/mem/packet_queue.cc b/src/mem/packet_queue.cc index fe08d4943..ecc665320 100644 --- a/src/mem/packet_queue.cc +++ b/src/mem/packet_queue.cc @@ -232,44 +232,45 @@ PacketQueue::drain() } } -ReqPacketQueue::ReqPacketQueue(EventManager& _em, RequestPort& _masterPort, +ReqPacketQueue::ReqPacketQueue(EventManager& _em, RequestPort& _mem_side_port, const std::string _label) - : PacketQueue(_em, _label, name(_masterPort, _label)), - masterPort(_masterPort) + : PacketQueue(_em, _label, name(_mem_side_port, _label)), + memSidePort(_mem_side_port) { } bool ReqPacketQueue::sendTiming(PacketPtr pkt) { - return masterPort.sendTimingReq(pkt); + return memSidePort.sendTimingReq(pkt); } SnoopRespPacketQueue::SnoopRespPacketQueue(EventManager& _em, - RequestPort& _masterPort, + RequestPort& _mem_side_port, bool force_order, const std::string _label) - : PacketQueue(_em, _label, name(_masterPort, _label), force_order), - masterPort(_masterPort) + : PacketQueue(_em, _label, name(_mem_side_port, _label), force_order), + memSidePort(_mem_side_port) { } bool SnoopRespPacketQueue::sendTiming(PacketPtr pkt) { - return masterPort.sendTimingSnoopResp(pkt); + return memSidePort.sendTimingSnoopResp(pkt); } -RespPacketQueue::RespPacketQueue(EventManager& _em, ResponsePort& _slavePort, +RespPacketQueue::RespPacketQueue(EventManager& _em, + ResponsePort& _cpu_side_port, bool force_order, const std::string _label) - : PacketQueue(_em, _label, name(_slavePort, _label), force_order), - slavePort(_slavePort) + : PacketQueue(_em, _label, name(_cpu_side_port, _label), force_order), + cpuSidePort(_cpu_side_port) { } bool RespPacketQueue::sendTiming(PacketPtr pkt) { - return slavePort.sendTimingResp(pkt); + return cpuSidePort.sendTimingResp(pkt); } diff --git a/src/mem/packet_queue.hh b/src/mem/packet_queue.hh index 1fc92a250..baf615260 100644 --- a/src/mem/packet_queue.hh +++ b/src/mem/packet_queue.hh @@ -57,7 +57,7 @@ /** * A packet queue is a class that holds deferred packets and later - * sends them using the associated slave port or master port. + * sends them using the associated CPU-side port or memory-side port. */ class PacketQueue : public Drainable { @@ -124,7 +124,7 @@ class PacketQueue : public Drainable /** * Send a packet using the appropriate method for the specific - * subclass (reuest, response or snoop response). + * subclass (request, response or snoop response). */ virtual bool sendTiming(PacketPtr pkt) = 0; @@ -224,32 +224,32 @@ class ReqPacketQueue : public PacketQueue protected: - RequestPort& masterPort; + RequestPort& memSidePort; // Static definition so it can be called when constructing the parent // without us being completely initialized. - static const std::string name(const RequestPort& masterPort, + static const std::string name(const RequestPort& memSidePort, const std::string& label) - { return masterPort.name() + "-" + label; } + { return memSidePort.name() + "-" + label; } public: /** * Create a request packet queue, linked to an event manager, a - * master port, and a label that will be used for functional print + * memory-side port, and a label that will be used for functional print * request packets. 
* * @param _em Event manager used for scheduling this queue - * @param _masterPort Master port used to send the packets + * @param _mem_side_port Mem_side port used to send the packets * @param _label Label to push on the label stack for print request packets */ - ReqPacketQueue(EventManager& _em, RequestPort& _masterPort, + ReqPacketQueue(EventManager& _em, RequestPort& _mem_side_port, const std::string _label = "ReqPacketQueue"); virtual ~ReqPacketQueue() { } const std::string name() const - { return name(masterPort, label); } + { return name(memSidePort, label); } bool sendTiming(PacketPtr pkt); @@ -260,34 +260,34 @@ class SnoopRespPacketQueue : public PacketQueue protected: - RequestPort& masterPort; + RequestPort& memSidePort; // Static definition so it can be called when constructing the parent // without us being completely initialized. - static const std::string name(const RequestPort& masterPort, + static const std::string name(const RequestPort& memSidePort, const std::string& label) - { return masterPort.name() + "-" + label; } + { return memSidePort.name() + "-" + label; } public: /** * Create a snoop response packet queue, linked to an event - * manager, a master port, and a label that will be used for + * manager, a memory-side port, and a label that will be used for * functional print request packets. * * @param _em Event manager used for scheduling this queue - * @param _masterPort Master port used to send the packets + * @param _mem_side_port memory-side port used to send the packets * @param force_order Force insertion order for packets with same address * @param _label Label to push on the label stack for print request packets */ - SnoopRespPacketQueue(EventManager& _em, RequestPort& _masterPort, + SnoopRespPacketQueue(EventManager& _em, RequestPort& _mem_side_port, bool force_order = false, const std::string _label = "SnoopRespPacketQueue"); virtual ~SnoopRespPacketQueue() { } const std::string name() const - { return name(masterPort, label); } + { return name(memSidePort, label); } bool sendTiming(PacketPtr pkt); @@ -298,34 +298,34 @@ class RespPacketQueue : public PacketQueue protected: - ResponsePort& slavePort; + ResponsePort& cpuSidePort; // Static definition so it can be called when constructing the parent // without us being completely initialized. - static const std::string name(const ResponsePort& slavePort, + static const std::string name(const ResponsePort& cpuSidePort, const std::string& label) - { return slavePort.name() + "-" + label; } + { return cpuSidePort.name() + "-" + label; } public: /** * Create a response packet queue, linked to an event manager, a - * slave port, and a label that will be used for functional print + * CPU-side port, and a label that will be used for functional print * request packets. 
* * @param _em Event manager used for scheduling this queue - * @param _slavePort Slave port used to send the packets + * @param _cpu_side_port Cpu_side port used to send the packets * @param force_order Force insertion order for packets with same address * @param _label Label to push on the label stack for print request packets */ - RespPacketQueue(EventManager& _em, ResponsePort& _slavePort, + RespPacketQueue(EventManager& _em, ResponsePort& _cpu_side_port, bool force_order = false, const std::string _label = "RespPacketQueue"); virtual ~RespPacketQueue() { } const std::string name() const - { return name(slavePort, label); } + { return name(cpuSidePort, label); } bool sendTiming(PacketPtr pkt); diff --git a/src/mem/port.cc b/src/mem/port.cc index b9015717a..e5d8308d2 100644 --- a/src/mem/port.cc +++ b/src/mem/port.cc @@ -154,7 +154,7 @@ void RequestPort::printAddr(Addr a) { auto req = std::make_shared( - a, 1, 0, Request::funcMasterId); + a, 1, 0, Request::funcRequestorId); Packet pkt(req, MemCmd::PrintReq); Packet::PrintReqState prs(std::cerr); diff --git a/src/mem/port_proxy.cc b/src/mem/port_proxy.cc index b5fce1975..f12ba8b8f 100644 --- a/src/mem/port_proxy.cc +++ b/src/mem/port_proxy.cc @@ -47,7 +47,7 @@ PortProxy::readBlobPhys(Addr addr, Request::Flags flags, gen.next()) { auto req = std::make_shared( - gen.addr(), gen.size(), flags, Request::funcMasterId); + gen.addr(), gen.size(), flags, Request::funcRequestorId); Packet pkt(req, MemCmd::ReadReq); pkt.dataStatic(static_cast(p)); @@ -64,7 +64,7 @@ PortProxy::writeBlobPhys(Addr addr, Request::Flags flags, gen.next()) { auto req = std::make_shared( - gen.addr(), gen.size(), flags, Request::funcMasterId); + gen.addr(), gen.size(), flags, Request::funcRequestorId); Packet pkt(req, MemCmd::WriteReq); pkt.dataStaticConst(static_cast(p)); diff --git a/src/mem/probes/MemTraceProbe.py b/src/mem/probes/MemTraceProbe.py index ba791ebb1..2225c36c7 100644 --- a/src/mem/probes/MemTraceProbe.py +++ b/src/mem/probes/MemTraceProbe.py @@ -50,5 +50,5 @@ class MemTraceProbe(BaseMemProbe): # packet trace output file, disabled by default trace_file = Param.String("", "Packet trace output file") - # System object to look up the name associated with a master ID + # System object to look up the name associated with a requestor ID system = Param.System(Parent.any, "System the probe belongs to") diff --git a/src/mem/probes/mem_trace.cc b/src/mem/probes/mem_trace.cc index a11b9e60a..dbfb685bc 100644 --- a/src/mem/probes/mem_trace.cc +++ b/src/mem/probes/mem_trace.cc @@ -86,10 +86,10 @@ MemTraceProbe::startup() header_msg.set_obj_id(name()); header_msg.set_tick_freq(SimClock::Frequency); - for (int i = 0; i < system->maxMasters(); i++) { + for (int i = 0; i < system->maxRequestors(); i++) { auto id_string = header_msg.add_id_strings(); id_string->set_key(i); - id_string->set_value(system->getMasterName(i)); + id_string->set_value(system->getRequestorName(i)); } traceStream->write(header_msg); @@ -114,7 +114,7 @@ MemTraceProbe::handleRequest(const ProbePoints::PacketInfo &pkt_info) pkt_msg.set_size(pkt_info.size); if (withPC && pkt_info.pc != 0) pkt_msg.set_pc(pkt_info.pc); - pkt_msg.set_pkt_id(pkt_info.master); + pkt_msg.set_pkt_id(pkt_info.id); traceStream->write(pkt_msg); } diff --git a/src/mem/qos/QoSMemCtrl.py b/src/mem/qos/QoSMemCtrl.py index f55105bef..e4826d6ba 100644 --- a/src/mem/qos/QoSMemCtrl.py +++ b/src/mem/qos/QoSMemCtrl.py @@ -69,7 +69,7 @@ class QoSMemCtrl(ClockedObject): "Memory Controller Requests same-QoS selection policy") # flag to 
select QoS syncronised scheduling - # (calls the scheduler on all masters at every packet arrival) + # (calls the scheduler on all requestors at every packet arrival) qos_syncro_scheduler = Param.Bool(False, "Enables QoS syncronized scheduling") @@ -77,6 +77,8 @@ class QoSMemCtrl(ClockedObject): qos_priority_escalation = Param.Bool(False, "Enables QoS priority escalation") - # Master ID to be mapped to service parameters in QoS schedulers - qos_masters = VectorParam.String(['']* 16, - "Master Names to be mapped to service parameters in QoS scheduler") + # Requestor ID to be mapped to service parameters in QoS schedulers + qos_requestors = VectorParam.String(['']* 16, + "Requestor Names to be mapped to service parameters in QoS scheduler") + qos_masters = DeprecatedParam(qos_requestors, + '`qos_master` is now called `qos_requestors`') diff --git a/src/mem/qos/QoSPolicy.py b/src/mem/qos/QoSPolicy.py index ed72043db..0945cd7a4 100644 --- a/src/mem/qos/QoSPolicy.py +++ b/src/mem/qos/QoSPolicy.py @@ -51,36 +51,43 @@ class QoSFixedPriorityPolicy(QoSPolicy): cxx_class = 'QoS::FixedPriorityPolicy' cxx_exports = [ - PyBindMethod('initMasterName'), - PyBindMethod('initMasterObj'), + PyBindMethod('initRequestorName'), + PyBindMethod('initRequestorObj'), ] - _mpriorities = None + _requestor_priorities = None - def setMasterPriority(self, master, priority): - if not self._mpriorities: - self._mpriorities = [] + def setRequestorPriority(self, request_port, priority): + if not self._requestor_priorities: + self._requestor_priorities = [] - self._mpriorities.append([master, priority]) + self._requestor_priorities.append([request_port, priority]) + + def setMasterPriority(self, request_port, priority): + warn('QosFixedPriority.setMasterPriority is deprecated in favor of ' + 'setRequestorPriority. 
See src/mem/qos/QoSPolicy.py for more ' + 'information') + self.setRequestorPriority(request_port, priority) def init(self): - if not self._mpriorities: - print("Error, use setMasterPriority to init masters/priorities\n"); + if not self._requestor_priorities: + print("Error," + "use setRequestorPriority to init requestors/priorities\n"); exit(1) else: - for mprio in self._mpriorities: - master = mprio[0] - priority = mprio[1] - if isinstance(master, string_types): - self.getCCObject().initMasterName( - master, int(priority)) + for prio in self._requestor_priorities: + request_port = prio[0] + priority = prio[1] + if isinstance(request_port, string_types): + self.getCCObject().initRequestorName( + request_port, int(priority)) else: - self.getCCObject().initMasterObj( - master.getCCObject(), priority) + self.getCCObject().initRequestorObj( + request_port.getCCObject(), priority) - # default fixed priority value for non-listed Masters + # default fixed priority value for non-listed Requestors qos_fixed_prio_default_prio = Param.UInt8(0, - "Default priority for non-listed Masters") + "Default priority for non-listed Requestors") class QoSPropFairPolicy(QoSPolicy): type = 'QoSPropFairPolicy' @@ -88,31 +95,31 @@ class QoSPropFairPolicy(QoSPolicy): cxx_class = 'QoS::PropFairPolicy' cxx_exports = [ - PyBindMethod('initMasterName'), - PyBindMethod('initMasterObj'), + PyBindMethod('initRequestorName'), + PyBindMethod('initRequestorObj'), ] - _mscores = None + _requestor_scores = None - def setInitialScore(self, master, score): - if not self._mscores: - self._mscores = [] + def setInitialScore(self, request_port, score): + if not self._requestor_scores: + self._requestor_scores = [] - self._mscores.append([master, score]) + self._requestor_scores.append([request_port, score]) def init(self): - if not self._mscores: - print("Error, use setInitialScore to init masters/scores\n"); + if not self._requestor_scores: + print("Error, use setInitialScore to init requestors/scores\n"); exit(1) else: - for mprio in self._mscores: - master = mprio[0] - score = mprio[1] - if isinstance(master, string_types): - self.getCCObject().initMasterName( - master, float(score)) + for prio in self._requestor_scores: + request_port = prio[0] + score = prio[1] + if isinstance(request_port, string_types): + self.getCCObject().initRequestorName( + request_port, float(score)) else: - self.getCCObject().initMasterObj( - master.getCCObject(), float(score)) + self.getCCObject().initRequestorObj( + request_port.getCCObject(), float(score)) weight = Param.Float(0.5, "Pf score weight") diff --git a/src/mem/qos/mem_ctrl.cc b/src/mem/qos/mem_ctrl.cc index 190960bc1..b5caf6ef6 100644 --- a/src/mem/qos/mem_ctrl.cc +++ b/src/mem/qos/mem_ctrl.cc @@ -78,18 +78,18 @@ MemCtrl::~MemCtrl() {} void -MemCtrl::logRequest(BusState dir, MasterID m_id, uint8_t qos, +MemCtrl::logRequest(BusState dir, RequestorID id, uint8_t qos, Addr addr, uint64_t entries) { // If needed, initialize all counters and statistics - // for this master - addMaster(m_id); + // for this requestor + addRequestor(id); DPRINTF(QOS, - "QoSMemCtrl::logRequest MASTER %s [id %d] address %d" - " prio %d this master q packets %d" + "QoSMemCtrl::logRequest REQUESTOR %s [id %d] address %d" + " prio %d this requestor q packets %d" " - queue size %d - requested entries %d\n", - masters[m_id], m_id, addr, qos, packetPriorities[m_id][qos], + requestors[id], id, addr, qos, packetPriorities[id][qos], (dir == READ) ? 
readQueueSizes[qos]: writeQueueSizes[qos], entries); @@ -101,51 +101,51 @@ MemCtrl::logRequest(BusState dir, MasterID m_id, uint8_t qos, totalWriteQueueSize += entries; } - packetPriorities[m_id][qos] += entries; + packetPriorities[id][qos] += entries; for (auto j = 0; j < entries; ++j) { - requestTimes[m_id][addr].push_back(curTick()); + requestTimes[id][addr].push_back(curTick()); } // Record statistics - stats.avgPriority[m_id].sample(qos); + stats.avgPriority[id].sample(qos); // Compute avg priority distance - for (uint8_t i = 0; i < packetPriorities[m_id].size(); ++i) { + for (uint8_t i = 0; i < packetPriorities[id].size(); ++i) { uint8_t distance = - (abs(int(qos) - int(i))) * packetPriorities[m_id][i]; + (abs(int(qos) - int(i))) * packetPriorities[id][i]; if (distance > 0) { - stats.avgPriorityDistance[m_id].sample(distance); + stats.avgPriorityDistance[id].sample(distance); DPRINTF(QOS, - "QoSMemCtrl::logRequest MASTER %s [id %d]" + "QoSMemCtrl::logRequest REQUESTOR %s [id %d]" " registering priority distance %d for priority %d" " (packets %d)\n", - masters[m_id], m_id, distance, i, - packetPriorities[m_id][i]); + requestors[id], id, distance, i, + packetPriorities[id][i]); } } DPRINTF(QOS, - "QoSMemCtrl::logRequest MASTER %s [id %d] prio %d " - "this master q packets %d - new queue size %d\n", - masters[m_id], m_id, qos, packetPriorities[m_id][qos], + "QoSMemCtrl::logRequest REQUESTOR %s [id %d] prio %d " + "this requestor q packets %d - new queue size %d\n", + requestors[id], id, qos, packetPriorities[id][qos], (dir == READ) ? readQueueSizes[qos]: writeQueueSizes[qos]); } void -MemCtrl::logResponse(BusState dir, MasterID m_id, uint8_t qos, +MemCtrl::logResponse(BusState dir, RequestorID id, uint8_t qos, Addr addr, uint64_t entries, double delay) { - panic_if(!hasMaster(m_id), - "Logging response with invalid master\n"); + panic_if(!hasRequestor(id), + "Logging response with invalid requestor\n"); DPRINTF(QOS, - "QoSMemCtrl::logResponse MASTER %s [id %d] address %d prio" - " %d this master q packets %d" + "QoSMemCtrl::logResponse REQUESTOR %s [id %d] address %d prio" + " %d this requestor q packets %d" " - queue size %d - requested entries %d\n", - masters[m_id], m_id, addr, qos, packetPriorities[m_id][qos], + requestors[id], id, addr, qos, packetPriorities[id][qos], (dir == READ) ? 
readQueueSizes[qos]: writeQueueSizes[qos], entries); @@ -157,17 +157,17 @@ MemCtrl::logResponse(BusState dir, MasterID m_id, uint8_t qos, totalWriteQueueSize -= entries; } - panic_if(packetPriorities[m_id][qos] == 0, - "QoSMemCtrl::logResponse master %s negative packets for priority" - " %d", masters[m_id], qos); + panic_if(packetPriorities[id][qos] == 0, + "QoSMemCtrl::logResponse requestor %s negative packets " + "for priority %d", requestors[id], qos); - packetPriorities[m_id][qos] -= entries; + packetPriorities[id][qos] -= entries; for (auto j = 0; j < entries; ++j) { - auto it = requestTimes[m_id].find(addr); - panic_if(it == requestTimes[m_id].end(), - "QoSMemCtrl::logResponse master %s unmatched response for" - " address %d received", masters[m_id], addr); + auto it = requestTimes[id].find(addr); + panic_if(it == requestTimes[id].end(), + "QoSMemCtrl::logResponse requestor %s unmatched response for" + " address %d received", requestors[id], addr); // Load request time uint64_t requestTime = it->second.front(); @@ -177,7 +177,7 @@ MemCtrl::logResponse(BusState dir, MasterID m_id, uint8_t qos, // Remove whole address entry if last one if (it->second.empty()) { - requestTimes[m_id].erase(it); + requestTimes[id].erase(it); } // Compute latency double latency = (double) (curTick() + delay - requestTime) @@ -197,22 +197,22 @@ MemCtrl::logResponse(BusState dir, MasterID m_id, uint8_t qos, } DPRINTF(QOS, - "QoSMemCtrl::logResponse MASTER %s [id %d] prio %d " - "this master q packets %d - new queue size %d\n", - masters[m_id], m_id, qos, packetPriorities[m_id][qos], + "QoSMemCtrl::logResponse REQUESTOR %s [id %d] prio %d " + "this requestor q packets %d - new queue size %d\n", + requestors[id], id, qos, packetPriorities[id][qos], (dir == READ) ? readQueueSizes[qos]: writeQueueSizes[qos]); } uint8_t -MemCtrl::schedule(MasterID m_id, uint64_t data) +MemCtrl::schedule(RequestorID id, uint64_t data) { if (policy) { - return policy->schedule(m_id, data); + return policy->schedule(id, data); } else { DPRINTF(QOS, - "QoSScheduler::schedule master ID [%d] " + "QoSScheduler::schedule requestor id [%d] " "data received [%d], but QoS scheduler not initialized\n", - m_id,data); + id,data); return 0; } } @@ -223,7 +223,7 @@ MemCtrl::schedule(const PacketPtr pkt) assert(pkt->req); if (policy) { - return schedule(pkt->req->masterId(), pkt->getSize()); + return schedule(pkt->req->requestorId(), pkt->getSize()); } else { DPRINTF(QOS, "QoSScheduler::schedule Packet received [Qv %d], " "but QoS scheduler not initialized\n", @@ -261,16 +261,16 @@ MemCtrl::selectNextBusState() } void -MemCtrl::addMaster(MasterID m_id) +MemCtrl::addRequestor(RequestorID id) { - if (!hasMaster(m_id)) { - masters.emplace(m_id, _system->getMasterName(m_id)); - packetPriorities[m_id].resize(numPriorities(), 0); + if (!hasRequestor(id)) { + requestors.emplace(id, _system->getRequestorName(id)); + packetPriorities[id].resize(numPriorities(), 0); DPRINTF(QOS, - "QoSMemCtrl::addMaster registering" - " Master %s [id %d]\n", - masters[m_id], m_id); + "QoSMemCtrl::addRequestor registering" + " Requestor %s [id %d]\n", + requestors[id], id); } } @@ -307,18 +307,18 @@ MemCtrl::MemCtrlStats::regStats() using namespace Stats; System *system = memCtrl._system; - const auto max_masters = system->maxMasters(); + const auto max_requestors = system->maxRequestors(); const auto num_priorities = memCtrl.numPriorities(); - // Initializes per master statistics + // Initializes per requestor statistics avgPriority - .init(max_masters) + 
.init(max_requestors) .flags(nozero | nonan) .precision(2) ; avgPriorityDistance - .init(max_masters) + .init(max_requestors) .flags(nozero | nonan) ; @@ -332,10 +332,10 @@ MemCtrl::MemCtrlStats::regStats() .precision(12) ; - for (int i = 0; i < max_masters; i++) { - const std::string master = system->getMasterName(i); - avgPriority.subname(i, master); - avgPriorityDistance.subname(i, master); + for (int i = 0; i < max_requestors; i++) { + const std::string name = system->getRequestorName(i); + avgPriority.subname(i, name); + avgPriorityDistance.subname(i, name); } for (int j = 0; j < num_priorities; ++j) { diff --git a/src/mem/qos/mem_ctrl.hh b/src/mem/qos/mem_ctrl.hh index 5d7c9d632..d472f200e 100644 --- a/src/mem/qos/mem_ctrl.hh +++ b/src/mem/qos/mem_ctrl.hh @@ -80,18 +80,18 @@ class MemCtrl : public ClockedObject /** * Enables QoS synchronized scheduling invokes the QoS scheduler - * on all masters, at every packet arrival. + * on all requestors, at every packet arrival. */ const bool qosSyncroScheduler; - /** Hash of master ID - master name */ - std::unordered_map masters; + /** Hash of requestor ID - requestor name */ + std::unordered_map requestors; - /** Hash of masters - number of packets queued per priority */ - std::unordered_map > packetPriorities; + /** Hash of requestors - number of packets queued per priority */ + std::unordered_map > packetPriorities; - /** Hash of masters - address of request - queue of times of request */ - std::unordered_map> > requestTimes; /** @@ -129,10 +129,10 @@ class MemCtrl : public ClockedObject const MemCtrl &memCtrl; - /** per-master average QoS priority */ + /** per-requestor average QoS priority */ Stats::VectorStandardDeviation avgPriority; /** - * per-master average QoS distance between assigned and + * per-requestor average QoS distance between assigned and * queued values */ Stats::VectorStandardDeviation avgPriorityDistance; @@ -156,23 +156,23 @@ class MemCtrl : public ClockedObject /** * Initializes dynamically counters and - * statistics for a given Master + * statistics for a given Requestor * - * @param m_id the master ID + * @param id the requestor's ID */ - void addMaster(const MasterID m_id); + void addRequestor(const RequestorID id); /** * Called upon receiving a request or * updates statistics and updates queues status * * @param dir request direction - * @param m_id master id + * @param id requestor id * @param qos packet qos value * @param addr packet address * @param entries number of entries to record */ - void logRequest(BusState dir, MasterID m_id, uint8_t qos, + void logRequest(BusState dir, RequestorID id, uint8_t qos, Addr addr, uint64_t entries); /** @@ -180,13 +180,13 @@ class MemCtrl : public ClockedObject * updates statistics and updates queues status * * @param dir response direction - * @param m_id master id + * @param id requestor id * @param qos packet qos value * @param addr packet address * @param entries number of entries to record * @param delay response delay */ - void logResponse(BusState dir, MasterID m_id, uint8_t qos, + void logResponse(BusState dir, RequestorID id, uint8_t qos, Addr addr, uint64_t entries, double delay); /** @@ -203,7 +203,7 @@ class MemCtrl : public ClockedObject uint64_t queue_entry_size, const PacketPtr pkt); using SimObject::schedule; - uint8_t schedule(MasterID m_id, uint64_t data); + uint8_t schedule(RequestorID id, uint64_t data); uint8_t schedule(const PacketPtr pkt); /** @@ -226,22 +226,22 @@ class MemCtrl : public ClockedObject /** * Escalates/demotes priority of all packets 
- * belonging to the passed master to given + * belonging to the passed requestor to given * priority value * * @param queues list of pointers to packet queues * @param queue_entry_size size of an entry in the queue - * @param m_id master whose packets priority will change + * @param id requestor whose packets priority will change * @param tgt_prio target priority value */ template void escalate(std::initializer_list queues, uint64_t queue_entry_size, - MasterID m_id, uint8_t tgt_prio); + RequestorID id, uint8_t tgt_prio); /** * Escalates/demotes priority of all packets - * belonging to the passed master to given + * belonging to the passed requestor to given * priority value in a specified cluster of queues * (e.g. read queues or write queues) which is passed * as an argument to the function. @@ -250,13 +250,13 @@ class MemCtrl : public ClockedObject * * @param queues reference to packet queues * @param queue_entry_size size of an entry in the queue - * @param m_id master whose packets priority will change + * @param id requestor whose packets priority will change * @param curr_prio source queue priority value * @param tgt_prio target queue priority value */ template void escalateQueues(Queues& queues, uint64_t queue_entry_size, - MasterID m_id, uint8_t curr_prio, uint8_t tgt_prio); + RequestorID id, uint8_t curr_prio, uint8_t tgt_prio); public: /** @@ -283,18 +283,18 @@ class MemCtrl : public ClockedObject BusState getBusStateNext() const { return busStateNext; } /** - * hasMaster returns true if the selected master(ID) has + * hasRequestor returns true if the selected requestor(ID) has * been registered in the memory controller, which happens if * the memory controller has received at least a packet from - * that master. + * that requestor. * - * @param m_id master id to lookup + * @param id requestor id to lookup * @return true if the memory controller has received a packet - * from the master, false otherwise. + * from the requestor, false otherwise. 
*/ - bool hasMaster(MasterID m_id) const + bool hasRequestor(RequestorID id) const { - return masters.find(m_id) != masters.end(); + return requestors.find(id) != requestors.end(); } /** @@ -353,50 +353,50 @@ class MemCtrl : public ClockedObject template void MemCtrl::escalateQueues(Queues& queues, uint64_t queue_entry_size, - MasterID m_id, uint8_t curr_prio, uint8_t tgt_prio) + RequestorID id, uint8_t curr_prio, uint8_t tgt_prio) { auto it = queues[curr_prio].begin(); while (it != queues[curr_prio].end()) { // No packets left to move - if (packetPriorities[m_id][curr_prio] == 0) + if (packetPriorities[id][curr_prio] == 0) break; auto pkt = *it; DPRINTF(QOS, "QoSMemCtrl::escalate checking priority %d packet " - "m_id %d address %d\n", curr_prio, - pkt->masterId(), pkt->getAddr()); + "id %d address %d\n", curr_prio, + pkt->requestorId(), pkt->getAddr()); // Found a packet to move - if (pkt->masterId() == m_id) { + if (pkt->requestorId() == id) { uint64_t moved_entries = divCeil(pkt->getSize(), queue_entry_size); DPRINTF(QOS, - "QoSMemCtrl::escalate Master %s [id %d] moving " + "QoSMemCtrl::escalate Requestor %s [id %d] moving " "packet addr %d size %d (p size %d) from priority %d " "to priority %d - " - "this master packets %d (entries to move %d)\n", - masters[m_id], m_id, pkt->getAddr(), + "this requestor packets %d (entries to move %d)\n", + requestors[id], id, pkt->getAddr(), pkt->getSize(), queue_entry_size, curr_prio, tgt_prio, - packetPriorities[m_id][curr_prio], moved_entries); + packetPriorities[id][curr_prio], moved_entries); if (pkt->isRead()) { panic_if(readQueueSizes[curr_prio] < moved_entries, - "QoSMemCtrl::escalate master %s negative READ " + "QoSMemCtrl::escalate requestor %s negative READ " "packets for priority %d", - masters[m_id], tgt_prio); + requestors[id], tgt_prio); readQueueSizes[curr_prio] -= moved_entries; readQueueSizes[tgt_prio] += moved_entries; } else if (pkt->isWrite()) { panic_if(writeQueueSizes[curr_prio] < moved_entries, - "QoSMemCtrl::escalate master %s negative WRITE " + "QoSMemCtrl::escalate requestor %s negative WRITE " "packets for priority %d", - masters[m_id], tgt_prio); + requestors[id], tgt_prio); writeQueueSizes[curr_prio] -= moved_entries; writeQueueSizes[tgt_prio] += moved_entries; } @@ -408,13 +408,13 @@ MemCtrl::escalateQueues(Queues& queues, uint64_t queue_entry_size, // Erase element from source packet queue, this will // increment the iterator it = queues[curr_prio].erase(it); - panic_if(packetPriorities[m_id][curr_prio] < moved_entries, - "QoSMemCtrl::escalate master %s negative packets " + panic_if(packetPriorities[id][curr_prio] < moved_entries, + "QoSMemCtrl::escalate requestor %s negative packets " "for priority %d", - masters[m_id], tgt_prio); + requestors[id], tgt_prio); - packetPriorities[m_id][curr_prio] -= moved_entries; - packetPriorities[m_id][tgt_prio] += moved_entries; + packetPriorities[id][curr_prio] -= moved_entries; + packetPriorities[id][tgt_prio] += moved_entries; } else { // Increment iterator to next location in the queue it++; @@ -426,16 +426,16 @@ template void MemCtrl::escalate(std::initializer_list queues, uint64_t queue_entry_size, - MasterID m_id, uint8_t tgt_prio) + RequestorID id, uint8_t tgt_prio) { // If needed, initialize all counters and statistics - // for this master - addMaster(m_id); + // for this requestor + addRequestor(id); DPRINTF(QOS, - "QoSMemCtrl::escalate Master %s [id %d] to priority " - "%d (currently %d packets)\n",masters[m_id], m_id, tgt_prio, - packetPriorities[m_id][tgt_prio]); + 
"QoSMemCtrl::escalate Requestor %s [id %d] to priority " + "%d (currently %d packets)\n",requestors[id], id, tgt_prio, + packetPriorities[id][tgt_prio]); for (uint8_t curr_prio = 0; curr_prio < numPriorities(); ++curr_prio) { // Skip target priority @@ -443,30 +443,30 @@ MemCtrl::escalate(std::initializer_list queues, continue; // Process other priority packet - while (packetPriorities[m_id][curr_prio] > 0) { + while (packetPriorities[id][curr_prio] > 0) { DPRINTF(QOS, "QoSMemCtrl::escalate MID %d checking priority %d " "(packets %d)- current packets in prio %d: %d\n" "\t(source read %d source write %d target read %d, " "target write %d)\n", - m_id, curr_prio, packetPriorities[m_id][curr_prio], - tgt_prio, packetPriorities[m_id][tgt_prio], + id, curr_prio, packetPriorities[id][curr_prio], + tgt_prio, packetPriorities[id][tgt_prio], readQueueSizes[curr_prio], writeQueueSizes[curr_prio], readQueueSizes[tgt_prio], writeQueueSizes[tgt_prio]); // Check both read and write queue for (auto q : queues) { - escalateQueues(*q, queue_entry_size, m_id, + escalateQueues(*q, queue_entry_size, id, curr_prio, tgt_prio); } } } DPRINTF(QOS, - "QoSMemCtrl::escalate Completed master %s [id %d] to priority %d " - "(now %d packets)\n\t(total read %d, total write %d)\n", - masters[m_id], m_id, tgt_prio, packetPriorities[m_id][tgt_prio], + "QoSMemCtrl::escalate Completed requestor %s [id %d] to priority " + "%d (now %d packets)\n\t(total read %d, total write %d)\n", + requestors[id], id, tgt_prio, packetPriorities[id][tgt_prio], readQueueSizes[tgt_prio], writeQueueSizes[tgt_prio]); } @@ -484,21 +484,21 @@ MemCtrl::qosSchedule(std::initializer_list queues, pkt->qosValue(pkt_priority); if (qosSyncroScheduler) { - // Call the scheduling function on all other masters. - for (const auto& m : masters) { + // Call the scheduling function on all other requestors. 
+ for (const auto& requestor : requestors) { - if (m.first == pkt->masterId()) + if (requestor.first == pkt->requestorId()) continue; - uint8_t prio = schedule(m.first, 0); + uint8_t prio = schedule(requestor.first, 0); if (qosPriorityEscalation) { DPRINTF(QOS, "QoSMemCtrl::qosSchedule: (syncro) escalating " - "MASTER %s to assigned priority %d\n", - _system->getMasterName(m.first), + "REQUESTOR %s to assigned priority %d\n", + _system->getRequestorName(requestor.first), prio); - escalate(queues, queue_entry_size, m.first, prio); + escalate(queues, queue_entry_size, requestor.first, prio); } } } @@ -506,10 +506,10 @@ MemCtrl::qosSchedule(std::initializer_list queues, if (qosPriorityEscalation) { DPRINTF(QOS, "QoSMemCtrl::qosSchedule: escalating " - "MASTER %s to assigned priority %d\n", - _system->getMasterName(pkt->masterId()), + "REQUESTOR %s to assigned priority %d\n", + _system->getRequestorName(pkt->requestorId()), pkt_priority); - escalate(queues, queue_entry_size, pkt->masterId(), pkt_priority); + escalate(queues, queue_entry_size, pkt->requestorId(), pkt_priority); } // Update last service tick for selected priority diff --git a/src/mem/qos/mem_sink.cc b/src/mem/qos/mem_sink.cc index dbdf54812..e931429a8 100644 --- a/src/mem/qos/mem_sink.cc +++ b/src/mem/qos/mem_sink.cc @@ -136,9 +136,9 @@ MemSinkCtrl::recvTimingReq(PacketPtr pkt) __func__); DPRINTF(QOS, - "%s: MASTER %s request %s addr %lld size %d\n", + "%s: REQUESTOR %s request %s addr %lld size %d\n", __func__, - _system->getMasterName(pkt->req->masterId()), + _system->getRequestorName(pkt->req->requestorId()), pkt->cmdString(), pkt->getAddr(), pkt->getSize()); uint64_t required_entries = divCeil(pkt->getSize(), memoryPacketSize); @@ -182,7 +182,7 @@ MemSinkCtrl::recvTimingReq(PacketPtr pkt) if (req_accepted) { // The packet is accepted - log it logRequest(pkt->isRead()? READ : WRITE, - pkt->req->masterId(), + pkt->req->requestorId(), pkt->qosValue(), pkt->getAddr(), required_entries); @@ -225,7 +225,7 @@ MemSinkCtrl::processNextReqEvent() for (uint8_t i = 0; i < numPriorities(); ++i) { std::string plist = ""; for (auto& e : (busState == WRITE ? writeQueue[i]: readQueue[i])) { - plist += (std::to_string(e->req->masterId())) + " "; + plist += (std::to_string(e->req->requestorId())) + " "; } DPRINTF(QOS, "%s priority Queue [%i] contains %i elements, " @@ -255,9 +255,9 @@ MemSinkCtrl::processNextReqEvent() queue->erase(p_it); DPRINTF(QOS, - "%s scheduling packet address %d for master %s from " + "%s scheduling packet address %d for requestor %s from " "priority queue %d\n", __func__, pkt->getAddr(), - _system->getMasterName(pkt->req->masterId()), + _system->getRequestorName(pkt->req->requestorId()), curr_prio); break; } @@ -272,9 +272,9 @@ MemSinkCtrl::processNextReqEvent() uint64_t removed_entries = divCeil(pkt->getSize(), memoryPacketSize); DPRINTF(QOS, - "%s scheduled packet address %d for master %s size is %d, " + "%s scheduled packet address %d for requestor %s size is %d, " "corresponds to %d memory packets\n", __func__, pkt->getAddr(), - _system->getMasterName(pkt->req->masterId()), + _system->getRequestorName(pkt->req->requestorId()), pkt->getSize(), removed_entries); // Schedule response @@ -287,7 +287,7 @@ MemSinkCtrl::processNextReqEvent() // Log the response logResponse(pkt->isRead()? 
READ : WRITE, - pkt->req->masterId(), + pkt->req->requestorId(), pkt->qosValue(), pkt->getAddr(), removed_entries, responseLatency); @@ -348,7 +348,8 @@ MemSinkCtrl::regStats() MemSinkCtrl::MemoryPort::MemoryPort(const std::string& n, MemSinkCtrl& m) - : QueuedSlavePort(n, &m, queue, true), memory(m), queue(memory, *this, true) + : QueuedResponsePort(n, &m, queue, true), + memory(m), queue(memory, *this, true) {} AddrRangeList diff --git a/src/mem/qos/mem_sink.hh b/src/mem/qos/mem_sink.hh index 5f6c1be76..93783ae10 100644 --- a/src/mem/qos/mem_sink.hh +++ b/src/mem/qos/mem_sink.hh @@ -68,7 +68,7 @@ class MemSinkCtrl : public MemCtrl using PacketQueue = std::deque; private: - class MemoryPort : public QueuedSlavePort + class MemoryPort : public QueuedResponsePort { private: /** reference to parent memory object */ @@ -135,11 +135,11 @@ class MemSinkCtrl : public MemCtrl DrainState drain() override; /** - * Getter method to access this memory's slave port + * Getter method to access this memory's response port * * @param if_name interface name * @param idx port ID number - * @return reference to this memory's slave port + * @return reference to this memory's response port */ Port &getPort(const std::string &if_name, PortID=InvalidPortID) override; @@ -164,7 +164,7 @@ class MemSinkCtrl : public MemCtrl /** Write request packets queue buffer size in #packets */ const uint64_t writeBufferSize; - /** Memory slave port */ + /** Memory response port */ MemoryPort port; /** diff --git a/src/mem/qos/policy.cc b/src/mem/qos/policy.cc index b5431d283..93c841d27 100644 --- a/src/mem/qos/policy.cc +++ b/src/mem/qos/policy.cc @@ -51,7 +51,7 @@ uint8_t Policy::schedule(const PacketPtr pkt) { assert(pkt->req); - return schedule(pkt->req->masterId(), pkt->getSize()); + return schedule(pkt->req->requestorId(), pkt->getSize()); } } // namespace QoS diff --git a/src/mem/qos/policy.hh b/src/mem/qos/policy.hh index 3ffe4811d..7bf6c128b 100644 --- a/src/mem/qos/policy.hh +++ b/src/mem/qos/policy.hh @@ -74,31 +74,32 @@ class Policy : public SimObject void setMemCtrl(MemCtrl* mem) { memCtrl = mem; }; /** - * Builds a MasterID/value pair given a master input. - * This will be lookuped in the system list of masters in order - * to retrieve the associated MasterID. - * In case the master name/object cannot be resolved, the pairing + * Builds a RequestorID/value pair given a requestor input. + * This will be looked up in the system list of requestors in order + * to retrieve the associated RequestorID. + * In case the requestor name/object cannot be resolved, the pairing * method will panic. * - * @param master Master to lookup in the system - * @param value Value to be associated with the MasterID - * @return A MasterID/Value pair. + * @param requestor Requestor to lookup in the system + * @param value Value to be associated with the RequestorID + * @return A RequestorID/Value pair. */ - template - std::pair pair(M master, T value); + template + std::pair pair(Requestor requestor, T value); /** * Schedules data - must be defined by derived class * - * @param mId master id to schedule + * @param requestor_id requestor id to schedule * @param data data to schedule * @return QoS priority value */ - virtual uint8_t schedule(const MasterID mId, const uint64_t data) = 0; + virtual uint8_t schedule(const RequestorID requestor_id, + const uint64_t data) = 0; /** * Schedules a packet. Non virtual interface for the scheduling - * method requiring a master ID. + * method requiring a requestor id. 
* * @param pkt pointer to packet to schedule * @return QoS priority value @@ -110,20 +111,20 @@ class Policy : public SimObject MemCtrl* memCtrl; }; -template -std::pair -Policy::pair(M master, T value) +template +std::pair +Policy::pair(Requestor requestor, T value) { - auto id = memCtrl->system()->lookupMasterId(master); + auto id = memCtrl->system()->lookupRequestorId(requestor); - panic_if(id == Request::invldMasterId, - "Unable to find master %s\n", master); + panic_if(id == Request::invldRequestorId, + "Unable to find requestor %s\n", requestor); DPRINTF(QOS, - "Master %s [id %d] associated with QoS data %d\n", - master, id, value); + "Requestor %s [id %d] associated with QoS data %d\n", + requestor, id, value); - return std::pair(id, value); + return std::pair(id, value); } } // namespace QoS diff --git a/src/mem/qos/policy_fixed_prio.cc b/src/mem/qos/policy_fixed_prio.cc index 449a25d23..d00048b0b 100644 --- a/src/mem/qos/policy_fixed_prio.cc +++ b/src/mem/qos/policy_fixed_prio.cc @@ -59,35 +59,36 @@ FixedPriorityPolicy::init() } void -FixedPriorityPolicy::initMasterName(std::string master, uint8_t priority) +FixedPriorityPolicy::initRequestorName(std::string requestor, uint8_t priority) { priorityMap.insert( - this->pair(master, priority)); + this->pair(requestor, priority)); } void -FixedPriorityPolicy::initMasterObj(const SimObject* master, uint8_t priority) +FixedPriorityPolicy::initRequestorObj(const SimObject* requestor, + uint8_t priority) { priorityMap.insert( - this->pair(master, priority)); + this->pair(requestor, priority)); } uint8_t -FixedPriorityPolicy::schedule(const MasterID mId, const uint64_t data) +FixedPriorityPolicy::schedule(const RequestorID id, const uint64_t data) { - // Reads a packet's MasterID contained in its encapsulated request + // Reads a packet's RequestorID contained in its encapsulated request // if a match is found in the configured priority map, returns the // matching priority, else returns zero - auto ret = priorityMap.find(mId); + auto ret = priorityMap.find(id); if (ret != priorityMap.end()) { return ret->second; } else { - DPRINTF(QOS, "Master %s (MasterID %d) not present in priorityMap, " - "assigning default priority %d\n", - memCtrl->system()->getMasterName(mId), - mId, defaultPriority); + DPRINTF(QOS, "Requestor %s (RequestorID %d) not present in " + "priorityMap, assigning default priority %d\n", + memCtrl->system()->getRequestorName(id), + id, defaultPriority); return defaultPriority; } } diff --git a/src/mem/qos/policy_fixed_prio.hh b/src/mem/qos/policy_fixed_prio.hh index 08f46a495..b25c34b35 100644 --- a/src/mem/qos/policy_fixed_prio.hh +++ b/src/mem/qos/policy_fixed_prio.hh @@ -48,8 +48,8 @@ namespace QoS { /** * Fixed Priority QoS Policy * - * Fixed Priority Policy: based on a configured MasterID to priority map, - * it returns a fixed QoS priority value: every master has a fixed priority. + * Fixed Priority Policy: based on a configured RequestorID to priority map, + * it returns a fixed QoS priority value: every requestor has a fixed priority. */ class FixedPriorityPolicy : public Policy { @@ -62,42 +62,42 @@ class FixedPriorityPolicy : public Policy void init() override; /** - * Initialize the fixed master's priority by providing - * the master's name and priority value. - * The master's name has to match a name in the system. + * Initialize the fixed requestor's priority by providing + * the requestor's name and priority value. + * The requestor's name has to match a name in the system. 
* - * @param master master's name to lookup. - * @param priority priority value for the master + * @param requestor requestor's name to lookup. + * @param priority priority value for the requestor */ - void initMasterName(std::string master, uint8_t priority); + void initRequestorName(std::string requestor, uint8_t priority); /** - * Initialize the fixed master's priority by providing - * the master's SimObject pointer and priority value. + * Initialize the fixed requestor's priority by providing + * the requestor's SimObject pointer and priority value. * - * @param master master's SimObject pointer to lookup. - * @param priority priority value for the master + * @param requestor requestor's SimObject pointer to lookup. + * @param priority priority value for the requestor */ - void initMasterObj(const SimObject* master, uint8_t priority); + void initRequestorObj(const SimObject* requestor, uint8_t priority); /** * Schedules a packet based on fixed priority configuration * - * @param mId master id to schedule + * @param id requestor id to schedule * @param data data to schedule * @return QoS priority value */ - virtual uint8_t schedule(const MasterID, const uint64_t) override; + virtual uint8_t schedule(const RequestorID, const uint64_t) override; protected: - /** Default fixed priority value for non-listed masters */ + /** Default fixed priority value for non-listed requestors */ const uint8_t defaultPriority; /** - * Priority map, associates configured masters with + * Priority map, associates configured requestors with * a fixed QoS priority value */ - std::map priorityMap; + std::map priorityMap; }; } // namespace QoS diff --git a/src/mem/qos/policy_pf.cc b/src/mem/qos/policy_pf.cc index 58c4e3896..47e2096a5 100644 --- a/src/mem/qos/policy_pf.cc +++ b/src/mem/qos/policy_pf.cc @@ -53,32 +53,32 @@ PropFairPolicy::PropFairPolicy(const Params* p) PropFairPolicy::~PropFairPolicy() {} -template +template void -PropFairPolicy::initMaster(const Master master, const double score) +PropFairPolicy::initRequestor(const Requestor requestor, const double score) { - MasterID m_id = memCtrl->system()->lookupMasterId(master); + RequestorID id = memCtrl->system()->lookupRequestorId(requestor); - assert(m_id != Request::invldMasterId); + assert(id != Request::invldRequestorId); - // Setting the Initial score for the selected master. - history.push_back(std::make_pair(m_id, score)); + // Setting the Initial score for the selected requestor. 
+ history.push_back(std::make_pair(id, score)); fatal_if(history.size() > memCtrl->numPriorities(), - "Policy's maximum number of masters is currently dictated " + "Policy's maximum number of requestors is currently dictated " "by the maximum number of priorities\n"); } void -PropFairPolicy::initMasterName(const std::string master, const double score) +PropFairPolicy::initRequestorName(const std::string requestor, const double score) { - initMaster(master, score); + initRequestor(requestor, score); } void -PropFairPolicy::initMasterObj(const SimObject* master, const double score) +PropFairPolicy::initRequestorObj(const SimObject* requestor, const double score) { - initMaster(master, score); + initRequestor(requestor, score); } double @@ -89,10 +89,10 @@ PropFairPolicy::updateScore( } uint8_t -PropFairPolicy::schedule(const MasterID pkt_mid, const uint64_t pkt_size) +PropFairPolicy::schedule(const RequestorID pkt_id, const uint64_t pkt_size) { auto sort_pred = - [] (const MasterHistory& lhs, const MasterHistory& rhs) + [] (const RequestorHistory& lhs, const RequestorHistory& rhs) { return lhs.second > rhs.second; }; // Sorting in reverse in base of personal history: @@ -105,10 +105,10 @@ PropFairPolicy::schedule(const MasterID pkt_mid, const uint64_t pkt_size) uint8_t pkt_priority = 0; for (auto m_hist = history.begin(); m_hist != history.end(); m_hist++) { - MasterID curr_mid = m_hist->first; + RequestorID curr_id = m_hist->first; double& curr_score = m_hist->second; - if (curr_mid == pkt_mid) { + if (curr_id == pkt_id) { // The qos priority is the position in the sorted vector. pkt_priority = std::distance(history.begin(), m_hist); diff --git a/src/mem/qos/policy_pf.hh b/src/mem/qos/policy_pf.hh index c741e9ca7..429e85bcb 100644 --- a/src/mem/qos/policy_pf.hh +++ b/src/mem/qos/policy_pf.hh @@ -67,38 +67,38 @@ class PropFairPolicy : public Policy virtual ~PropFairPolicy(); /** - * Initialize the master's score by providing - * the master's name and initial score value. - * The master's name has to match a name in the system. + * Initialize the requestor's score by providing + * the requestor's name and initial score value. + * The requestor's name has to match a name in the system. * - * @param master master's name to lookup. - * @param score initial score value for the master + * @param requestor requestor's name to lookup. + * @param score initial score value for the requestor */ - void initMasterName(const std::string master, const double score); + void initRequestorName(const std::string requestor, const double score); /** - * Initialize the master's score by providing - * the master's SimObject pointer and initial score value. - * The master's pointer has to match a master in the system. + * Initialize the requestor's score by providing + * the requestor's SimObject pointer and initial score value. + * The requestor's pointer has to match a requestor in the system. * - * @param master master's SimObject pointer to lookup. - * @param score initial score value for the master + * @param requestor requestor's SimObject pointer to lookup. 
+ * @param score initial score value for the requestor */ - void initMasterObj(const SimObject* master, const double score); + void initRequestorObj(const SimObject* requestor, const double score); /** * Schedules a packet based on proportional fair configuration * - * @param m_id master id to schedule + * @param id requestor id to schedule * @param pkt_size size of the packet * @return QoS priority value */ virtual uint8_t - schedule(const MasterID m_id, const uint64_t pkt_size) override; + schedule(const RequestorID id, const uint64_t pkt_size) override; protected: - template - void initMaster(const Master master, const double score); + template + void initRequestor(const Requestor requestor, const double score); inline double updateScore(const double old_score, const uint64_t served_bytes) const; @@ -107,9 +107,9 @@ class PropFairPolicy : public Policy /** PF Policy weight */ const double weight; - /** history is keeping track of every master's score */ - using MasterHistory = std::pair; - std::vector history; + /** history is keeping track of every requestor's score */ + using RequestorHistory = std::pair; + std::vector history; }; } // namespace QoS diff --git a/src/mem/qos/q_policy.cc b/src/mem/qos/q_policy.cc index 88ce95d0a..df16c5176 100644 --- a/src/mem/qos/q_policy.cc +++ b/src/mem/qos/q_policy.cc @@ -67,8 +67,8 @@ LrgQueuePolicy::selectPacket(PacketQueue* q) { QueuePolicy::PacketQueue::iterator ret = q->end(); - // Tracks one packet per master in the queue - std::unordered_map track; + // Tracks one packet per requestor in the queue + std::unordered_map track; // Cycle queue only once for (auto pkt_it = q->begin(); pkt_it != q->end(); ++pkt_it) { @@ -78,51 +78,51 @@ LrgQueuePolicy::selectPacket(PacketQueue* q) panic_if(!pkt->req, "QoSQPolicy::lrg detected packet without request"); - // Get Request MasterID - MasterID m_id = pkt->req->masterId(); + // Get Request RequestorID + RequestorID requestor_id = pkt->req->requestorId(); DPRINTF(QOS, "QoSQPolicy::lrg checking packet " - "from queue with id %d\n", m_id); + "from queue with id %d\n", requestor_id); - // Check if this is a known master. - panic_if(memCtrl->hasMaster(m_id), - "%s: Unrecognized Master\n", __func__); + // Check if this is a known requestor. + panic_if(memCtrl->hasRequestor(requestor_id), + "%s: Unrecognized Requestor\n", __func__); panic_if(toServe.size() > 0, "%s: toServe list is empty\n", __func__); - if (toServe.front() == m_id) { + if (toServe.front() == requestor_id) { DPRINTF(QOS, "QoSQPolicy::lrg matched to served " - "master id %d\n", m_id); - // This packet matches the MasterID to be served next + "requestor id %d\n", requestor_id); + // This packet matches the RequestorID to be served next // move toServe front to back - toServe.push_back(m_id); + toServe.push_back(requestor_id); toServe.pop_front(); return pkt_it; } - // The master generating the packet is not first in the toServe list - // (Doesn't have the highest priority among masters) - // Check if this is the first packet seen with its master ID + // The requestor generating the packet is not first in the toServe list + // (Doesn't have the highest priority among requestors) + // Check if this is the first packet seen with its requestor id // and remember it. Then keep looping over the remaining packets // in the queue. 
- if (track.find(m_id) == track.end()) { - track[m_id] = pkt_it; + if (track.find(requestor_id) == track.end()) { + track[requestor_id] = pkt_it; DPRINTF(QOS, "QoSQPolicy::lrg tracking a packet for " - "master id %d\n", m_id); + "requestor id %d\n", requestor_id); } } - // If here, the current master to be serviced doesn't have a pending - // packet in the queue: look for the next master in the list. - for (const auto& masterId : toServe) { + // If here, the current requestor to be serviced doesn't have a pending + // packet in the queue: look for the next requestor in the list. + for (const auto& requestorId : toServe) { DPRINTF(QOS, "QoSQPolicy::lrg evaluating alternative " - "master id %d\n", masterId); + "requestor id %d\n", requestorId); - if (track.find(masterId) != track.end()) { - ret = track[masterId]; - DPRINTF(QOS, "QoSQPolicy::lrg master id " - "%d selected for service\n", masterId); + if (track.find(requestorId) != track.end()) { + ret = track[requestorId]; + DPRINTF(QOS, "QoSQPolicy::lrg requestor id " + "%d selected for service\n", requestorId); return ret; } @@ -138,9 +138,9 @@ LrgQueuePolicy::selectPacket(PacketQueue* q) void LrgQueuePolicy::enqueuePacket(PacketPtr pkt) { - MasterID m_id = pkt->masterId(); - if (!memCtrl->hasMaster(m_id)) { - toServe.push_back(m_id); + RequestorID requestor_id = pkt->requestorId(); + if (!memCtrl->hasRequestor(requestor_id)) { + toServe.push_back(requestor_id); } }; diff --git a/src/mem/qos/q_policy.hh b/src/mem/qos/q_policy.hh index ba36b43b7..f932b9114 100644 --- a/src/mem/qos/q_policy.hh +++ b/src/mem/qos/q_policy.hh @@ -155,7 +155,7 @@ class FifoQueuePolicy : public QueuePolicy /** * Least Recently Granted Queue Policy * It selects packets from the queue with a round - * robin-like policy: using the master id as a switching + * robin-like policy: using the requestor id as a switching * parameter rather than switching over a time quantum. */ class LrgQueuePolicy : public QueuePolicy @@ -179,10 +179,10 @@ class LrgQueuePolicy : public QueuePolicy protected: /** * Support structure for lrg algorithms: - * keeps track of serviced masters, + * keeps track of serviced requestors, * always serve the front element. */ - std::list toServe; + std::list toServe; }; } // namespace QoS diff --git a/src/mem/qport.hh b/src/mem/qport.hh index 97a206548..a70fbd3fe 100644 --- a/src/mem/qport.hh +++ b/src/mem/qport.hh @@ -51,11 +51,11 @@ * A queued port is a port that has an infinite queue for outgoing * packets and thus decouples the module that wants to send * request/responses from the flow control (retry mechanism) of the - * port. A queued port can be used by both a master and a slave. The + * port. A queued port can be used by both a requestor and a responder. The * queue is a parameter to allow tailoring of the queue implementation * (used in the cache). */ -class QueuedSlavePort : public ResponsePort +class QueuedResponsePort : public ResponsePort { protected: @@ -74,12 +74,12 @@ class QueuedSlavePort : public ResponsePort * behaviuor in a subclass, and provide the latter to the * QueuePort constructor. */ - QueuedSlavePort(const std::string& name, SimObject* owner, + QueuedResponsePort(const std::string& name, SimObject* owner, RespPacketQueue &resp_queue, PortID id = InvalidPortID) : ResponsePort(name, owner, id), respQueue(resp_queue) { } - virtual ~QueuedSlavePort() { } + virtual ~QueuedResponsePort() { } /** * Schedule the sending of a timing response. 
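As a point of reference, a minimal sketch of a response-side port built on the renamed QueuedResponsePort, mirroring how the RubyPort pio/mem ports later in this patch wire up their RespPacketQueue; MyDevice and DevResponsePort are hypothetical names, and the receive callbacks a real port must still implement (recvTimingReq, recvAtomic, recvFunctional, getAddrRanges) are omitted:

class DevResponsePort : public QueuedResponsePort
{
  private:
    // Queue of outgoing responses; a reference to it is handed to the
    // base class, which drains it and handles flow-control retries.
    RespPacketQueue queue;

  public:
    DevResponsePort(const std::string &_name, MyDevice &_dev)
        : QueuedResponsePort(_name, &_dev, queue),
          queue(_dev, *this)
    { }
};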
@@ -97,13 +97,13 @@ class QueuedSlavePort : public ResponsePort }; /** - * The QueuedMasterPort combines two queues, a request queue and a + * The QueuedRequestPort combines two queues, a request queue and a * snoop response queue, that both share the same port. The flow * control for requests and snoop responses are completely * independent, and so each queue manages its own flow control * (retries). */ -class QueuedMasterPort : public RequestPort +class QueuedRequestPort : public RequestPort { protected: @@ -127,7 +127,7 @@ class QueuedMasterPort : public RequestPort * behaviuor in a subclass, and provide the latter to the * QueuePort constructor. */ - QueuedMasterPort(const std::string& name, SimObject* owner, + QueuedRequestPort(const std::string& name, SimObject* owner, ReqPacketQueue &req_queue, SnoopRespPacketQueue &snoop_resp_queue, PortID id = InvalidPortID) : @@ -135,7 +135,7 @@ class QueuedMasterPort : public RequestPort snoopRespQueue(snoop_resp_queue) { } - virtual ~QueuedMasterPort() { } + virtual ~QueuedRequestPort() { } /** * Schedule the sending of a timing request. diff --git a/src/mem/request.hh b/src/mem/request.hh index 7f0ddcbd4..43f54e64a 100644 --- a/src/mem/request.hh +++ b/src/mem/request.hh @@ -82,7 +82,7 @@ class Request; class ThreadContext; typedef std::shared_ptr RequestPtr; -typedef uint16_t MasterID; +typedef uint16_t RequestorID; class Request { @@ -236,23 +236,23 @@ class Request static const FlagsType HTM_CMD = HTM_START | HTM_COMMIT | HTM_CANCEL | HTM_ABORT; - /** Master Ids that are statically allocated + /** Requestor Ids that are statically allocated * @{*/ - enum : MasterID { - /** This master id is used for writeback requests by the caches */ - wbMasterId = 0, + enum : RequestorID { + /** This requestor id is used for writeback requests by the caches */ + wbRequestorId = 0, /** - * This master id is used for functional requests that + * This requestor id is used for functional requests that * don't come from a particular device */ - funcMasterId = 1, - /** This master id is used for message signaled interrupts */ - intMasterId = 2, + funcRequestorId = 1, + /** This requestor id is used for message signaled interrupts */ + intRequestorId = 2, /** - * Invalid master id for assertion checking only. It is + * Invalid requestor id for assertion checking only. It is * invalid behavior to ever send this id as part of a request. */ - invldMasterId = std::numeric_limits::max() + invldRequestorId = std::numeric_limits::max() }; /** @} */ @@ -347,7 +347,7 @@ class Request /** The requestor ID which is unique in the system for all ports * that are capable of issuing a transaction */ - MasterID _masterId = invldMasterId; + RequestorID _requestorId = invldRequestorId; /** Flag structure for the request. */ Flags _flags; @@ -427,25 +427,25 @@ class Request * just physical address, size, flags, and timestamp (to curTick()). * These fields are adequate to perform a request. 
*/ - Request(Addr paddr, unsigned size, Flags flags, MasterID mid) : - _paddr(paddr), _size(size), _masterId(mid), _time(curTick()) + Request(Addr paddr, unsigned size, Flags flags, RequestorID id) : + _paddr(paddr), _size(size), _requestorId(id), _time(curTick()) { _flags.set(flags); privateFlags.set(VALID_PADDR|VALID_SIZE); } Request(Addr vaddr, unsigned size, Flags flags, - MasterID mid, Addr pc, ContextID cid, + RequestorID id, Addr pc, ContextID cid, AtomicOpFunctorPtr atomic_op=nullptr) { - setVirt(vaddr, size, flags, mid, pc, std::move(atomic_op)); + setVirt(vaddr, size, flags, id, pc, std::move(atomic_op)); setContext(cid); } Request(const Request& other) : _paddr(other._paddr), _size(other._size), _byteEnable(other._byteEnable), - _masterId(other._masterId), + _requestorId(other._requestorId), _flags(other._flags), _cacheCoherenceFlags(other._cacheCoherenceFlags), privateFlags(other.privateFlags), @@ -493,12 +493,12 @@ class Request * allocated Request object. */ void - setVirt(Addr vaddr, unsigned size, Flags flags, MasterID mid, Addr pc, + setVirt(Addr vaddr, unsigned size, Flags flags, RequestorID id, Addr pc, AtomicOpFunctorPtr amo_op=nullptr) { _vaddr = vaddr; _size = size; - _masterId = mid; + _requestorId = id; _pc = pc; _time = curTick(); @@ -737,10 +737,10 @@ class Request } /** Accesssor for the requestor id. */ - MasterID - masterId() const + RequestorID + requestorId() const { - return _masterId; + return _requestorId; } uint32_t diff --git a/src/mem/ruby/network/MessageBuffer.py b/src/mem/ruby/network/MessageBuffer.py index c796960cc..a0a208fca 100644 --- a/src/mem/ruby/network/MessageBuffer.py +++ b/src/mem/ruby/network/MessageBuffer.py @@ -40,5 +40,7 @@ class MessageBuffer(SimObject): random delays if RubySystem \ randomization flag is True)") - master = RequestPort("Master port to MessageBuffer receiver") - slave = ResponsePort("Slave port from MessageBuffer sender") + out_port = RequestPort("Request port to MessageBuffer receiver") + master = DeprecatedParam(out_port, '`master` is now called `out_port`') + in_port = ResponsePort("Response port from MessageBuffer sender") + slave = DeprecatedParam(in_port, '`slave` is now called `in_port`') diff --git a/src/mem/ruby/network/Network.py b/src/mem/ruby/network/Network.py index 5acad60b6..8999ff1cd 100644 --- a/src/mem/ruby/network/Network.py +++ b/src/mem/ruby/network/Network.py @@ -49,5 +49,7 @@ class RubyNetwork(ClockedObject): ext_links = VectorParam.BasicExtLink("Links to external nodes") int_links = VectorParam.BasicIntLink("Links between internal nodes") - slave = VectorSlavePort("CPU slave port") - master = VectorMasterPort("CPU master port") + in_port = VectorResponsePort("CPU input port") + slave = DeprecatedParam(in_port, '`slave` is now called `in_port`') + out_port = VectorRequestPort("CPU output port") + master = DeprecatedParam(out_port, '`master` is now called `out_port`') diff --git a/src/mem/ruby/slicc_interface/AbstractController.cc b/src/mem/ruby/slicc_interface/AbstractController.cc index dd3a9e7aa..5d9e5f680 100644 --- a/src/mem/ruby/slicc_interface/AbstractController.cc +++ b/src/mem/ruby/slicc_interface/AbstractController.cc @@ -50,7 +50,7 @@ AbstractController::AbstractController(const Params *p) : ClockedObject(p), Consumer(this), m_version(p->version), m_clusterID(p->cluster_id), - m_masterId(p->system->getMasterId(this)), m_is_blocking(false), + m_id(p->system->getRequestorId(this)), m_is_blocking(false), m_number_of_TBEs(p->number_of_TBEs), m_transitions_per_cycle(p->transitions_per_cycle), 
m_buffer_size(p->buffer_size), m_recycle_latency(p->recycle_latency), @@ -219,7 +219,7 @@ AbstractController::serviceMemoryQueue() } RequestPtr req - = std::make_shared(mem_msg->m_addr, req_size, 0, m_masterId); + = std::make_shared(mem_msg->m_addr, req_size, 0, m_id); PacketPtr pkt; if (mem_msg->getType() == MemoryRequestType_MEMORY_WB) { pkt = Packet::createWrite(req); diff --git a/src/mem/ruby/slicc_interface/AbstractController.hh b/src/mem/ruby/slicc_interface/AbstractController.hh index daa52da62..98cb0a785 100644 --- a/src/mem/ruby/slicc_interface/AbstractController.hh +++ b/src/mem/ruby/slicc_interface/AbstractController.hh @@ -147,7 +147,7 @@ class AbstractController : public ClockedObject, public Consumer public: MachineID getMachineID() const { return m_machineID; } - MasterID getMasterId() const { return m_masterId; } + RequestorID getRequestorId() const { return m_id; } Stats::Histogram& getDelayHist() { return m_delayHistogram; } Stats::Histogram& getDelayVCHist(uint32_t index) @@ -185,8 +185,8 @@ class AbstractController : public ClockedObject, public Consumer MachineID m_machineID; const NodeID m_clusterID; - // MasterID used by some components of gem5. - const MasterID m_masterId; + // RequestorID used by some components of gem5. + const RequestorID m_id; Network *m_net_ptr; bool m_is_blocking; @@ -237,7 +237,7 @@ class AbstractController : public ClockedObject, public Consumer void recvReqRetry(); }; - /* Master port to the memory controller. */ + /* Request port to the memory controller. */ MemoryPort memoryPort; // State that is stored in packets sent to the memory controller. diff --git a/src/mem/ruby/system/CacheRecorder.cc b/src/mem/ruby/system/CacheRecorder.cc index 1fc7bb8ef..3fb5c2fdb 100644 --- a/src/mem/ruby/system/CacheRecorder.cc +++ b/src/mem/ruby/system/CacheRecorder.cc @@ -87,7 +87,7 @@ CacheRecorder::enqueueNextFlushRequest() m_records_flushed++; auto req = std::make_shared(rec->m_data_address, m_block_size_bytes, 0, - Request::funcMasterId); + Request::funcRequestorId); MemCmd::Command requestType = MemCmd::FlushReq; Packet *pkt = new Packet(req, requestType); @@ -119,18 +119,20 @@ CacheRecorder::enqueueNextFetchRequest() requestType = MemCmd::ReadReq; req = std::make_shared( traceRecord->m_data_address + rec_bytes_read, - RubySystem::getBlockSizeBytes(), 0, Request::funcMasterId); + RubySystem::getBlockSizeBytes(), 0, + Request::funcRequestorId); } else if (traceRecord->m_type == RubyRequestType_IFETCH) { requestType = MemCmd::ReadReq; req = std::make_shared( traceRecord->m_data_address + rec_bytes_read, RubySystem::getBlockSizeBytes(), - Request::INST_FETCH, Request::funcMasterId); + Request::INST_FETCH, Request::funcRequestorId); } else { requestType = MemCmd::WriteReq; req = std::make_shared( traceRecord->m_data_address + rec_bytes_read, - RubySystem::getBlockSizeBytes(), 0, Request::funcMasterId); + RubySystem::getBlockSizeBytes(), 0, + Request::funcRequestorId); } Packet *pkt = new Packet(req, requestType); diff --git a/src/mem/ruby/system/DMASequencer.cc b/src/mem/ruby/system/DMASequencer.cc index bad49c97d..938044a74 100644 --- a/src/mem/ruby/system/DMASequencer.cc +++ b/src/mem/ruby/system/DMASequencer.cc @@ -57,8 +57,8 @@ DMASequencer::init() RubyPort::init(); m_data_block_mask = mask(RubySystem::getBlockSizeBits()); - for (const auto &s_port : slave_ports) - s_port->sendRangeChange(); + for (const auto &response_port : response_ports) + response_port->sendRangeChange(); } RequestStatus diff --git a/src/mem/ruby/system/GPUCoalescer.cc 
b/src/mem/ruby/system/GPUCoalescer.cc index 80bc19a64..03c392f7f 100644 --- a/src/mem/ruby/system/GPUCoalescer.cc +++ b/src/mem/ruby/system/GPUCoalescer.cc @@ -669,7 +669,7 @@ GPUCoalescer::coalescePacket(PacketPtr pkt) // back the requesting CU when we receive write // complete callbacks for all issued Ruby requests of this // instruction. - RubyPort::MemSlavePort* mem_slave_port = ss->port; + RubyPort::MemResponsePort* mem_response_port = ss->port; GPUDynInstPtr gpuDynInst = nullptr; @@ -686,7 +686,8 @@ GPUCoalescer::coalescePacket(PacketPtr pkt) } PendingWriteInst& inst = pendingWriteInsts[seqNum]; - inst.addPendingReq(mem_slave_port, gpuDynInst, m_usingRubyTester); + inst.addPendingReq(mem_response_port, gpuDynInst, + m_usingRubyTester); } return true; @@ -783,7 +784,7 @@ GPUCoalescer::completeHitCallback(std::vector & mylist) for (auto& pkt : mylist) { RubyPort::SenderState *ss = safe_cast(pkt->senderState); - MemSlavePort *port = ss->port; + MemResponsePort *port = ss->port; assert(port != NULL); pkt->senderState = ss->predecessor; diff --git a/src/mem/ruby/system/GPUCoalescer.hh b/src/mem/ruby/system/GPUCoalescer.hh index 401f70bd4..3b1b7af2b 100644 --- a/src/mem/ruby/system/GPUCoalescer.hh +++ b/src/mem/ruby/system/GPUCoalescer.hh @@ -120,7 +120,7 @@ class CoalescedRequest // PendingWriteInst tracks the number of outstanding Ruby requests // per write instruction. Once all requests associated with one instruction -// are completely done in Ruby, we call back the requester to mark +// are completely done in Ruby, we call back the requestor to mark // that this instruction is complete. class PendingWriteInst { @@ -135,7 +135,7 @@ class PendingWriteInst {} void - addPendingReq(RubyPort::MemSlavePort* port, GPUDynInstPtr inst, + addPendingReq(RubyPort::MemResponsePort* port, GPUDynInstPtr inst, bool usingRubyTester) { assert(port); @@ -157,7 +157,7 @@ class PendingWriteInst return (numPendingStores == 0) ? true : false; } - // ack the original requester that this write instruction is complete + // ack the original requestor that this write instruction is complete void ackWriteCompletion(bool usingRubyTester) { @@ -175,7 +175,7 @@ class PendingWriteInst pkt->senderState = ss; } - // send the ack response to the requester + // send the ack response to the requestor originalPort->sendTimingResp(pkt); } @@ -192,7 +192,7 @@ class PendingWriteInst // which implies multiple ports per instruction. However, we need // only 1 of the ports to call back the CU. Therefore, here we keep // track the port that sent the first packet of this instruction. - RubyPort::MemSlavePort* originalPort; + RubyPort::MemResponsePort* originalPort; // similar to the originalPort, this gpuDynInstPtr is set only for // the first packet of this instruction. 
GPUDynInstPtr gpuDynInstPtr; @@ -201,12 +201,12 @@ class PendingWriteInst class GPUCoalescer : public RubyPort { public: - class GMTokenPort : public TokenSlavePort + class GMTokenPort : public TokenResponsePort { public: GMTokenPort(const std::string& name, ClockedObject *owner, PortID id = InvalidPortID) - : TokenSlavePort(name, owner, id) + : TokenResponsePort(name, owner, id) { } ~GMTokenPort() { } diff --git a/src/mem/ruby/system/GPUCoalescer.py b/src/mem/ruby/system/GPUCoalescer.py index 9d4a76bdd..0bb5628ac 100644 --- a/src/mem/ruby/system/GPUCoalescer.py +++ b/src/mem/ruby/system/GPUCoalescer.py @@ -53,4 +53,4 @@ class RubyGPUCoalescer(RubyPort): "deadlock/livelock declared") garnet_standalone = Param.Bool(False, "") - gmTokenPort = SlavePort("Port to the CU for sharing tokens") + gmTokenPort = ResponsePort("Port to the CU for sharing tokens") diff --git a/src/mem/ruby/system/HTMSequencer.cc b/src/mem/ruby/system/HTMSequencer.cc index d2cfa0751..87bc7d74e 100644 --- a/src/mem/ruby/system/HTMSequencer.cc +++ b/src/mem/ruby/system/HTMSequencer.cc @@ -227,7 +227,7 @@ HTMSequencer::rubyHtmCallback(PacketPtr pkt, RubyPort::SenderState *senderState = safe_cast(pkt->popSenderState()); - MemSlavePort *port = safe_cast(senderState->port); + MemResponsePort *port = safe_cast(senderState->port); assert(port != nullptr); delete senderState; @@ -237,7 +237,7 @@ HTMSequencer::rubyHtmCallback(PacketPtr pkt, pkt->req->isHTMStart(), pkt->req->isHTMCommit(), pkt->req->isHTMCancel(), htm_return_code); - // turn packet around to go back to requester if response expected + // turn packet around to go back to requestor if response expected if (pkt->needsResponse()) { DPRINTF(RubyPort, "Sending packet back over port\n"); pkt->makeHtmTransactionalReqResponse( diff --git a/src/mem/ruby/system/RubyPort.cc b/src/mem/ruby/system/RubyPort.cc index bb86e6038..4fc41c973 100644 --- a/src/mem/ruby/system/RubyPort.cc +++ b/src/mem/ruby/system/RubyPort.cc @@ -55,28 +55,29 @@ RubyPort::RubyPort(const Params *p) : ClockedObject(p), m_ruby_system(p->ruby_system), m_version(p->version), m_controller(NULL), m_mandatory_q_ptr(NULL), m_usingRubyTester(p->using_ruby_tester), system(p->system), - pioMasterPort(csprintf("%s.pio-master-port", name()), this), - pioSlavePort(csprintf("%s.pio-slave-port", name()), this), - memMasterPort(csprintf("%s.mem-master-port", name()), this), - memSlavePort(csprintf("%s-mem-slave-port", name()), this, + pioRequestPort(csprintf("%s.pio-request-port", name()), this), + pioResponsePort(csprintf("%s.pio-response-port", name()), this), + memRequestPort(csprintf("%s.mem-request-port", name()), this), + memResponsePort(csprintf("%s-mem-response-port", name()), this, p->ruby_system->getAccessBackingStore(), -1, p->no_retry_on_stall), - gotAddrRanges(p->port_master_connection_count), + gotAddrRanges(p->port_request_ports_connection_count), m_isCPUSequencer(p->is_cpu_sequencer) { assert(m_version != -1); - // create the slave ports based on the number of connected ports - for (size_t i = 0; i < p->port_slave_connection_count; ++i) { - slave_ports.push_back(new MemSlavePort(csprintf("%s.slave%d", name(), - i), this, p->ruby_system->getAccessBackingStore(), + // create the response ports based on the number of connected ports + for (size_t i = 0; i < p->port_response_ports_connection_count; ++i) { + response_ports.push_back(new MemResponsePort(csprintf + ("%s.response_ports%d", name(), i), this, + p->ruby_system->getAccessBackingStore(), i, p->no_retry_on_stall)); } - // create the master ports based 
on the number of connected ports - for (size_t i = 0; i < p->port_master_connection_count; ++i) { - master_ports.push_back(new PioMasterPort(csprintf("%s.master%d", - name(), i), this)); + // create the request ports based on the number of connected ports + for (size_t i = 0; i < p->port_request_ports_connection_count; ++i) { + request_ports.push_back(new PioRequestPort(csprintf( + "%s.request_ports%d", name(), i), this)); } } @@ -90,82 +91,84 @@ RubyPort::init() Port & RubyPort::getPort(const std::string &if_name, PortID idx) { - if (if_name == "mem_master_port") { - return memMasterPort; - } else if (if_name == "pio_master_port") { - return pioMasterPort; - } else if (if_name == "mem_slave_port") { - return memSlavePort; - } else if (if_name == "pio_slave_port") { - return pioSlavePort; - } else if (if_name == "master") { + if (if_name == "mem_request_port") { + return memRequestPort; + } else if (if_name == "pio_request_port") { + return pioRequestPort; + } else if (if_name == "mem_response_port") { + return memResponsePort; + } else if (if_name == "pio_response_port") { + return pioResponsePort; + } else if (if_name == "request_ports") { // used by the x86 CPUs to connect the interrupt PIO and interrupt - // slave port - if (idx >= static_cast(master_ports.size())) { - panic("RubyPort::getPort master: unknown index %d\n", idx); + // response port + if (idx >= static_cast(request_ports.size())) { + panic("%s: unknown %s index (%d)\n", __func__, if_name, idx); } - return *master_ports[idx]; - } else if (if_name == "slave") { + return *request_ports[idx]; + } else if (if_name == "response_ports") { // used by the CPUs to connect the caches to the interconnect, and - // for the x86 case also the interrupt master - if (idx >= static_cast(slave_ports.size())) { - panic("RubyPort::getPort slave: unknown index %d\n", idx); + // for the x86 case also the interrupt request port + if (idx >= static_cast(response_ports.size())) { + panic("%s: unknown %s index (%d)\n", __func__, if_name, idx); } - return *slave_ports[idx]; + return *response_ports[idx]; } // pass it along to our super class return ClockedObject::getPort(if_name, idx); } -RubyPort::PioMasterPort::PioMasterPort(const std::string &_name, +RubyPort::PioRequestPort::PioRequestPort(const std::string &_name, RubyPort *_port) - : QueuedMasterPort(_name, _port, reqQueue, snoopRespQueue), + : QueuedRequestPort(_name, _port, reqQueue, snoopRespQueue), reqQueue(*_port, *this), snoopRespQueue(*_port, *this) { - DPRINTF(RubyPort, "Created master pioport on sequencer %s\n", _name); + DPRINTF(RubyPort, "Created request pioport on sequencer %s\n", _name); } -RubyPort::PioSlavePort::PioSlavePort(const std::string &_name, +RubyPort::PioResponsePort::PioResponsePort(const std::string &_name, RubyPort *_port) - : QueuedSlavePort(_name, _port, queue), queue(*_port, *this) + : QueuedResponsePort(_name, _port, queue), queue(*_port, *this) { - DPRINTF(RubyPort, "Created slave pioport on sequencer %s\n", _name); + DPRINTF(RubyPort, "Created response pioport on sequencer %s\n", _name); } -RubyPort::MemMasterPort::MemMasterPort(const std::string &_name, +RubyPort::MemRequestPort::MemRequestPort(const std::string &_name, RubyPort *_port) - : QueuedMasterPort(_name, _port, reqQueue, snoopRespQueue), + : QueuedRequestPort(_name, _port, reqQueue, snoopRespQueue), reqQueue(*_port, *this), snoopRespQueue(*_port, *this) { - DPRINTF(RubyPort, "Created master memport on ruby sequencer %s\n", _name); + DPRINTF(RubyPort, "Created request memport on ruby sequencer %s\n", 
_name); } -RubyPort::MemSlavePort::MemSlavePort(const std::string &_name, RubyPort *_port, +RubyPort:: +MemResponsePort::MemResponsePort(const std::string &_name, RubyPort *_port, bool _access_backing_store, PortID id, bool _no_retry_on_stall) - : QueuedSlavePort(_name, _port, queue, id), queue(*_port, *this), + : QueuedResponsePort(_name, _port, queue, id), queue(*_port, *this), access_backing_store(_access_backing_store), no_retry_on_stall(_no_retry_on_stall) { - DPRINTF(RubyPort, "Created slave memport on ruby sequencer %s\n", _name); + DPRINTF(RubyPort, "Created response memport on ruby sequencer %s\n", + _name); } bool -RubyPort::PioMasterPort::recvTimingResp(PacketPtr pkt) +RubyPort::PioRequestPort::recvTimingResp(PacketPtr pkt) { RubyPort *rp = static_cast(&owner); DPRINTF(RubyPort, "Response for address: 0x%#x\n", pkt->getAddr()); // send next cycle - rp->pioSlavePort.schedTimingResp( + rp->pioResponsePort.schedTimingResp( pkt, curTick() + rp->m_ruby_system->clockPeriod()); return true; } -bool RubyPort::MemMasterPort::recvTimingResp(PacketPtr pkt) +bool RubyPort::MemRequestPort::recvTimingResp(PacketPtr pkt) { // got a response from a device assert(pkt->isResponse()); @@ -174,7 +177,7 @@ bool RubyPort::MemMasterPort::recvTimingResp(PacketPtr pkt) // First we must retrieve the request port from the sender State RubyPort::SenderState *senderState = safe_cast(pkt->popSenderState()); - MemSlavePort *port = senderState->port; + MemResponsePort *port = senderState->port; assert(port != NULL); delete senderState; @@ -191,18 +194,18 @@ bool RubyPort::MemMasterPort::recvTimingResp(PacketPtr pkt) } bool -RubyPort::PioSlavePort::recvTimingReq(PacketPtr pkt) +RubyPort::PioResponsePort::recvTimingReq(PacketPtr pkt) { RubyPort *ruby_port = static_cast(&owner); - for (size_t i = 0; i < ruby_port->master_ports.size(); ++i) { - AddrRangeList l = ruby_port->master_ports[i]->getAddrRanges(); + for (size_t i = 0; i < ruby_port->request_ports.size(); ++i) { + AddrRangeList l = ruby_port->request_ports[i]->getAddrRanges(); for (auto it = l.begin(); it != l.end(); ++it) { if (it->contains(pkt->getAddr())) { // generally it is not safe to assume success here as // the port could be blocked bool M5_VAR_USED success = - ruby_port->master_ports[i]->sendTimingReq(pkt); + ruby_port->request_ports[i]->sendTimingReq(pkt); assert(success); return true; } @@ -212,7 +215,7 @@ RubyPort::PioSlavePort::recvTimingReq(PacketPtr pkt) } Tick -RubyPort::PioSlavePort::recvAtomic(PacketPtr pkt) +RubyPort::PioResponsePort::recvAtomic(PacketPtr pkt) { RubyPort *ruby_port = static_cast(&owner); // Only atomic_noncaching mode supported! 
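As a point of reference for the id plumbing used throughout this patch, a minimal sketch of how a requestor now tags the memory requests it issues; sys, paddr and size are hypothetical stand-ins, and the flags are left at 0 as in the controller code above:

// Obtain a RequestorID once, typically in the SimObject's constructor,
// exactly as Stage2MMU and AbstractController do elsewhere in this patch.
RequestorID requestorId = sys->getRequestorId(this);

// Tag every Request issued by this object with that id; on the receiving
// side the renamed accessor is req->requestorId().
RequestPtr req = std::make_shared<Request>(paddr, size, 0, requestorId);
PacketPtr pkt = Packet::createRead(req);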
@@ -220,11 +223,11 @@ RubyPort::PioSlavePort::recvAtomic(PacketPtr pkt) panic("Ruby supports atomic accesses only in noncaching mode\n"); } - for (size_t i = 0; i < ruby_port->master_ports.size(); ++i) { - AddrRangeList l = ruby_port->master_ports[i]->getAddrRanges(); + for (size_t i = 0; i < ruby_port->request_ports.size(); ++i) { + AddrRangeList l = ruby_port->request_ports[i]->getAddrRanges(); for (auto it = l.begin(); it != l.end(); ++it) { if (it->contains(pkt->getAddr())) { - return ruby_port->master_ports[i]->sendAtomic(pkt); + return ruby_port->request_ports[i]->sendAtomic(pkt); } } } @@ -232,7 +235,7 @@ RubyPort::PioSlavePort::recvAtomic(PacketPtr pkt) } bool -RubyPort::MemSlavePort::recvTimingReq(PacketPtr pkt) +RubyPort::MemResponsePort::recvTimingReq(PacketPtr pkt) { DPRINTF(RubyPort, "Timing request for address %#x on port %d\n", pkt->getAddr(), id); @@ -255,7 +258,7 @@ RubyPort::MemSlavePort::recvTimingReq(PacketPtr pkt) if (pkt->cmd != MemCmd::MemSyncReq) { if (!isPhysMemAddress(pkt)) { assert(!pkt->req->isHTMCmd()); - assert(ruby_port->memMasterPort.isConnected()); + assert(ruby_port->memRequestPort.isConnected()); DPRINTF(RubyPort, "Request address %#x assumed to be a " "pio address\n", pkt->getAddr()); @@ -265,7 +268,7 @@ RubyPort::MemSlavePort::recvTimingReq(PacketPtr pkt) // send next cycle RubySystem *rs = ruby_port->m_ruby_system; - ruby_port->memMasterPort.schedTimingReq(pkt, + ruby_port->memRequestPort.schedTimingReq(pkt, curTick() + rs->clockPeriod()); return true; } @@ -304,7 +307,7 @@ RubyPort::MemSlavePort::recvTimingReq(PacketPtr pkt) } Tick -RubyPort::MemSlavePort::recvAtomic(PacketPtr pkt) +RubyPort::MemResponsePort::recvAtomic(PacketPtr pkt) { RubyPort *ruby_port = static_cast(&owner); // Only atomic_noncaching mode supported! @@ -316,7 +319,7 @@ RubyPort::MemSlavePort::recvAtomic(PacketPtr pkt) // pio port. if (pkt->cmd != MemCmd::MemSyncReq) { if (!isPhysMemAddress(pkt)) { - assert(ruby_port->memMasterPort.isConnected()); + assert(ruby_port->memRequestPort.isConnected()); DPRINTF(RubyPort, "Request address %#x assumed to be a " "pio address\n", pkt->getAddr()); @@ -325,7 +328,7 @@ RubyPort::MemSlavePort::recvAtomic(PacketPtr pkt) pkt->pushSenderState(new SenderState(this)); // send next cycle - Tick req_ticks = ruby_port->memMasterPort.sendAtomic(pkt); + Tick req_ticks = ruby_port->memRequestPort.sendAtomic(pkt); return ruby_port->ticksToCycles(req_ticks); } @@ -349,12 +352,12 @@ RubyPort::MemSlavePort::recvAtomic(PacketPtr pkt) } void -RubyPort::MemSlavePort::addToRetryList() +RubyPort::MemResponsePort::addToRetryList() { RubyPort *ruby_port = static_cast(&owner); // - // Unless the requestor do not want retries (e.g., the Ruby tester), + // Unless the request port do not want retries (e.g., the Ruby tester), // record the stalled M5 port for later retry when the sequencer // becomes free. // @@ -364,7 +367,7 @@ RubyPort::MemSlavePort::addToRetryList() } void -RubyPort::MemSlavePort::recvFunctional(PacketPtr pkt) +RubyPort::MemResponsePort::recvFunctional(PacketPtr pkt) { DPRINTF(RubyPort, "Functional access for address: %#x\n", pkt->getAddr()); @@ -375,8 +378,8 @@ RubyPort::MemSlavePort::recvFunctional(PacketPtr pkt) // pio port. 
if (!isPhysMemAddress(pkt)) { DPRINTF(RubyPort, "Pio Request for address: 0x%#x\n", pkt->getAddr()); - assert(rp->pioMasterPort.isConnected()); - rp->pioMasterPort.sendFunctional(pkt); + assert(rp->pioRequestPort.isConnected()); + rp->pioRequestPort.sendFunctional(pkt); return; } @@ -402,14 +405,14 @@ RubyPort::MemSlavePort::recvFunctional(PacketPtr pkt) panic("Unsupported functional command %s\n", pkt->cmdString()); } - // Unless the requester explicitly said otherwise, generate an error if - // the functional request failed + // Unless the request port explicitly said otherwise, generate an error + // if the functional request failed if (!accessSucceeded && !pkt->suppressFuncError()) { fatal("Ruby functional %s failed for address %#x\n", pkt->isWrite() ? "write" : "read", pkt->getAddr()); } - // turn packet around to go back to requester if response expected + // turn packet around to go back to request port if response expected if (needsResponse) { // The pkt is already turned into a reponse if the directory // forwarded the request to the memory controller (see @@ -439,7 +442,7 @@ RubyPort::ruby_hit_callback(PacketPtr pkt) // First we must retrieve the request port from the sender State RubyPort::SenderState *senderState = safe_cast(pkt->popSenderState()); - MemSlavePort *port = senderState->port; + MemResponsePort *port = senderState->port; assert(port != NULL); delete senderState; @@ -452,8 +455,8 @@ void RubyPort::trySendRetries() { // - // If we had to stall the MemSlavePorts, wake them up because the sequencer - // likely has free resources now. + // If we had to stall the MemResponsePorts, wake them up because the + // sequencer likely has free resources now. // if (!retryList.empty()) { // Record the current list of ports to retry on a temporary list @@ -461,7 +464,7 @@ RubyPort::trySendRetries() // an immediate retry, which may result in the ports being put back on // the list. Therefore we want to clear the retryList before calling // sendRetryReq. - std::vector curRetryList(retryList); + std::vector curRetryList(retryList); retryList.clear(); @@ -509,11 +512,11 @@ RubyPort::drain() } void -RubyPort::MemSlavePort::hitCallback(PacketPtr pkt) +RubyPort::MemResponsePort::hitCallback(PacketPtr pkt) { bool needsResponse = pkt->needsResponse(); - // Unless specified at configuraiton, all responses except failed SC + // Unless specified at configuration, all responses except failed SC // and Flush operations access M5 physical memory. bool accessPhysMem = access_backing_store; @@ -558,7 +561,7 @@ RubyPort::MemSlavePort::hitCallback(PacketPtr pkt) // We must check device memory first in case it overlaps with the // system memory range. if (ruby_port->system->isDeviceMemAddr(pkt)) { - auto dmem = ruby_port->system->getDeviceMemory(pkt->masterId()); + auto dmem = ruby_port->system->getDeviceMemory(pkt->requestorId()); dmem->access(pkt); } else if (ruby_port->system->isMemAddr(pkt->getAddr())) { rs->getPhysMem()->access(pkt); @@ -569,7 +572,7 @@ RubyPort::MemSlavePort::hitCallback(PacketPtr pkt) pkt->makeResponse(); } - // turn packet around to go back to requester if response expected + // turn packet around to go back to request port if response expected if (needsResponse || pkt->isResponse()) { DPRINTF(RubyPort, "Sending packet back over port\n"); // Send a response in the same cycle. 
There is no need to delay the @@ -584,15 +587,15 @@ RubyPort::MemSlavePort::hitCallback(PacketPtr pkt) } AddrRangeList -RubyPort::PioSlavePort::getAddrRanges() const +RubyPort::PioResponsePort::getAddrRanges() const { - // at the moment the assumption is that the master does not care + // at the moment the assumption is that the request port does not care AddrRangeList ranges; RubyPort *ruby_port = static_cast(&owner); - for (size_t i = 0; i < ruby_port->master_ports.size(); ++i) { + for (size_t i = 0; i < ruby_port->request_ports.size(); ++i) { ranges.splice(ranges.begin(), - ruby_port->master_ports[i]->getAddrRanges()); + ruby_port->request_ports[i]->getAddrRanges()); } for (const auto M5_VAR_USED &r : ranges) DPRINTF(RubyPort, "%s\n", r.to_string()); @@ -600,7 +603,7 @@ RubyPort::PioSlavePort::getAddrRanges() const } bool -RubyPort::MemSlavePort::isPhysMemAddress(PacketPtr pkt) const +RubyPort::MemResponsePort::isPhysMemAddress(PacketPtr pkt) const { RubyPort *ruby_port = static_cast(&owner); return ruby_port->system->isMemAddr(pkt->getAddr()) @@ -613,16 +616,17 @@ RubyPort::ruby_eviction_callback(Addr address) DPRINTF(RubyPort, "Sending invalidations.\n"); // Allocate the invalidate request and packet on the stack, as it is // assumed they will not be modified or deleted by receivers. - // TODO: should this really be using funcMasterId? + // TODO: should this really be using funcRequestorId? auto request = std::make_shared( address, RubySystem::getBlockSizeBytes(), 0, - Request::funcMasterId); + Request::funcRequestorId); // Use a single packet to signal all snooping ports of the invalidation. // This assumes that snooping ports do NOT modify the packet/request Packet pkt(request, MemCmd::InvalidateReq); - for (CpuPortIter p = slave_ports.begin(); p != slave_ports.end(); ++p) { - // check if the connected master port is snooping + for (CpuPortIter p = response_ports.begin(); p != response_ports.end(); + ++p) { + // check if the connected request port is snooping if ((*p)->isSnooping()) { // send as a snoop request (*p)->sendTimingSnoopReq(&pkt); @@ -631,12 +635,12 @@ RubyPort::ruby_eviction_callback(Addr address) } void -RubyPort::PioMasterPort::recvRangeChange() +RubyPort::PioRequestPort::recvRangeChange() { RubyPort &r = static_cast(owner); r.gotAddrRanges--; if (r.gotAddrRanges == 0 && FullSystem) { - r.pioSlavePort.sendRangeChange(); + r.pioResponsePort.sendRangeChange(); } } @@ -644,7 +648,7 @@ int RubyPort::functionalWrite(Packet *func_pkt) { int num_written = 0; - for (auto port : slave_ports) { + for (auto port : response_ports) { if (port->trySatisfyFunctional(func_pkt)) { num_written += 1; } diff --git a/src/mem/ruby/system/RubyPort.hh b/src/mem/ruby/system/RubyPort.hh index 1e21090ad..73c4557c2 100644 --- a/src/mem/ruby/system/RubyPort.hh +++ b/src/mem/ruby/system/RubyPort.hh @@ -58,21 +58,21 @@ class AbstractController; class RubyPort : public ClockedObject { public: - class MemMasterPort : public QueuedMasterPort + class MemRequestPort : public QueuedRequestPort { private: ReqPacketQueue reqQueue; SnoopRespPacketQueue snoopRespQueue; public: - MemMasterPort(const std::string &_name, RubyPort *_port); + MemRequestPort(const std::string &_name, RubyPort *_port); protected: bool recvTimingResp(PacketPtr pkt); void recvRangeChange() {} }; - class MemSlavePort : public QueuedSlavePort + class MemResponsePort : public QueuedResponsePort { private: RespPacketQueue queue; @@ -80,7 +80,7 @@ class RubyPort : public ClockedObject bool no_retry_on_stall; public: - MemSlavePort(const 
std::string &_name, RubyPort *_port, + MemResponsePort(const std::string &_name, RubyPort *_port, bool _access_backing_store, PortID id, bool _no_retry_on_stall); void hitCallback(PacketPtr pkt); @@ -102,27 +102,27 @@ class RubyPort : public ClockedObject bool isPhysMemAddress(PacketPtr pkt) const; }; - class PioMasterPort : public QueuedMasterPort + class PioRequestPort : public QueuedRequestPort { private: ReqPacketQueue reqQueue; SnoopRespPacketQueue snoopRespQueue; public: - PioMasterPort(const std::string &_name, RubyPort *_port); + PioRequestPort(const std::string &_name, RubyPort *_port); protected: bool recvTimingResp(PacketPtr pkt); void recvRangeChange(); }; - class PioSlavePort : public QueuedSlavePort + class PioResponsePort : public QueuedResponsePort { private: RespPacketQueue queue; public: - PioSlavePort(const std::string &_name, RubyPort *_port); + PioResponsePort(const std::string &_name, RubyPort *_port); protected: bool recvTimingReq(PacketPtr pkt); @@ -130,15 +130,16 @@ class RubyPort : public ClockedObject Tick recvAtomic(PacketPtr pkt); void recvFunctional(PacketPtr pkt) - { panic("recvFunctional should never be called on pio slave port!"); } + { panic("recvFunctional should never be called on pio response " + "port!"); } AddrRangeList getAddrRanges() const; }; struct SenderState : public Packet::SenderState { - MemSlavePort *port; - SenderState(MemSlavePort * _port) : port(_port) + MemResponsePort *port; + SenderState(MemResponsePort * _port) : port(_port) {} }; @@ -178,11 +179,11 @@ class RubyPort : public ClockedObject * Called by the PIO port when receiving a timing response. * * @param pkt Response packet - * @param master_port_id Port id of the PIO port + * @param request_port_id Port id of the PIO port * * @return Whether successfully sent */ - bool recvTimingResp(PacketPtr pkt, PortID master_port_id); + bool recvTimingResp(PacketPtr pkt, PortID request_port_id); RubySystem *m_ruby_system; uint32_t m_version; @@ -191,35 +192,35 @@ class RubyPort : public ClockedObject bool m_usingRubyTester; System* system; - std::vector slave_ports; + std::vector response_ports; private: - bool onRetryList(MemSlavePort * port) + bool onRetryList(MemResponsePort * port) { return (std::find(retryList.begin(), retryList.end(), port) != retryList.end()); } - void addToRetryList(MemSlavePort * port) + void addToRetryList(MemResponsePort * port) { if (onRetryList(port)) return; retryList.push_back(port); } - PioMasterPort pioMasterPort; - PioSlavePort pioSlavePort; - MemMasterPort memMasterPort; - MemSlavePort memSlavePort; + PioRequestPort pioRequestPort; + PioResponsePort pioResponsePort; + MemRequestPort memRequestPort; + MemResponsePort memResponsePort; unsigned int gotAddrRanges; /** Vector of M5 Ports attached to this Ruby port. */ - typedef std::vector::iterator CpuPortIter; - std::vector master_ports; + typedef std::vector::iterator CpuPortIter; + std::vector request_ports; // // Based on similar code in the M5 bus. Stores pointers to those ports // that should be called when the Sequencer becomes available after a stall. 
// - std::vector retryList; + std::vector retryList; bool m_isCPUSequencer; }; diff --git a/src/mem/ruby/system/RubySystem.cc b/src/mem/ruby/system/RubySystem.cc index c35ab023f..ac5515f53 100644 --- a/src/mem/ruby/system/RubySystem.cc +++ b/src/mem/ruby/system/RubySystem.cc @@ -123,23 +123,23 @@ RubySystem::registerMachineID(const MachineID& mach_id, Network* network) machineToNetwork.insert(std::make_pair(mach_id, network_id)); } -// This registers all master IDs in the system for functional reads. This -// should be called in init() since master IDs are obtained in a SimObject's +// This registers all requestor IDs in the system for functional reads. This +// should be called in init() since requestor IDs are obtained in a SimObject's // constructor and there are functional reads/writes between init() and // startup(). void -RubySystem::registerMasterIDs() +RubySystem::registerRequestorIDs() { - // Create the map for MasterID to network node. This is done in init() - // because all MasterIDs must be obtained in the constructor and + // Create the map for RequestorID to network node. This is done in init() + // because all RequestorIDs must be obtained in the constructor and // AbstractControllers are registered in their constructor. This is done // in two steps: (1) Add all of the AbstractControllers. Since we don't - // have a mapping of MasterID to MachineID this is the easiest way to - // filter out AbstractControllers from non-Ruby masters. (2) Go through - // the system's list of MasterIDs and add missing MasterIDs to network 0 - // (the default). + // have a mapping of RequestorID to MachineID this is the easiest way to + // filter out AbstractControllers from non-Ruby requestors. (2) Go through + // the system's list of RequestorIDs and add missing RequestorIDs to + // network 0 (the default). for (auto& cntrl : m_abs_cntrl_vec) { - MasterID mid = cntrl->getMasterId(); + RequestorID id = cntrl->getRequestorId(); MachineID mach_id = cntrl->getMachineID(); // These are setup in Network constructor and should exist @@ -148,16 +148,16 @@ RubySystem::registerMasterIDs() MachineIDToString(mach_id).c_str()); auto network_id = machineToNetwork[mach_id]; - masterToNetwork.insert(std::make_pair(mid, network_id)); + requestorToNetwork.insert(std::make_pair(id, network_id)); // Create helper vectors for each network to iterate over. netCntrls[network_id].push_back(cntrl); } - // Default all other master IDs to network 0 - for (auto mid = 0; mid < params()->system->maxMasters(); ++mid) { - if (!masterToNetwork.count(mid)) { - masterToNetwork.insert(std::make_pair(mid, 0)); + // Default all other requestor IDs to network 0 + for (auto id = 0; id < params()->system->maxRequestors(); ++id) { + if (!requestorToNetwork.count(id)) { + requestorToNetwork.insert(std::make_pair(id, 0)); } } } @@ -400,7 +400,7 @@ RubySystem::unserialize(CheckpointIn &cp) void RubySystem::init() { - registerMasterIDs(); + registerRequestorIDs(); } void @@ -491,9 +491,9 @@ RubySystem::functionalRead(PacketPtr pkt) unsigned int num_invalid = 0; // Only send functional requests within the same network. 
- assert(masterToNetwork.count(pkt->masterId())); - int master_net_id = masterToNetwork[pkt->masterId()]; - assert(netCntrls.count(master_net_id)); + assert(requestorToNetwork.count(pkt->requestorId())); + int request_net_id = requestorToNetwork[pkt->requestorId()]; + assert(netCntrls.count(request_net_id)); AbstractController *ctrl_ro = nullptr; AbstractController *ctrl_rw = nullptr; @@ -501,7 +501,7 @@ RubySystem::functionalRead(PacketPtr pkt) // In this loop we count the number of controllers that have the given // address in read only, read write and busy states. - for (auto& cntrl : netCntrls[master_net_id]) { + for (auto& cntrl : netCntrls[request_net_id]) { access_perm = cntrl-> getAccessPermission(line_address); if (access_perm == AccessPermission_Read_Only){ num_ro++; @@ -537,7 +537,7 @@ RubySystem::functionalRead(PacketPtr pkt) // The reason is because the Backing_Store memory could easily be stale, if // there are copies floating around the cache hierarchy, so you want to read // it only if it's not in the cache hierarchy at all. - int num_controllers = netCntrls[master_net_id].size(); + int num_controllers = netCntrls[request_net_id].size(); if (num_invalid == (num_controllers - 1) && num_backing_store == 1) { DPRINTF(RubySystem, "only copy in Backing_Store memory, read from it\n"); ctrl_backing_store->functionalRead(line_address, pkt); @@ -573,7 +573,7 @@ RubySystem::functionalRead(PacketPtr pkt) DPRINTF(RubySystem, "Controllers functionalRead lookup " "(num_maybe_stale=%d, num_busy = %d)\n", num_maybe_stale, num_busy); - for (auto& cntrl : netCntrls[master_net_id]) { + for (auto& cntrl : netCntrls[request_net_id]) { if (cntrl->functionalReadBuffers(pkt)) return true; } @@ -605,11 +605,11 @@ RubySystem::functionalWrite(PacketPtr pkt) uint32_t M5_VAR_USED num_functional_writes = 0; // Only send functional requests within the same network. 
- assert(masterToNetwork.count(pkt->masterId())); - int master_net_id = masterToNetwork[pkt->masterId()]; - assert(netCntrls.count(master_net_id)); + assert(requestorToNetwork.count(pkt->requestorId())); + int request_net_id = requestorToNetwork[pkt->requestorId()]; + assert(netCntrls.count(request_net_id)); - for (auto& cntrl : netCntrls[master_net_id]) { + for (auto& cntrl : netCntrls[request_net_id]) { num_functional_writes += cntrl->functionalWriteBuffers(pkt); access_perm = cntrl->getAccessPermission(line_addr); diff --git a/src/mem/ruby/system/RubySystem.hh b/src/mem/ruby/system/RubySystem.hh index d14b3833c..cdd2b5cfb 100644 --- a/src/mem/ruby/system/RubySystem.hh +++ b/src/mem/ruby/system/RubySystem.hh @@ -97,7 +97,7 @@ class RubySystem : public ClockedObject void registerNetwork(Network*); void registerAbstractController(AbstractController*); void registerMachineID(const MachineID& mach_id, Network* network); - void registerMasterIDs(); + void registerRequestorIDs(); bool eventQueueEmpty() { return eventq->empty(); } void enqueueRubyEvent(Tick tick) @@ -142,7 +142,7 @@ class RubySystem : public ClockedObject Cycles m_start_cycle; std::unordered_map machineToNetwork; - std::unordered_map masterToNetwork; + std::unordered_map requestorToNetwork; std::unordered_map> netCntrls; public: diff --git a/src/mem/ruby/system/Sequencer.py b/src/mem/ruby/system/Sequencer.py index 47c5b41ce..6869fc28c 100644 --- a/src/mem/ruby/system/Sequencer.py +++ b/src/mem/ruby/system/Sequencer.py @@ -35,12 +35,21 @@ class RubyPort(ClockedObject): cxx_header = "mem/ruby/system/RubyPort.hh" version = Param.Int(0, "") - slave = VectorSlavePort("CPU slave port") - master = VectorMasterPort("CPU master port") - pio_master_port = RequestPort("Ruby mem master port") - mem_master_port = RequestPort("Ruby mem master port") - pio_slave_port = ResponsePort("Ruby pio slave port") - mem_slave_port = ResponsePort("Ruby memory port") + response_ports = VectorResponsePort("CPU response port") + slave = DeprecatedParam(response_ports, + '`slave` is now called `response_ports`') + request_ports = VectorRequestPort("CPU request port") + master = DeprecatedParam(request_ports, + '`master` is now called `request_ports`') + pio_request_port = RequestPort("Ruby pio request port") + pio_master_port = DeprecatedParam(pio_request_port, + '`pio_master_port` is now called `pio_request_port`') + mem_request_port = RequestPort("Ruby mem request port") + mem_master_port = DeprecatedParam(mem_request_port, + '`mem_master_port` is now called `mem_request_port`') + pio_response_port = ResponsePort("Ruby pio response port") + pio_slave_port = DeprecatedParam(pio_response_port, + '`pio_slave_port` is now called `pio_response_port`') using_ruby_tester = Param.Bool(False, "") no_retry_on_stall = Param.Bool(False, "") diff --git a/src/mem/ruby/system/VIPERCoalescer.cc b/src/mem/ruby/system/VIPERCoalescer.cc index eafce6da7..a8a3aa952 100644 --- a/src/mem/ruby/system/VIPERCoalescer.cc +++ b/src/mem/ruby/system/VIPERCoalescer.cc @@ -248,7 +248,7 @@ VIPERCoalescer::writeCompleteCallback(Addr addr, uint64_t instSeqNum) RubyPort::SenderState *ss = safe_cast (writeCompletePkt->senderState); - MemSlavePort *port = ss->port; + MemResponsePort *port = ss->port; assert(port != NULL); writeCompletePkt->senderState = ss->predecessor; diff --git a/src/mem/serial_link.cc b/src/mem/serial_link.cc index d05328e12..74ac43f54 100644 --- a/src/mem/serial_link.cc +++ b/src/mem/serial_link.cc @@ -51,14 +51,15 @@ #include "debug/SerialLink.hh" #include 
"params/SerialLink.hh" -SerialLink::SerialLinkSlavePort::SerialLinkSlavePort(const std::string& _name, +SerialLink::SerialLinkResponsePort:: +SerialLinkResponsePort(const std::string& _name, SerialLink& _serial_link, - SerialLinkMasterPort& _masterPort, + SerialLinkRequestPort& _mem_side_port, Cycles _delay, int _resp_limit, const std::vector& _ranges) : ResponsePort(_name, &_serial_link), serial_link(_serial_link), - masterPort(_masterPort), delay(_delay), + mem_side_port(_mem_side_port), delay(_delay), ranges(_ranges.begin(), _ranges.end()), outstandingResponses(0), retryReq(false), respQueueLimit(_resp_limit), @@ -66,21 +67,22 @@ SerialLink::SerialLinkSlavePort::SerialLinkSlavePort(const std::string& _name, { } -SerialLink::SerialLinkMasterPort::SerialLinkMasterPort(const std::string& +SerialLink::SerialLinkRequestPort::SerialLinkRequestPort(const std::string& _name, SerialLink& _serial_link, - SerialLinkSlavePort& _slavePort, - Cycles _delay, int _req_limit) + SerialLinkResponsePort& + _cpu_side_port, Cycles _delay, + int _req_limit) : RequestPort(_name, &_serial_link), serial_link(_serial_link), - slavePort(_slavePort), delay(_delay), reqQueueLimit(_req_limit), + cpu_side_port(_cpu_side_port), delay(_delay), reqQueueLimit(_req_limit), sendEvent([this]{ trySendTiming(); }, _name) { } SerialLink::SerialLink(SerialLinkParams *p) : ClockedObject(p), - slavePort(p->name + ".slave", *this, masterPort, + cpu_side_port(p->name + ".cpu_side_port", *this, mem_side_port, ticksToCycles(p->delay), p->resp_size, p->ranges), - masterPort(p->name + ".master", *this, slavePort, + mem_side_port(p->name + ".mem_side_port", *this, cpu_side_port, ticksToCycles(p->delay), p->req_size), num_lanes(p->num_lanes), link_speed(p->link_speed) @@ -91,10 +93,10 @@ SerialLink::SerialLink(SerialLinkParams *p) Port& SerialLink::getPort(const std::string &if_name, PortID idx) { - if (if_name == "master") - return masterPort; - else if (if_name == "slave") - return slavePort; + if (if_name == "mem_side_port") + return mem_side_port; + else if (if_name == "cpu_side_port") + return cpu_side_port; else // pass it along to our super class return ClockedObject::getPort(if_name, idx); @@ -104,29 +106,29 @@ void SerialLink::init() { // make sure both sides are connected and have the same block size - if (!slavePort.isConnected() || !masterPort.isConnected()) + if (!cpu_side_port.isConnected() || !mem_side_port.isConnected()) fatal("Both ports of a serial_link must be connected.\n"); - // notify the master side of our address ranges - slavePort.sendRangeChange(); + // notify the request side of our address ranges + cpu_side_port.sendRangeChange(); } bool -SerialLink::SerialLinkSlavePort::respQueueFull() const +SerialLink::SerialLinkResponsePort::respQueueFull() const { return outstandingResponses == respQueueLimit; } bool -SerialLink::SerialLinkMasterPort::reqQueueFull() const +SerialLink::SerialLinkRequestPort::reqQueueFull() const { return transmitList.size() == reqQueueLimit; } bool -SerialLink::SerialLinkMasterPort::recvTimingResp(PacketPtr pkt) +SerialLink::SerialLinkRequestPort::recvTimingResp(PacketPtr pkt) { - // all checks are done when the request is accepted on the slave + // all checks are done when the request is accepted on the response // side, so we are guaranteed to have space for the response DPRINTF(SerialLink, "recvTimingResp: %s addr 0x%x\n", pkt->cmdString(), pkt->getAddr()); @@ -151,13 +153,13 @@ SerialLink::SerialLinkMasterPort::recvTimingResp(PacketPtr pkt) // one crosses this link faster than the first 
one (because the packet // waits in the link based on its size). This can reorder the received // response. - slavePort.schedTimingResp(pkt, t); + cpu_side_port.schedTimingResp(pkt, t); return true; } bool -SerialLink::SerialLinkSlavePort::recvTimingReq(PacketPtr pkt) +SerialLink::SerialLinkResponsePort::recvTimingReq(PacketPtr pkt) { DPRINTF(SerialLink, "recvTimingReq: %s addr 0x%x\n", pkt->cmdString(), pkt->getAddr()); @@ -169,7 +171,7 @@ SerialLink::SerialLinkSlavePort::recvTimingReq(PacketPtr pkt) transmitList.size(), outstandingResponses); // if the request queue is full then there is no hope - if (masterPort.reqQueueFull()) { + if (mem_side_port.reqQueueFull()) { DPRINTF(SerialLink, "Request queue full\n"); retryReq = true; } else if ( !retryReq ) { @@ -212,19 +214,19 @@ SerialLink::SerialLinkSlavePort::recvTimingReq(PacketPtr pkt) // that the second one crosses this link faster than the first one // (because the packet waits in the link based on its size). // This can reorder the received response. - masterPort.schedTimingReq(pkt, t); + mem_side_port.schedTimingReq(pkt, t); } } // remember that we are now stalling a packet and that we have to - // tell the sending master to retry once space becomes available, + // tell the sending requestor to retry once space becomes available, // we make no distinction whether the stalling is due to the // request queue or response queue being full return !retryReq; } void -SerialLink::SerialLinkSlavePort::retryStalledReq() +SerialLink::SerialLinkResponsePort::retryStalledReq() { if (retryReq) { DPRINTF(SerialLink, "Request waiting for retry, now retrying\n"); @@ -234,7 +236,7 @@ SerialLink::SerialLinkSlavePort::retryStalledReq() } void -SerialLink::SerialLinkMasterPort::schedTimingReq(PacketPtr pkt, Tick when) +SerialLink::SerialLinkRequestPort::schedTimingReq(PacketPtr pkt, Tick when) { // If we're about to put this packet at the head of the queue, we // need to schedule an event to do the transmit. Otherwise there @@ -251,7 +253,7 @@ SerialLink::SerialLinkMasterPort::schedTimingReq(PacketPtr pkt, Tick when) void -SerialLink::SerialLinkSlavePort::schedTimingResp(PacketPtr pkt, Tick when) +SerialLink::SerialLinkResponsePort::schedTimingResp(PacketPtr pkt, Tick when) { // If we're about to put this packet at the head of the queue, we // need to schedule an event to do the transmit. 
Otherwise there @@ -265,7 +267,7 @@ SerialLink::SerialLinkSlavePort::schedTimingResp(PacketPtr pkt, Tick when) } void -SerialLink::SerialLinkMasterPort::trySendTiming() +SerialLink::SerialLinkRequestPort::trySendTiming() { assert(!transmitList.empty()); @@ -300,7 +302,7 @@ SerialLink::SerialLinkMasterPort::trySendTiming() // then send a retry at this point, also note that if the // request we stalled was waiting for the response queue // rather than the request queue we might stall it again - slavePort.retryStalledReq(); + cpu_side_port.retryStalledReq(); } // if the send failed, then we try again once we receive a retry, @@ -308,7 +310,7 @@ SerialLink::SerialLinkMasterPort::trySendTiming() } void -SerialLink::SerialLinkSlavePort::trySendTiming() +SerialLink::SerialLinkResponsePort::trySendTiming() { assert(!transmitList.empty()); @@ -344,7 +346,7 @@ SerialLink::SerialLinkSlavePort::trySendTiming() // if there is space in the request queue and we were stalling // a request, it will definitely be possible to accept it now // since there is guaranteed space in the response queue - if (!masterPort.reqQueueFull() && retryReq) { + if (!mem_side_port.reqQueueFull() && retryReq) { DPRINTF(SerialLink, "Request waiting for retry, now retrying\n"); retryReq = false; sendRetryReq(); @@ -356,25 +358,25 @@ SerialLink::SerialLinkSlavePort::trySendTiming() } void -SerialLink::SerialLinkMasterPort::recvReqRetry() +SerialLink::SerialLinkRequestPort::recvReqRetry() { trySendTiming(); } void -SerialLink::SerialLinkSlavePort::recvRespRetry() +SerialLink::SerialLinkResponsePort::recvRespRetry() { trySendTiming(); } Tick -SerialLink::SerialLinkSlavePort::recvAtomic(PacketPtr pkt) +SerialLink::SerialLinkResponsePort::recvAtomic(PacketPtr pkt) { - return delay * serial_link.clockPeriod() + masterPort.sendAtomic(pkt); + return delay * serial_link.clockPeriod() + mem_side_port.sendAtomic(pkt); } void -SerialLink::SerialLinkSlavePort::recvFunctional(PacketPtr pkt) +SerialLink::SerialLinkResponsePort::recvFunctional(PacketPtr pkt) { pkt->pushLabel(name()); @@ -386,19 +388,19 @@ SerialLink::SerialLinkSlavePort::recvFunctional(PacketPtr pkt) } } - // also check the master port's request queue - if (masterPort.trySatisfyFunctional(pkt)) { + // also check the memory-side port's request queue + if (mem_side_port.trySatisfyFunctional(pkt)) { return; } pkt->popLabel(); // fall through if pkt still not satisfied - masterPort.sendFunctional(pkt); + mem_side_port.sendFunctional(pkt); } bool -SerialLink::SerialLinkMasterPort::trySatisfyFunctional(PacketPtr pkt) +SerialLink::SerialLinkRequestPort::trySatisfyFunctional(PacketPtr pkt) { bool found = false; auto i = transmitList.begin(); @@ -415,7 +417,7 @@ SerialLink::SerialLinkMasterPort::trySatisfyFunctional(PacketPtr pkt) } AddrRangeList -SerialLink::SerialLinkSlavePort::getAddrRanges() const +SerialLink::SerialLinkResponsePort::getAddrRanges() const { return ranges; } diff --git a/src/mem/serial_link.hh b/src/mem/serial_link.hh index 7f047ef1f..903387e91 100644 --- a/src/mem/serial_link.hh +++ b/src/mem/serial_link.hh @@ -82,16 +82,16 @@ class SerialLink : public ClockedObject { } }; - // Forward declaration to allow the slave port to have a pointer - class SerialLinkMasterPort; + // Forward declaration to allow the CPU-side port to have a pointer + class SerialLinkRequestPort; /** * The port on the side that receives requests and sends - * responses. The slave port has a set of address ranges that it - * is responsible for. 
The slave port also has a buffer for the + * responses. The CPU-side port has a set of address ranges that it + * is responsible for. The CPU-side port also has a buffer for the * responses not yet sent. */ - class SerialLinkSlavePort : public ResponsePort + class SerialLinkResponsePort : public ResponsePort { private: @@ -100,9 +100,9 @@ class SerialLink : public ClockedObject SerialLink& serial_link; /** - * Master port on the other side of the serial_link. + * Request port on the other side of the serial_link. */ - SerialLinkMasterPort& masterPort; + SerialLinkRequestPort& mem_side_port; /** Minimum request delay though this serial_link. */ const Cycles delay; @@ -147,18 +147,18 @@ class SerialLink : public ClockedObject public: /** - * Constructor for the SerialLinkSlavePort. + * Constructor for the SerialLinkResponsePort. * * @param _name the port name including the owner * @param _serial_link the structural owner - * @param _masterPort the master port on the other side of the + * @param _mem_side_port the memory-side port on the other side of the * serial_link * @param _delay the delay in cycles from receiving to sending * @param _resp_limit the size of the response queue * @param _ranges a number of address ranges to forward */ - SerialLinkSlavePort(const std::string& _name, SerialLink& - _serial_link, SerialLinkMasterPort& _masterPort, + SerialLinkResponsePort(const std::string& _name, SerialLink& + _serial_link, SerialLinkRequestPort& _mem_side_port, Cycles _delay, int _resp_limit, const std::vector& _ranges); @@ -204,10 +204,10 @@ class SerialLink : public ClockedObject /** * Port on the side that forwards requests and receives - * responses. The master port has a buffer for the requests not + * responses. The memory-side port has a buffer for the requests not * yet sent. */ - class SerialLinkMasterPort : public RequestPort + class SerialLinkRequestPort : public RequestPort { private: @@ -216,9 +216,10 @@ class SerialLink : public ClockedObject SerialLink& serial_link; /** - * The slave port on the other side of the serial_link. + * The response (CPU-side port) port on the other side of + * the serial_link. */ - SerialLinkSlavePort& slavePort; + SerialLinkResponsePort& cpu_side_port; /** Minimum delay though this serial_link. */ const Cycles delay; @@ -247,18 +248,18 @@ class SerialLink : public ClockedObject public: /** - * Constructor for the SerialLinkMasterPort. + * Constructor for the SerialLinkRequestPort. * * @param _name the port name including the owner * @param _serial_link the structural owner - * @param _slavePort the slave port on the other side of the - * serial_link + * @param _cpu_side_port the CPU-side port on the other + * side of the serial_link * @param _delay the delay in cycles from receiving to sending * @param _req_limit the size of the request queue */ - SerialLinkMasterPort(const std::string& _name, SerialLink& - _serial_link, SerialLinkSlavePort& _slavePort, Cycles - _delay, int _req_limit); + SerialLinkRequestPort(const std::string& _name, SerialLink& + _serial_link, SerialLinkResponsePort& _cpu_side_port, + Cycles _delay, int _req_limit); /** * Is this side blocked from accepting new request packets. @@ -297,11 +298,11 @@ class SerialLink : public ClockedObject void recvReqRetry(); }; - /** Slave port of the serial_link. */ - SerialLinkSlavePort slavePort; + /** Response port of the serial_link. */ + SerialLinkResponsePort cpu_side_port; - /** Master port of the serial_link. */ - SerialLinkMasterPort masterPort; + /** Request port of the serial_link. 
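For readers new to the renamed pair: the serial link keeps one response (CPU-side) port and one request (memory-side) port, and each holds a reference to its sibling so that requests flow one way across the link and responses flow back the other way. A minimal standalone sketch of that shape follows; the Toy* classes are illustrative stand-ins, not the real SerialLink types, and the immediate answer stands in for the peer memory system.

    #include <iostream>
    #include <string>

    // Toy stand-ins for the paired CPU-side / memory-side ports; all names
    // are illustrative, not the real SerialLink classes.
    struct ToyRequestPort;

    struct ToyResponsePort {
        ToyRequestPort *memSide = nullptr;    // sibling on the memory side
        void recvTimingReq(const std::string &req);
        void sendTimingResp(const std::string &resp) {
            std::cout << "response back to the CPU side: " << resp << "\n";
        }
    };

    struct ToyRequestPort {
        ToyResponsePort *cpuSide = nullptr;   // sibling on the CPU side
        void sendTimingReq(const std::string &req) {
            // pretend the memory answered immediately
            cpuSide->sendTimingResp(req + " -> data");
        }
    };

    void ToyResponsePort::recvTimingReq(const std::string &req) {
        memSide->sendTimingReq(req);          // forward across the link
    }

    int main() {
        ToyResponsePort cpu_side_port;
        ToyRequestPort mem_side_port;
        cpu_side_port.memSide = &mem_side_port;
        mem_side_port.cpuSide = &cpu_side_port;
        cpu_side_port.recvTimingReq("read 0x1000");
        return 0;
    }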
*/ + SerialLinkRequestPort mem_side_port; /** Number of parallel lanes in this serial link */ unsigned num_lanes; diff --git a/src/mem/simple_mem.cc b/src/mem/simple_mem.cc index 4161e87f0..c593a276a 100644 --- a/src/mem/simple_mem.cc +++ b/src/mem/simple_mem.cc @@ -154,7 +154,7 @@ SimpleMemory::recvTimingReq(PacketPtr pkt) // queue if there is one bool needsResponse = pkt->needsResponse(); recvAtomic(pkt); - // turn packet around to go back to requester if response expected + // turn packet around to go back to requestor if response expected if (needsResponse) { // recvAtomic() should already have turned packet into // atomic response @@ -260,7 +260,7 @@ SimpleMemory::drain() SimpleMemory::MemoryPort::MemoryPort(const std::string& _name, SimpleMemory& _memory) - : SlavePort(_name, &_memory), memory(_memory) + : ResponsePort(_name, &_memory), memory(_memory) { } AddrRangeList diff --git a/src/mem/simple_mem.hh b/src/mem/simple_mem.hh index 0bcb15ccb..e80c88fd4 100644 --- a/src/mem/simple_mem.hh +++ b/src/mem/simple_mem.hh @@ -79,7 +79,7 @@ class SimpleMemory : public AbstractMemory { } }; - class MemoryPort : public SlavePort + class MemoryPort : public ResponsePort { private: SimpleMemory& memory; diff --git a/src/mem/snoop_filter.cc b/src/mem/snoop_filter.cc index d9ac52111..d1a62dc2b 100644 --- a/src/mem/snoop_filter.cc +++ b/src/mem/snoop_filter.cc @@ -61,19 +61,20 @@ SnoopFilter::eraseIfNullEntry(SnoopFilterCache::iterator& sf_it) } std::pair -SnoopFilter::lookupRequest(const Packet* cpkt, const ResponsePort& slave_port) +SnoopFilter::lookupRequest(const Packet* cpkt, const ResponsePort& + cpu_side_port) { DPRINTF(SnoopFilter, "%s: src %s packet %s\n", __func__, - slave_port.name(), cpkt->print()); + cpu_side_port.name(), cpkt->print()); // check if the packet came from a cache - bool allocate = !cpkt->req->isUncacheable() && slave_port.isSnooping() && - cpkt->fromCache(); + bool allocate = !cpkt->req->isUncacheable() && cpu_side_port.isSnooping() + && cpkt->fromCache(); Addr line_addr = cpkt->getBlockAddr(linesize); if (cpkt->isSecure()) { line_addr |= LineSecure; } - SnoopMask req_port = portToMask(slave_port); + SnoopMask req_port = portToMask(cpu_side_port); reqLookupResult.it = cachedLocations.find(line_addr); bool is_hit = (reqLookupResult.it != cachedLocations.end()); @@ -137,7 +138,7 @@ SnoopFilter::lookupRequest(const Packet* cpkt, const ResponsePort& slave_port) } else { // if (!cpkt->needsResponse()) assert(cpkt->isEviction()); // make sure that the sender actually had the line - panic_if((sf_item.holder & req_port).none(), "requester %x is not a " \ + panic_if((sf_item.holder & req_port).none(), "requestor %x is not a " \ "holder :( SF value %x.%x\n", req_port, sf_item.requested, sf_item.holder); // CleanEvicts and Writebacks -> the sender and all caches above @@ -333,16 +334,17 @@ SnoopFilter::updateSnoopForward(const Packet* cpkt, } void -SnoopFilter::updateResponse(const Packet* cpkt, const ResponsePort& slave_port) +SnoopFilter::updateResponse(const Packet* cpkt, const ResponsePort& + cpu_side_port) { DPRINTF(SnoopFilter, "%s: src %s packet %s\n", - __func__, slave_port.name(), cpkt->print()); + __func__, cpu_side_port.name(), cpkt->print()); assert(cpkt->isResponse()); // we only allocate if the packet actually came from a cache, but // start by checking if the port is snooping - if (cpkt->req->isUncacheable() || !slave_port.isSnooping()) + if (cpkt->req->isUncacheable() || !cpu_side_port.isSnooping()) return; // next check if we actually allocated an entry @@ -354,31 
+356,31 @@ SnoopFilter::updateResponse(const Packet* cpkt, const ResponsePort& slave_port) if (sf_it == cachedLocations.end()) return; - SnoopMask slave_mask = portToMask(slave_port); + SnoopMask response_mask = portToMask(cpu_side_port); SnoopItem& sf_item = sf_it->second; DPRINTF(SnoopFilter, "%s: old SF value %x.%x\n", __func__, sf_item.requested, sf_item.holder); // Make sure we have seen the actual request, too - panic_if((sf_item.requested & slave_mask).none(), + panic_if((sf_item.requested & response_mask).none(), "SF value %x.%x missing request bit\n", sf_item.requested, sf_item.holder); - sf_item.requested &= ~slave_mask; + sf_item.requested &= ~response_mask; // Update the residency of the cache line. if (cpkt->req->isCacheMaintenance()) { // A cache clean response does not carry any data so it // shouldn't change the holders, unless it is invalidating. if (cpkt->isInvalidate()) { - sf_item.holder &= ~slave_mask; + sf_item.holder &= ~response_mask; } eraseIfNullEntry(sf_it); } else { // Any other response implies that a cache above will have the // block. - sf_item.holder |= slave_mask; + sf_item.holder |= response_mask; assert((sf_item.holder | sf_item.requested).any()); } DPRINTF(SnoopFilter, "%s: new SF value %x.%x\n", diff --git a/src/mem/snoop_filter.hh b/src/mem/snoop_filter.hh index 62d764ed9..6a3832559 100644 --- a/src/mem/snoop_filter.hh +++ b/src/mem/snoop_filter.hh @@ -89,7 +89,7 @@ class SnoopFilter : public SimObject { // Change for systems with more than 256 ports tracked by this object static const int SNOOP_MASK_SIZE = 256; - typedef std::vector SnoopList; + typedef std::vector SnoopList; SnoopFilter (const SnoopFilterParams *p) : SimObject(p), reqLookupResult(cachedLocations.end()), @@ -99,20 +99,20 @@ class SnoopFilter : public SimObject { } /** - * Init a new snoop filter and tell it about all the slave ports + * Init a new snoop filter and tell it about all the cpu_sideports * of the enclosing bus. * - * @param slave_ports Slave ports that the bus is attached to. + * @param _cpu_side_ports Response ports that the bus is attached to. */ - void setSlavePorts(const SnoopList& slave_ports) { - localSlavePortIds.resize(slave_ports.size(), InvalidPortID); + void setCPUSidePorts(const SnoopList& _cpu_side_ports) { + localResponsePortIds.resize(_cpu_side_ports.size(), InvalidPortID); PortID id = 0; - for (const auto& p : slave_ports) { + for (const auto& p : _cpu_side_ports) { // no need to track this port if it is not snooping if (p->isSnooping()) { - slavePorts.push_back(p); - localSlavePortIds[p->getId()] = id++; + cpuSidePorts.push_back(p); + localResponsePortIds[p->getId()] = id++; } } @@ -123,19 +123,19 @@ class SnoopFilter : public SimObject { } /** - * Lookup a request (from a slave port) in the snoop filter and - * return a list of other slave ports that need forwarding of the + * Lookup a request (from a CPU-side port) in the snoop filter and + * return a list of other CPU-side ports that need forwarding of the * resulting snoops. Additionally, update the tracking structures * with new request information. Note that the caller must also * call finishRequest once it is known if the request needs to * retry or not. * - * @param cpkt Pointer to the request packet. Not changed. - * @param slave_port Slave port where the request came from. + * @param cpkt Pointer to the request packet. Not changed. + * @param cpu_side_port Response port where the request came from. * @return Pair of a vector of snoop target ports and lookup latency. 
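The requested/holder bookkeeping above comes down to one-hot masks, one bit per snooping CPU-side port: a response clears the responding port's bit in requested and sets it in holder. A standalone sketch of just that bookkeeping, assuming nothing beyond the standard library; SNOOP_MASK_SIZE and the two field names mirror the patch, everything else is simplified.

    #include <bitset>
    #include <cassert>
    #include <cstddef>

    constexpr std::size_t SNOOP_MASK_SIZE = 256;
    using SnoopMask = std::bitset<SNOOP_MASK_SIZE>;

    struct SnoopItem {
        SnoopMask requested;  // ports with an outstanding request for the line
        SnoopMask holder;     // ports whose cache may hold the line
    };

    SnoopMask portToMask(std::size_t local_port_id) {
        SnoopMask m;
        m.set(local_port_id);           // one-hot bit per snooping CPU-side port
        return m;
    }

    int main() {
        SnoopItem item;
        SnoopMask req_port = portToMask(3);

        item.requested |= req_port;     // lookupRequest: remember the requestor

        // updateResponse: the response clears the request bit and marks the
        // responding port's cache as a holder of the line.
        assert((item.requested & req_port).any());
        item.requested &= ~req_port;
        item.holder |= req_port;

        assert(item.holder.test(3) && !item.requested.test(3));
        return 0;
    }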
*/ std::pair lookupRequest(const Packet* cpkt, - const ResponsePort& slave_port); + const ResponsePort& cpu_side_port); /** * For an un-successful request, revert the change to the snoop @@ -149,7 +149,7 @@ class SnoopFilter : public SimObject { void finishRequest(bool will_retry, Addr addr, bool is_secure); /** - * Handle an incoming snoop from below (the master port). These + * Handle an incoming snoop from below (the memory-side port). These * can upgrade the tracking logic and may also benefit from * additional steering thanks to the snoop filter. * @@ -189,11 +189,11 @@ class SnoopFilter : public SimObject { * other cache, or memory) and update the tracking information in * the snoop filter. * - * @param cpkt Pointer to const Packet holding the snoop response. - * @param slave_port ResponsePort that made the original request and - * is the target of this response. + * @param cpkt Pointer to const Packet holding the snoop response. + * @param cpu_side_port ResponsePort that made the original request and + * is the target of this response. */ - void updateResponse(const Packet *cpkt, const ResponsePort& slave_port); + void updateResponse(const Packet *cpkt, const ResponsePort& cpu_side_port); virtual void regStats(); @@ -224,12 +224,12 @@ class SnoopFilter : public SimObject { */ std::pair snoopAll(Cycles latency) const { - return std::make_pair(slavePorts, latency); + return std::make_pair(cpuSidePorts, latency); } - std::pair snoopSelected(const SnoopList& slave_ports, - Cycles latency) const + std::pair snoopSelected(const SnoopList& + _cpu_side_ports, Cycles latency) const { - return std::make_pair(slave_ports, latency); + return std::make_pair(_cpu_side_ports, latency); } std::pair snoopDown(Cycles latency) const { @@ -253,7 +253,7 @@ class SnoopFilter : public SimObject { private: /** - * Removes snoop filter items which have no requesters and no holders. + * Removes snoop filter items which have no requestors and no holders. */ void eraseIfNullEntry(SnoopFilterCache::iterator& sf_it); @@ -290,10 +290,10 @@ class SnoopFilter : public SimObject { ReqLookupResult() = delete; } reqLookupResult; - /** List of all attached snooping slave ports. */ - SnoopList slavePorts; + /** List of all attached snooping CPU-side ports. */ + SnoopList cpuSidePorts; /** Track the mapping from port ids to the local mask ids. */ - std::vector localSlavePortIds; + std::vector localResponsePortIds; /** Cache line size. */ const unsigned linesize; /** Latency for doing a lookup in the filter */ @@ -325,14 +325,14 @@ SnoopFilter::portToMask(const ResponsePort& port) const assert(port.getId() != InvalidPortID); // if this is not a snooping port, return a zero mask return !port.isSnooping() ? 
0 : - ((SnoopMask)1) << localSlavePortIds[port.getId()]; + ((SnoopMask)1) << localResponsePortIds[port.getId()]; } inline SnoopFilter::SnoopList SnoopFilter::maskToPortList(SnoopMask port_mask) const { SnoopList res; - for (const auto& p : slavePorts) + for (const auto& p : cpuSidePorts) if ((port_mask & portToMask(*p)).any()) res.push_back(p); return res; diff --git a/src/mem/token_port.cc b/src/mem/token_port.cc index 648d041d6..97c59a0b2 100644 --- a/src/mem/token_port.cc +++ b/src/mem/token_port.cc @@ -40,13 +40,13 @@ #include "debug/TokenPort.hh" void -TokenMasterPort::bind(Port &peer) +TokenRequestPort::bind(Port &peer) { RequestPort::bind(peer); } void -TokenMasterPort::recvTokens(int num_tokens) +TokenRequestPort::recvTokens(int num_tokens) { panic_if(!tokenManager, "TokenManager not set for %s.\n", name()); @@ -54,7 +54,7 @@ TokenMasterPort::recvTokens(int num_tokens) } bool -TokenMasterPort::haveTokens(int num_tokens) +TokenRequestPort::haveTokens(int num_tokens) { panic_if(!tokenManager, "TokenManager not set for %s.\n", name()); @@ -62,7 +62,7 @@ TokenMasterPort::haveTokens(int num_tokens) } void -TokenMasterPort::acquireTokens(int num_tokens) +TokenRequestPort::acquireTokens(int num_tokens) { panic_if(!tokenManager, "TokenManager not set for %s.\n", name()); @@ -70,53 +70,53 @@ TokenMasterPort::acquireTokens(int num_tokens) } void -TokenMasterPort::setTokenManager(TokenManager *_tokenManager) +TokenRequestPort::setTokenManager(TokenManager *_tokenManager) { tokenManager = _tokenManager; } void -TokenSlavePort::sendTokens(int num_tokens) +TokenResponsePort::sendTokens(int num_tokens) { - fatal_if(!tokenMasterPort, "Tried sendTokens to non-token master!\n"); + fatal_if(!tokenRequestPort, "Tried sendTokens to non-token requestor!\n"); - // Send tokens to a master - tokenMasterPort->recvTokens(num_tokens); + // Send tokens to a requestor + tokenRequestPort->recvTokens(num_tokens); } void -TokenSlavePort::bind(Port& peer) +TokenResponsePort::bind(Port& peer) { - // TokenSlavePort is allowed to bind to either TokenMasterPort or a - // RequestPort as fallback. If the type is a RequestPort, tokenMasterPort + // TokenResponsePort is allowed to bind to either TokenRequestPort or a + // RequestPort as fallback. If the type is a RequestPort, tokenRequestPort // is set to nullptr to indicate tokens should not be exchanged. 
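The bind logic that follows is a capability check: the peer may or may not understand tokens, and a dynamic_cast at bind time decides whether tokens will be exchanged at all. A standalone sketch of the same idea, with illustrative class names rather than the real gem5 ones:

    #include <cassert>
    #include <iostream>

    struct BasePort { virtual ~BasePort() = default; };
    struct PlainRequestPort : BasePort {};
    struct TokenAwareRequestPort : PlainRequestPort {
        void recvTokens(int n) { std::cout << "got " << n << " tokens\n"; }
    };

    struct TokenAwareResponsePort {
        TokenAwareRequestPort *tokenPeer = nullptr;

        void bind(BasePort &peer) {
            auto *token_peer = dynamic_cast<TokenAwareRequestPort *>(&peer);
            auto *plain_peer = dynamic_cast<PlainRequestPort *>(&peer);
            assert(token_peer || plain_peer);  // unsupported peers are fatal
            // nullptr means "bound, but no tokens are exchanged"
            tokenPeer = token_peer;
        }

        void sendTokens(int n) {
            if (tokenPeer)
                tokenPeer->recvTokens(n);
        }
    };

    int main() {
        TokenAwareRequestPort token_port;
        PlainRequestPort plain_port;

        TokenAwareResponsePort resp;
        resp.bind(token_port);
        resp.sendTokens(2);     // forwarded to the token-aware peer

        resp.bind(plain_port);
        resp.sendTokens(2);     // silently dropped: fallback binding
        return 0;
    }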
- auto *token_master_port = dynamic_cast(&peer); - auto *master_port = dynamic_cast(&peer); - if (!token_master_port && !master_port) { - fatal("Attempt to bind port %s to unsupported slave port %s.", + auto *token_request_port = dynamic_cast(&peer); + auto *request_port = dynamic_cast(&peer); + if (!token_request_port && !request_port) { + fatal("Attempt to bind port %s to unsupported response port %s.", name(), peer.name()); - } else if (token_master_port) { - // slave port keeps track of the master port - tokenMasterPort = token_master_port; - - // master port also keeps track of slave port - tokenMasterPort->bind(*this); - } else if (master_port) { - tokenMasterPort = nullptr; + } else if (token_request_port) { + // response port keeps track of the request port + tokenRequestPort = token_request_port; + + // request port also keeps track of response port + tokenRequestPort->bind(*this); + } else if (request_port) { + tokenRequestPort = nullptr; } } void -TokenSlavePort::unbind() +TokenResponsePort::unbind() { ResponsePort::responderUnbind(); - tokenMasterPort = nullptr; + tokenRequestPort = nullptr; } void -TokenSlavePort::recvRespRetry() +TokenResponsePort::recvRespRetry() { - // fallback to QueuedSlavePort-like impl for now + // fallback to QueuedResponsePort-like impl for now panic_if(respQueue.empty(), "Attempted to retry a response when no retry was queued!\n"); @@ -129,7 +129,7 @@ TokenSlavePort::recvRespRetry() } bool -TokenSlavePort::sendTimingResp(PacketPtr pkt) +TokenResponsePort::sendTimingResp(PacketPtr pkt) { bool success = ResponsePort::sendTimingResp(pkt); diff --git a/src/mem/token_port.hh b/src/mem/token_port.hh index 358ee03bd..617b9f939 100644 --- a/src/mem/token_port.hh +++ b/src/mem/token_port.hh @@ -38,33 +38,33 @@ #include "sim/clocked_object.hh" class TokenManager; -class TokenSlavePort; +class TokenResponsePort; -class TokenMasterPort : public RequestPort +class TokenRequestPort : public RequestPort { private: /* Manager to track tokens between this token port pair. */ TokenManager *tokenManager; public: - TokenMasterPort(const std::string& name, SimObject* owner, + TokenRequestPort(const std::string& name, SimObject* owner, PortID id = InvalidPortID) : RequestPort(name, owner, id), tokenManager(nullptr) { } /** - * Bind this master port to slave port. Called by the slave port in + * Bind this request port to response port. Called by the response port in * this token implementation. */ void bind(Port &peer) override; /** - * Unbind port. Handled by slave port in token implementation. + * Unbind port. Handled by response port in token implementation. */ void unbind() override {} /** - * Receive tokens returned by the slave port. This increments the number + * Receive tokens returned by the response port. This increments the number * or available tokens across the port. */ void recvTokens(int num_tokens); @@ -82,41 +82,41 @@ class TokenMasterPort : public RequestPort /** * Specify a token manger, which will handle tracking of tokens for a - * TokenMasterPort/SlaveMasterPort pair. + * TokenRequestPort/ResponseRequestPort pair. 
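The token manager itself is a shared credit counter between the request and response sides: credits come back via recvTokens() and are consumed before issuing new requests. A minimal standalone sketch of that accounting (ToyTokenManager and its numbers are made up for illustration):

    #include <cassert>

    class ToyTokenManager {
        int available;
      public:
        explicit ToyTokenManager(int initial) : available(initial) {}
        void recvTokens(int n) { available += n; }
        bool haveTokens(int n) const { return available >= n; }
        void acquireTokens(int n) {
            assert(haveTokens(n));
            available -= n;
        }
    };

    int main() {
        ToyTokenManager mgr(2);          // responder advertises 2 credits

        assert(mgr.haveTokens(1));
        mgr.acquireTokens(1);            // request port issues a request
        mgr.acquireTokens(1);            // and another
        assert(!mgr.haveTokens(1));      // now it must stall

        mgr.recvTokens(1);               // responder returns a credit
        assert(mgr.haveTokens(1));
        return 0;
    }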
*/ void setTokenManager(TokenManager *_tokenManager); }; -class TokenSlavePort : public ResponsePort +class TokenResponsePort : public ResponsePort { private: - TokenMasterPort *tokenMasterPort; + TokenRequestPort *tokenRequestPort; std::deque respQueue; void recvRespRetry() override; public: - TokenSlavePort(const std::string& name, ClockedObject *owner, + TokenResponsePort(const std::string& name, ClockedObject *owner, PortID id = InvalidPortID) : - ResponsePort(name, owner, id), tokenMasterPort(nullptr) + ResponsePort(name, owner, id), tokenRequestPort(nullptr) { } - ~TokenSlavePort() { } + ~TokenResponsePort() { } /** - * Bind this slave port to a master port. This also does the mirror - * action and bainds the master port to the slave port as well as + * Bind this response port to a request port. This also does the mirror + * action and binds the request port to the response port as well as * binding the base class types. */ void bind(Port &peer) override; /** - * Unbind this slave port and associated master port. + * Unbind this response port and associated request port. */ void unbind() override; /** - * Return num_tokens tokens back to the master port. + * Return num_tokens tokens back to the request port. */ void sendTokens(int num_tokens); diff --git a/src/mem/tport.cc b/src/mem/tport.cc index 2bcb0225f..301dfb1cd 100644 --- a/src/mem/tport.cc +++ b/src/mem/tport.cc @@ -43,7 +43,7 @@ SimpleTimingPort::SimpleTimingPort(const std::string& _name, SimObject* _owner) : - QueuedSlavePort(_name, _owner, queueImpl), queueImpl(*_owner, *this) + QueuedResponsePort(_name, _owner, queueImpl), queueImpl(*_owner, *this) { } @@ -68,7 +68,7 @@ SimpleTimingPort::recvTimingReq(PacketPtr pkt) bool needsResponse = pkt->needsResponse(); Tick latency = recvAtomic(pkt); - // turn packet around to go back to requester if response expected + // turn packet around to go back to requestor if response expected if (needsResponse) { // recvAtomic() should already have turned packet into // atomic response diff --git a/src/mem/tport.hh b/src/mem/tport.hh index 6c560d364..fe32872d1 100644 --- a/src/mem/tport.hh +++ b/src/mem/tport.hh @@ -54,9 +54,9 @@ class SimObject; /** * The simple timing port uses a queued port to implement * recvFunctional and recvTimingReq through recvAtomic. It is always a - * slave port. + * response port. */ -class SimpleTimingPort : public QueuedSlavePort +class SimpleTimingPort : public QueuedResponsePort { private: @@ -64,7 +64,7 @@ class SimpleTimingPort : public QueuedSlavePort /** * The packet queue used to store outgoing responses. Note that * the queue is made private and that we avoid overloading the - * name used in the QueuedSlavePort. Access is provided through + * name used in the QueuedResponsePort. Access is provided through * the queue reference in the base class. 
*/ RespPacketQueue queueImpl; diff --git a/src/mem/translating_port_proxy.cc b/src/mem/translating_port_proxy.cc index 8bb93cc73..1e8d83650 100644 --- a/src/mem/translating_port_proxy.cc +++ b/src/mem/translating_port_proxy.cc @@ -83,7 +83,7 @@ TranslatingPortProxy::tryReadBlob(Addr addr, void *p, int size) const gen.next()) { auto req = std::make_shared( - gen.addr(), gen.size(), flags, Request::funcMasterId, 0, + gen.addr(), gen.size(), flags, Request::funcRequestorId, 0, _tc->contextId()); if (!tryTLBs(req, BaseTLB::Read)) @@ -105,7 +105,7 @@ TranslatingPortProxy::tryWriteBlob( gen.next()) { auto req = std::make_shared( - gen.addr(), gen.size(), flags, Request::funcMasterId, 0, + gen.addr(), gen.size(), flags, Request::funcRequestorId, 0, _tc->contextId()); if (!tryTLBs(req, BaseTLB::Write)) @@ -125,7 +125,7 @@ TranslatingPortProxy::tryMemsetBlob(Addr address, uint8_t v, int size) const gen.next()) { auto req = std::make_shared( - gen.addr(), gen.size(), flags, Request::funcMasterId, 0, + gen.addr(), gen.size(), flags, Request::funcRequestorId, 0, _tc->contextId()); if (!tryTLBs(req, BaseTLB::Write)) diff --git a/src/mem/xbar.cc b/src/mem/xbar.cc index 5e2c28c4b..f9544f8a6 100644 --- a/src/mem/xbar.cc +++ b/src/mem/xbar.cc @@ -59,38 +59,39 @@ BaseXBar::BaseXBar(const BaseXBarParams *p) headerLatency(p->header_latency), width(p->width), gotAddrRanges(p->port_default_connection_count + - p->port_master_connection_count, false), + p->port_mem_side_ports_connection_count, false), gotAllAddrRanges(false), defaultPortID(InvalidPortID), useDefaultRange(p->use_default_range), transDist(this, "trans_dist", "Transaction distribution"), pktCount(this, "pkt_count", - "Packet count per connected master and slave (bytes)"), - pktSize(this, "pkt_size", - "Cumulative packet size per connected master and slave (bytes)") + "Packet count per connected requestor and responder (bytes)"), + pktSize(this, "pkt_size", "Cumulative packet size per connected " + "requestor and responder (bytes)") { } BaseXBar::~BaseXBar() { - for (auto m: masterPorts) - delete m; + for (auto port: memSidePorts) + delete port; - for (auto s: slavePorts) - delete s; + for (auto port: cpuSidePorts) + delete port; } Port & BaseXBar::getPort(const std::string &if_name, PortID idx) { - if (if_name == "master" && idx < masterPorts.size()) { - // the master port index translates directly to the vector position - return *masterPorts[idx]; + if (if_name == "mem_side_ports" && idx < memSidePorts.size()) { + // the memory-side ports index translates directly to the vector + // position + return *memSidePorts[idx]; } else if (if_name == "default") { - return *masterPorts[defaultPortID]; - } else if (if_name == "slave" && idx < slavePorts.size()) { - // the slave port index translates directly to the vector position - return *slavePorts[idx]; + return *memSidePorts[defaultPortID]; + } else if (if_name == "cpu_side_ports" && idx < cpuSidePorts.size()) { + // the CPU-side ports index translates directly to the vector position + return *cpuSidePorts[idx]; } else { return ClockedObject::getPort(if_name, idx); } @@ -179,7 +180,7 @@ BaseXBar::Layer::tryTiming(SrcType* src_port) { // if we are in the retry state, we will not see anything but the // retrying port (or in the case of the snoop ports the snoop - // response port that mirrors the actual slave port) as we leave + // response port that mirrors the actual CPU-side port) as we leave // this state again in zero time if the peer does not immediately // call the layer when receiving the retry @@ 
-326,7 +327,7 @@ PortID BaseXBar::findPort(AddrRange addr_range) { // we should never see any address lookups before we've got the - // ranges of all connected slave modules + // ranges of all connected CPU-side-port modules assert(gotAllAddrRanges); // Check the address map interval tree @@ -356,14 +357,14 @@ BaseXBar::findPort(AddrRange addr_range) /** Function called by the port when the crossbar is receiving a range change.*/ void -BaseXBar::recvRangeChange(PortID master_port_id) +BaseXBar::recvRangeChange(PortID mem_side_port_id) { - DPRINTF(AddrRanges, "Received range change from slave port %s\n", - masterPorts[master_port_id]->getPeer()); + DPRINTF(AddrRanges, "Received range change from cpu_side_ports %s\n", + memSidePorts[mem_side_port_id]->getPeer()); - // remember that we got a range from this master port and thus the - // connected slave module - gotAddrRanges[master_port_id] = true; + // remember that we got a range from this memory-side port and thus the + // connected CPU-side-port module + gotAddrRanges[mem_side_port_id] = true; // update the global flag if (!gotAllAddrRanges) { @@ -375,19 +376,20 @@ BaseXBar::recvRangeChange(PortID master_port_id) gotAllAddrRanges &= *r++; } if (gotAllAddrRanges) - DPRINTF(AddrRanges, "Got address ranges from all slaves\n"); + DPRINTF(AddrRanges, "Got address ranges from all responders\n"); } // note that we could get the range from the default port at any // point in time, and we cannot assume that the default range is // set before the other ones are, so we do additional checks once // all ranges are provided - if (master_port_id == defaultPortID) { + if (mem_side_port_id == defaultPortID) { // only update if we are indeed checking ranges for the // default port since the port might not have a valid range // otherwise if (useDefaultRange) { - AddrRangeList ranges = masterPorts[master_port_id]->getAddrRanges(); + AddrRangeList ranges = memSidePorts[mem_side_port_id]-> + getAddrRanges(); if (ranges.size() != 1) fatal("Crossbar %s may only have a single default range", @@ -398,9 +400,9 @@ BaseXBar::recvRangeChange(PortID master_port_id) } else { // the ports are allowed to update their address ranges // dynamically, so remove any existing entries - if (gotAddrRanges[master_port_id]) { + if (gotAddrRanges[mem_side_port_id]) { for (auto p = portMap.begin(); p != portMap.end(); ) { - if (p->second == master_port_id) + if (p->second == mem_side_port_id) // erasing invalidates the iterator, so advance it // before the deletion takes place portMap.erase(p++); @@ -409,25 +411,26 @@ BaseXBar::recvRangeChange(PortID master_port_id) } } - AddrRangeList ranges = masterPorts[master_port_id]->getAddrRanges(); + AddrRangeList ranges = memSidePorts[mem_side_port_id]-> + getAddrRanges(); for (const auto& r: ranges) { DPRINTF(AddrRanges, "Adding range %s for id %d\n", - r.to_string(), master_port_id); - if (portMap.insert(r, master_port_id) == portMap.end()) { + r.to_string(), mem_side_port_id); + if (portMap.insert(r, mem_side_port_id) == portMap.end()) { PortID conflict_id = portMap.intersects(r)->second; fatal("%s has two ports responding within range " "%s:\n\t%s\n\t%s\n", name(), r.to_string(), - masterPorts[master_port_id]->getPeer(), - masterPorts[conflict_id]->getPeer()); + memSidePorts[mem_side_port_id]->getPeer(), + memSidePorts[conflict_id]->getPeer()); } } } - // if we have received ranges from all our neighbouring slave - // modules, go ahead and tell our connected master modules in + // if we have received ranges from all our neighbouring 
CPU-side-port + // modules, go ahead and tell our connected memory-side-port modules in // turn, this effectively assumes a tree structure of the system if (gotAllAddrRanges) { DPRINTF(AddrRanges, "Aggregating address ranges\n"); @@ -508,10 +511,10 @@ BaseXBar::recvRangeChange(PortID master_port_id) } } - // tell all our neighbouring master ports that our address + // tell all our neighbouring memory-side ports that our address // ranges have changed - for (const auto& s: slavePorts) - s->sendRangeChange(); + for (const auto& port: cpuSidePorts) + port->sendRangeChange(); } } @@ -524,7 +527,7 @@ BaseXBar::getAddrRanges() const assert(gotAllAddrRanges); // at the moment, this never happens, as there are no cycles in - // the range queries and no devices on the master side of a crossbar + // the range queries and no devices on the memory side of a crossbar // (CPU, cache, bridge etc) actually care about the ranges of the // ports they are connected to @@ -552,25 +555,26 @@ BaseXBar::regStats() } pktCount - .init(slavePorts.size(), masterPorts.size()) + .init(cpuSidePorts.size(), memSidePorts.size()) .flags(total | nozero | nonan); pktSize - .init(slavePorts.size(), masterPorts.size()) + .init(cpuSidePorts.size(), memSidePorts.size()) .flags(total | nozero | nonan); // both the packet count and total size are two-dimensional - // vectors, indexed by slave port id and master port id, thus the - // neighbouring master and slave, they do not differentiate what - // came from the master and was forwarded to the slave (requests - // and snoop responses) and what came from the slave and was - // forwarded to the master (responses and snoop requests) - for (int i = 0; i < slavePorts.size(); i++) { - pktCount.subname(i, slavePorts[i]->getPeer().name()); - pktSize.subname(i, slavePorts[i]->getPeer().name()); - for (int j = 0; j < masterPorts.size(); j++) { - pktCount.ysubname(j, masterPorts[j]->getPeer().name()); - pktSize.ysubname(j, masterPorts[j]->getPeer().name()); + // vectors, indexed by CPU-side port id and memory-side port id, thus the + // neighbouring memory-side ports and CPU-side ports, they do not + // differentiate what came from the memory-side ports and was forwarded to + // the CPU-side ports (requests and snoop responses) and what came from + // the CPU-side ports and was forwarded to the memory-side ports (responses + // and snoop requests) + for (int i = 0; i < cpuSidePorts.size(); i++) { + pktCount.subname(i, cpuSidePorts[i]->getPeer().name()); + pktSize.subname(i, cpuSidePorts[i]->getPeer().name()); + for (int j = 0; j < memSidePorts.size(); j++) { + pktCount.ysubname(j, memSidePorts[j]->getPeer().name()); + pktSize.ysubname(j, memSidePorts[j]->getPeer().name()); } } } diff --git a/src/mem/xbar.hh b/src/mem/xbar.hh index 535277add..cf067423e 100644 --- a/src/mem/xbar.hh +++ b/src/mem/xbar.hh @@ -79,10 +79,10 @@ class BaseXBar : public ClockedObject * PCIe, etc. * * The template parameter, PortClass, indicates the destination - * port type for the layer. The retry list holds either master - * ports or slave ports, depending on the direction of the - * layer. Thus, a request layer has a retry list containing slave - * ports, whereas a response layer holds master ports. + * port type for the layer. The retry list holds either memory-side ports + * or CPU-side ports, depending on the direction of the + * layer. Thus, a request layer has a retry list containing + * CPU-side ports, whereas a response layer holds memory-side ports. 
*/ template class Layer : public Drainable, public Stats::Group @@ -332,9 +332,9 @@ class BaseXBar : public ClockedObject * Function called by the port when the crossbar is recieving a * range change. * - * @param master_port_id id of the port that received the change + * @param mem_side_port_id id of the port that received the change */ - virtual void recvRangeChange(PortID master_port_id); + virtual void recvRangeChange(PortID mem_side_port_id); /** * Find which port connected to this crossbar (if any) should be @@ -364,17 +364,17 @@ class BaseXBar : public ClockedObject void calcPacketTiming(PacketPtr pkt, Tick header_delay); /** - * Remember for each of the master ports of the crossbar if we got - * an address range from the connected slave. For convenience, - * also keep track of if we got ranges from all the slave modules + * Remember for each of the memory-side ports of the crossbar if we got + * an address range from the connected CPU-side ports. For convenience, + * also keep track of if we got ranges from all the CPU-side-port modules * or not. */ std::vector gotAddrRanges; bool gotAllAddrRanges; - /** The master and slave ports of the crossbar */ - std::vector slavePorts; - std::vector masterPorts; + /** The memory-side ports and CPU-side ports of the crossbar */ + std::vector cpuSidePorts; + std::vector memSidePorts; /** Port that handles requests that don't match any of the interfaces.*/ PortID defaultPortID; @@ -392,9 +392,9 @@ class BaseXBar : public ClockedObject * crossbar. The transaction distribution is globally counting * different types of commands. The packet count and total packet * size are two-dimensional vectors that are indexed by the - * slave port and master port id (thus the neighbouring master and - * neighbouring slave), summing up both directions (request and - * response). + * CPU-side port and memory-side port id (thus the neighbouring memory-side + * ports and neighbouring CPU-side ports), summing up both directions + * (request and response). 
*/ Stats::Vector transDist; Stats::Vector2d pktCount; diff --git a/src/python/m5/SimObject.py b/src/python/m5/SimObject.py index 7c4c809bc..9c9a9ed0f 100644 --- a/src/python/m5/SimObject.py +++ b/src/python/m5/SimObject.py @@ -186,11 +186,11 @@ def createCxxConfigDirectoryEntryFile(code, name, simobj, is_header): for port in simobj._ports.values(): is_vector = isinstance(port, m5.params.VectorPort) - is_master = port.role == 'MASTER' + is_requestor = port.role == 'GEM5 REQUESTOR' code('ports["%s"] = new PortDesc("%s", %s, %s);' % (port.name, port.name, cxx_bool(is_vector), - cxx_bool(is_master))) + cxx_bool(is_requestor))) code.dedent() code('}') diff --git a/src/python/m5/params.py b/src/python/m5/params.py index 4e586671f..45082d7ac 100644 --- a/src/python/m5/params.py +++ b/src/python/m5/params.py @@ -2120,13 +2120,13 @@ class Port(object): def cxx_decl(self, code): code('unsigned int port_${{self.name}}_connection_count;') -Port.compat('GEM5 REQUESTER', 'GEM5 RESPONDER') +Port.compat('GEM5 REQUESTOR', 'GEM5 RESPONDER') class RequestPort(Port): # RequestPort("description") def __init__(self, desc): super(RequestPort, self).__init__( - 'GEM5 REQUESTER', desc, is_source=True) + 'GEM5 REQUESTOR', desc, is_source=True) class ResponsePort(Port): # ResponsePort("description") @@ -2143,7 +2143,7 @@ class VectorRequestPort(VectorPort): # VectorRequestPort("description") def __init__(self, desc): super(VectorRequestPort, self).__init__( - 'GEM5 REQUESTER', desc, is_source=True) + 'GEM5 REQUESTOR', desc, is_source=True) class VectorResponsePort(VectorPort): # VectorResponsePort("description") diff --git a/src/python/m5/util/dot_writer.py b/src/python/m5/util/dot_writer.py index c66108d21..8b757e835 100644 --- a/src/python/m5/util/dot_writer.py +++ b/src/python/m5/util/dot_writer.py @@ -42,7 +42,7 @@ # view. The output generated by do_dot() is a DOT-based figure (as a # pdf and an editable svg file) and its source dot code. Nodes are # components, and edges represent the memory hierarchy: the edges are -# directed, from a master to slave. Initially all nodes are +# directed, from a requestor to responder. Initially all nodes are # generated, and then all edges are added. do_dot should be called # with the top-most SimObject (namely root but not necessarily), the # output folder and the output dot source filename. 
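The Python-side rename keeps the same pairing rule: a port may only bind to a peer whose role string was registered as compatible with its own. A standalone sketch of that check (the registry class is illustrative; only the two role strings come from the patch):

    #include <iostream>
    #include <set>
    #include <string>
    #include <utility>

    class RoleRegistry {
        std::set<std::pair<std::string, std::string>> compatible;
      public:
        void compat(const std::string &a, const std::string &b) {
            compatible.insert({a, b});
            compatible.insert({b, a});   // compatibility is symmetric
        }
        bool canBind(const std::string &a, const std::string &b) const {
            return compatible.count({a, b}) > 0;
        }
    };

    int main() {
        RoleRegistry roles;
        roles.compat("GEM5 REQUESTOR", "GEM5 RESPONDER");

        std::cout << std::boolalpha
                  << roles.canBind("GEM5 REQUESTOR", "GEM5 RESPONDER") << "\n"
                  << roles.canBind("GEM5 REQUESTOR", "GEM5 REQUESTOR") << "\n";
        return 0;
    }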
From the given diff --git a/src/sim/cxx_config.hh b/src/sim/cxx_config.hh index 9f8a07d36..9a45e76eb 100644 --- a/src/sim/cxx_config.hh +++ b/src/sim/cxx_config.hh @@ -95,12 +95,12 @@ class CxxConfigDirectoryEntry /* Is this a vector or singleton parameters/SimObject */ const bool isVector; - /** Is this a master or slave port */ - const bool isMaster; + /** Is this a request or response port */ + const bool isRequestor; PortDesc(const std::string &name_, - bool isVector_, bool isMaster_) : - name(name_), isVector(isVector_), isMaster(isMaster_) + bool isVector_, bool isRequestor_) : + name(name_), isVector(isVector_), isRequestor(isRequestor_) { } }; diff --git a/src/sim/cxx_manager.cc b/src/sim/cxx_manager.cc index 71ee10b9f..7df3bcad4 100644 --- a/src/sim/cxx_manager.cc +++ b/src/sim/cxx_manager.cc @@ -444,72 +444,74 @@ CxxConfigManager::bindAllPorts() void CxxConfigManager::bindPort( - SimObject *master_object, const std::string &master_port_name, - PortID master_port_index, - SimObject *slave_object, const std::string &slave_port_name, - PortID slave_port_index) + SimObject *requestor_object, const std::string &request_port_name, + PortID request_port_index, + SimObject *responder_object, const std::string &response_port_name, + PortID response_port_index) { - /* FIXME, check slave_port_index against connection_count + /* FIXME, check response_port_index against connection_count * defined for port, need getPortConnectionCount and a * getCxxConfigDirectoryEntry for each object. */ /* It would be nice to be able to catch the errors from these calls. */ - Port &master_port = master_object->getPort( - master_port_name, master_port_index); - Port &slave_port = slave_object->getPort( - slave_port_name, slave_port_index); - - if (master_port.isConnected()) { - throw Exception(master_object->name(), csprintf( - "Master port: %s[%d] is already connected\n", master_port_name, - master_port_index)); + Port &request_port = requestor_object->getPort( + request_port_name, request_port_index); + Port &response_port = responder_object->getPort( + response_port_name, response_port_index); + + if (request_port.isConnected()) { + throw Exception(requestor_object->name(), csprintf( + "Request port: %s[%d] is already connected\n", request_port_name, + request_port_index)); } - if (slave_port.isConnected()) { - throw Exception(slave_object->name(), csprintf( - "Slave port: %s[%d] is already connected\n", slave_port_name, - slave_port_index)); + if (response_port.isConnected()) { + throw Exception(responder_object->name(), csprintf( + "Response port: %s[%d] is already connected\n", response_port_name, + response_port_index)); } DPRINTF(CxxConfig, "Binding port %s.%s[%d]" " to %s:%s[%d]\n", - master_object->name(), master_port_name, master_port_index, - slave_object->name(), slave_port_name, slave_port_index); + requestor_object->name(), request_port_name, request_port_index, + responder_object->name(), response_port_name, response_port_index); - master_port.bind(slave_port); + request_port.bind(response_port); } void -CxxConfigManager::bindMasterPort(SimObject *object, +CxxConfigManager::bindRequestPort(SimObject *object, const CxxConfigDirectoryEntry::PortDesc &port, const std::vector &peers) { - unsigned int master_port_index = 0; + unsigned int request_port_index = 0; for (auto peer_i = peers.begin(); peer_i != peers.end(); ++peer_i) { const std::string &peer = *peer_i; - std::string slave_object_name; - std::string slave_port_name; - unsigned int slave_port_index; + std::string response_object_name; + 
std::string response_port_name; + unsigned int response_port_index; - parsePort(peer, slave_object_name, slave_port_name, - slave_port_index); + parsePort(peer, response_object_name, response_port_name, + response_port_index); - std::string slave_instance_name = rename(slave_object_name); + std::string response_instance_name = rename(response_object_name); - if (objectsByName.find(slave_instance_name) == objectsByName.end()) { + if (objectsByName.find(response_instance_name) + == objectsByName.end()) { throw Exception(object->name(), csprintf( - "Can't find slave port object: %s", slave_instance_name)); + "Can't find response port object: %s", + response_instance_name)); } - SimObject *slave_object = objectsByName[slave_instance_name]; + SimObject *responder_object = objectsByName[response_instance_name]; - bindPort(object, port.name, master_port_index, - slave_object, slave_port_name, slave_port_index); + bindPort(object, port.name, request_port_index, + responder_object, response_port_name, response_port_index); - master_port_index++; + request_port_index++; } } @@ -540,14 +542,14 @@ CxxConfigManager::bindObjectPorts(SimObject *object) /* Only handle master ports as binding only needs to happen once * for each observed pair of ports */ - if (port->isMaster) { + if (port->isRequestor) { if (!port->isVector && peers.size() > 1) { throw Exception(instance_name, csprintf( "Too many connections to non-vector port %s (%d)\n", port->name, peers.size())); } - bindMasterPort(object, *port, peers); + bindRequestPort(object, *port, peers); } } } diff --git a/src/sim/cxx_manager.hh b/src/sim/cxx_manager.hh index 30339adba..e2cbadd0b 100644 --- a/src/sim/cxx_manager.hh +++ b/src/sim/cxx_manager.hh @@ -126,14 +126,14 @@ class CxxConfigManager std::list renamings; /** Bind a single connection between two objects' ports */ - void bindPort(SimObject *masterObject, const std::string &masterPort, - PortID masterPortIndex, SimObject *slaveObject, - const std::string &slavePort, PortID slavePortIndex); + void bindPort(SimObject *requestorObject, const std::string &requestPort, + PortID requestPortIndex, SimObject *responderObject, + const std::string &responsePort, PortID responsePortIndex); - /** Bind a single (possibly vectored) master port to peers from the + /** Bind a single (possibly vectored) request port to peers from the * unparsed list peers with elements in the .ini connection format: * path(.path)*.port[index] */ - void bindMasterPort(SimObject *object, + void bindRequestPort(SimObject *object, const CxxConfigDirectoryEntry::PortDesc &port, const std::vector &peers); diff --git a/src/sim/probe/mem.hh b/src/sim/probe/mem.hh index fed7bcf98..444c38e4f 100644 --- a/src/sim/probe/mem.hh +++ b/src/sim/probe/mem.hh @@ -56,7 +56,7 @@ struct PacketInfo { uint32_t size; Request::FlagsType flags; Addr pc; - MasterID master; + RequestorID id; explicit PacketInfo(const PacketPtr& pkt) : cmd(pkt->cmd), @@ -64,7 +64,7 @@ struct PacketInfo { size(pkt->getSize()), flags(pkt->req->getFlags()), pc(pkt->req->hasPC() ? 
pkt->req->getPC() : 0), - master(pkt->req->masterId()) { } + id(pkt->req->requestorId()) { } }; /** diff --git a/src/sim/system.cc b/src/sim/system.cc index 8185f13e9..cb412a866 100644 --- a/src/sim/system.cc +++ b/src/sim/system.cc @@ -246,14 +246,14 @@ System::System(Params *p) _cacheLineSize == 64 || _cacheLineSize == 128)) warn_once("Cache line size is neither 16, 32, 64 nor 128 bytes.\n"); - // Get the generic system master IDs - MasterID tmp_id M5_VAR_USED; - tmp_id = getMasterId(this, "writebacks"); - assert(tmp_id == Request::wbMasterId); - tmp_id = getMasterId(this, "functional"); - assert(tmp_id == Request::funcMasterId); - tmp_id = getMasterId(this, "interrupt"); - assert(tmp_id == Request::intMasterId); + // Get the generic system requestor IDs + RequestorID tmp_id M5_VAR_USED; + tmp_id = getRequestorId(this, "writebacks"); + assert(tmp_id == Request::wbRequestorId); + tmp_id = getRequestorId(this, "functional"); + assert(tmp_id == Request::funcRequestorId); + tmp_id = getRequestorId(this, "interrupt"); + assert(tmp_id == Request::intRequestorId); // increment the number of running systems numSystemsRunning++; @@ -420,28 +420,28 @@ System::isMemAddr(Addr addr) const } void -System::addDeviceMemory(MasterID masterId, AbstractMemory *deviceMemory) +System::addDeviceMemory(RequestorID requestor_id, AbstractMemory *deviceMemory) { - if (!deviceMemMap.count(masterId)) { - deviceMemMap.insert(std::make_pair(masterId, deviceMemory)); + if (!deviceMemMap.count(requestor_id)) { + deviceMemMap.insert(std::make_pair(requestor_id, deviceMemory)); } } bool System::isDeviceMemAddr(PacketPtr pkt) const { - const MasterID& mid = pkt->masterId(); + const RequestorID& id = pkt->requestorId(); - return (deviceMemMap.count(mid) && - deviceMemMap.at(mid)->getAddrRange().contains(pkt->getAddr())); + return (deviceMemMap.count(id) && + deviceMemMap.at(id)->getAddrRange().contains(pkt->getAddr())); } AbstractMemory * -System::getDeviceMemory(MasterID mid) const +System::getDeviceMemory(RequestorID id) const { - panic_if(!deviceMemMap.count(mid), - "No device memory found for MasterID %d\n", mid); - return deviceMemMap.at(mid); + panic_if(!deviceMemMap.count(id), + "No device memory found for RequestorID %d\n", id); + return deviceMemMap.at(id); } void @@ -544,73 +544,74 @@ printSystems() } std::string -System::stripSystemName(const std::string& master_name) const +System::stripSystemName(const std::string& requestor_name) const { - if (startswith(master_name, name())) { - return master_name.substr(name().size()); + if (startswith(requestor_name, name())) { + return requestor_name.substr(name().size()); } else { - return master_name; + return requestor_name; } } -MasterID -System::lookupMasterId(const SimObject* obj) const +RequestorID +System::lookupRequestorId(const SimObject* obj) const { - MasterID id = Request::invldMasterId; + RequestorID id = Request::invldRequestorId; // number of occurrences of the SimObject pointer - // in the master list. + // in the requestor list. 
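The id management that follows reduces to a name registry where the id is simply the vector index: re-registering a name returns the same id, and looking up an unknown name yields an invalid sentinel. A standalone sketch of that registry, with a toy class name and a sentinel mirroring invldRequestorId:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <limits>
    #include <string>
    #include <vector>

    using RequestorID = std::uint16_t;
    constexpr RequestorID invldRequestorId =
        std::numeric_limits<RequestorID>::max();

    class ToyRequestorRegistry
    {
        std::vector<std::string> names;   // index == RequestorID

      public:
        RequestorID getRequestorId(const std::string &name)
        {
            for (std::size_t i = 0; i < names.size(); i++)
                if (names[i] == name)
                    return static_cast<RequestorID>(i);  // stable across calls
            names.push_back(name);
            return static_cast<RequestorID>(names.size() - 1);
        }

        RequestorID lookupRequestorId(const std::string &name) const
        {
            for (std::size_t i = 0; i < names.size(); i++)
                if (names[i] == name)
                    return static_cast<RequestorID>(i);
            return invldRequestorId;
        }

        const std::string &getRequestorName(RequestorID id) const
        {
            return names.at(id);
        }
    };

    int main()
    {
        ToyRequestorRegistry sys;
        RequestorID inst = sys.getRequestorId("cpu.inst");
        RequestorID data = sys.getRequestorId("cpu.data");
        assert(inst != data);
        assert(sys.getRequestorId("cpu.inst") == inst);
        assert(sys.lookupRequestorId("dma0") == invldRequestorId);
        assert(sys.getRequestorName(data) == "cpu.data");
        return 0;
    }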
auto obj_number = 0; - for (int i = 0; i < masters.size(); i++) { - if (masters[i].obj == obj) { + for (int i = 0; i < requestors.size(); i++) { + if (requestors[i].obj == obj) { id = i; obj_number++; } } fatal_if(obj_number > 1, - "Cannot lookup MasterID by SimObject pointer: " - "More than one master is sharing the same SimObject\n"); + "Cannot lookup RequestorID by SimObject pointer: " + "More than one requestor is sharing the same SimObject\n"); return id; } -MasterID -System::lookupMasterId(const std::string& master_name) const +RequestorID +System::lookupRequestorId(const std::string& requestor_name) const { - std::string name = stripSystemName(master_name); + std::string name = stripSystemName(requestor_name); - for (int i = 0; i < masters.size(); i++) { - if (masters[i].masterName == name) { + for (int i = 0; i < requestors.size(); i++) { + if (requestors[i].req_name == name) { return i; } } - return Request::invldMasterId; + return Request::invldRequestorId; } -MasterID -System::getGlobalMasterId(const std::string& master_name) +RequestorID +System::getGlobalRequestorId(const std::string& requestor_name) { - return _getMasterId(nullptr, master_name); + return _getRequestorId(nullptr, requestor_name); } -MasterID -System::getMasterId(const SimObject* master, std::string submaster) +RequestorID +System::getRequestorId(const SimObject* requestor, std::string subrequestor) { - auto master_name = leafMasterName(master, submaster); - return _getMasterId(master, master_name); + auto requestor_name = leafRequestorName(requestor, subrequestor); + return _getRequestorId(requestor, requestor_name); } -MasterID -System::_getMasterId(const SimObject* master, const std::string& master_name) +RequestorID +System::_getRequestorId(const SimObject* requestor, + const std::string& requestor_name) { - std::string name = stripSystemName(master_name); + std::string name = stripSystemName(requestor_name); // CPUs in switch_cpus ask for ids again after switching - for (int i = 0; i < masters.size(); i++) { - if (masters[i].masterName == name) { + for (int i = 0; i < requestors.size(); i++) { + if (requestors[i].req_name == name) { return i; } } @@ -620,39 +621,40 @@ System::_getMasterId(const SimObject* master, const std::string& master_name) // they will be too small if (Stats::enabled()) { - fatal("Can't request a masterId after regStats(). " + fatal("Can't request a requestorId after regStats(). " "You must do so in init().\n"); } - // Generate a new MasterID incrementally - MasterID master_id = masters.size(); + // Generate a new RequestorID incrementally + RequestorID requestor_id = requestors.size(); - // Append the new Master metadata to the group of system Masters. - masters.emplace_back(master, name, master_id); + // Append the new Requestor metadata to the group of system Requestors. + requestors.emplace_back(requestor, name, requestor_id); - return masters.back().masterId; + return requestors.back().id; } std::string -System::leafMasterName(const SimObject* master, const std::string& submaster) +System::leafRequestorName(const SimObject* requestor, + const std::string& subrequestor) { - if (submaster.empty()) { - return master->name(); + if (subrequestor.empty()) { + return requestor->name(); } else { - // Get the full master name by appending the submaster name to - // the root SimObject master name - return master->name() + "." + submaster; + // Get the full requestor name by appending the subrequestor name to + // the root SimObject requestor name + return requestor->name() + "." 
+ subrequestor; } } std::string -System::getMasterName(MasterID master_id) +System::getRequestorName(RequestorID requestor_id) { - if (master_id >= masters.size()) - fatal("Invalid master_id passed to getMasterName()\n"); + if (requestor_id >= requestors.size()) + fatal("Invalid requestor_id passed to getRequestorName()\n"); - const auto& master_info = masters[master_id]; - return master_info.masterName; + const auto& requestor_info = requestors[requestor_id]; + return requestor_info.req_name; } System * diff --git a/src/sim/system.hh b/src/sim/system.hh index 8b31b2fe6..7d77c48dd 100644 --- a/src/sim/system.hh +++ b/src/sim/system.hh @@ -55,7 +55,7 @@ #include "cpu/base.hh" #include "cpu/pc_event.hh" #include "enums/MemoryMode.hh" -#include "mem/mem_master.hh" +#include "mem/mem_requestor.hh" #include "mem/physical.hh" #include "mem/port.hh" #include "mem/port_proxy.hh" @@ -76,7 +76,7 @@ class System : public SimObject, public PCEventScope /** * Private class for the system port which is only used as a - * master for debug access and for non-structural entities that do + * requestor for debug access and for non-structural entities that do * not have a port of their own. */ class SystemPort : public RequestPort @@ -99,7 +99,7 @@ class System : public SimObject, public PCEventScope SystemPort _systemPort; // Map of memory address ranges for devices with their own backing stores - std::unordered_map deviceMemMap; + std::unordered_map deviceMemMap; public: @@ -358,22 +358,23 @@ class System : public SimObject, public PCEventScope /** * Add a physical memory range for a device. The ranges added here will - * be considered a non-PIO memory address if the masterId of the packet + * be considered a non-PIO memory address if the requestorId of the packet * and range match something in the device memory map. */ - void addDeviceMemory(MasterID masterID, AbstractMemory *deviceMemory); + void addDeviceMemory(RequestorID requestorId, + AbstractMemory *deviceMemory); /** * Similar to isMemAddr but for devices. Checks if a physical address * of the packet match an address range of a device corresponding to the - * MasterId of the request. + * RequestorId of the request. */ bool isDeviceMemAddr(PacketPtr pkt) const; /** * Return a pointer to the device memory. */ - AbstractMemory *getDeviceMemory(MasterID masterID) const; + AbstractMemory *getDeviceMemory(RequestorID _id) const; /** * Get the architecture. @@ -419,98 +420,99 @@ class System : public SimObject, public PCEventScope uint32_t numWorkIds; /** This array is a per-system list of all devices capable of issuing a - * memory system request and an associated string for each master id. - * It's used to uniquely id any master in the system by name for things + * memory system request and an associated string for each requestor id. + * It's used to uniquely id any requestor in the system by name for things * like cache statistics. */ - std::vector masters; + std::vector requestors; ThermalModel * thermalModel; protected: /** - * Strips off the system name from a master name + * Strips off the system name from a requestor name */ - std::string stripSystemName(const std::string& master_name) const; + std::string stripSystemName(const std::string& requestor_name) const; public: /** * Request an id used to create a request object in the system. All objects * that intend to issues requests into the memory system must request an id - * in the init() phase of startup. All master ids must be fixed by the + * in the init() phase of startup. 
All requestor ids must be fixed by the * regStats() phase that immediately precedes it. This allows objects in - * the memory system to understand how many masters may exist and - * appropriately name the bins of their per-master stats before the stats - * are finalized. + * the memory system to understand how many requestors may exist and + * appropriately name the bins of their per-requestor stats before the + * stats are finalized. * - * Registers a MasterID: + * Registers a RequestorID: * This method takes two parameters, one of which is optional. - * The first one is the master object, and it is compulsory; in case - * a object has multiple (sub)masters, a second parameter must be - * provided and it contains the name of the submaster. The method will - * create a master's name by concatenating the SimObject name with the - * eventual submaster string, separated by a dot. + * The first one is the requestor object, and it is compulsory; in case + * a object has multiple (sub)requestors, a second parameter must be + * provided and it contains the name of the subrequestor. The method will + * create a requestor's name by concatenating the SimObject name with the + * eventual subrequestor string, separated by a dot. * * As an example: - * For a cpu having two masters: a data master and an instruction master, + * For a cpu having two requestors: a data requestor and an + * instruction requestor, * the method must be called twice: * - * instMasterId = getMasterId(cpu, "inst"); - * dataMasterId = getMasterId(cpu, "data"); + * instRequestorId = getRequestorId(cpu, "inst"); + * dataRequestorId = getRequestorId(cpu, "data"); * - * and the masters' names will be: + * and the requestors' names will be: * - "cpu.inst" * - "cpu.data" * - * @param master SimObject related to the master - * @param submaster String containing the submaster's name - * @return the master's ID. + * @param requestor SimObject related to the requestor + * @param subrequestor String containing the subrequestor's name + * @return the requestor's ID. */ - MasterID getMasterId(const SimObject* master, - std::string submaster = std::string()); + RequestorID getRequestorId(const SimObject* requestor, + std::string subrequestor = std::string()); /** - * Registers a GLOBAL MasterID, which is a MasterID not related + * Registers a GLOBAL RequestorID, which is a RequestorID not related * to any particular SimObject; since no SimObject is passed, - * the master gets registered by providing the full master name. + * the requestor gets registered by providing the full requestor name. * - * @param masterName full name of the master - * @return the master's ID. + * @param requestorName full name of the requestor + * @return the requestor's ID. */ - MasterID getGlobalMasterId(const std::string& master_name); + RequestorID getGlobalRequestorId(const std::string& requestor_name); /** * Get the name of an object for a given request id. */ - std::string getMasterName(MasterID master_id); + std::string getRequestorName(RequestorID requestor_id); /** - * Looks up the MasterID for a given SimObject - * returns an invalid MasterID (invldMasterId) if not found. + * Looks up the RequestorID for a given SimObject + * returns an invalid RequestorID (invldRequestorId) if not found. */ - MasterID lookupMasterId(const SimObject* obj) const; + RequestorID lookupRequestorId(const SimObject* obj) const; /** - * Looks up the MasterID for a given object name string - * returns an invalid MasterID (invldMasterId) if not found. 
+ * Looks up the RequestorID for a given object name string + * returns an invalid RequestorID (invldRequestorId) if not found. */ - MasterID lookupMasterId(const std::string& name) const; + RequestorID lookupRequestorId(const std::string& name) const; - /** Get the number of masters registered in the system */ - MasterID maxMasters() { return masters.size(); } + /** Get the number of requestors registered in the system */ + RequestorID maxRequestors() { return requestors.size(); } protected: - /** helper function for getMasterId */ - MasterID _getMasterId(const SimObject* master, - const std::string& master_name); + /** helper function for getRequestorId */ + RequestorID _getRequestorId(const SimObject* requestor, + const std::string& requestor_name); /** - * Helper function for constructing the full (sub)master name - * by providing the root master and the relative submaster name. + * Helper function for constructing the full (sub)requestor name + * by providing the root requestor and the relative subrequestor name. */ - std::string leafMasterName(const SimObject* master, - const std::string& submaster); + std::string leafRequestorName(const SimObject* requestor, + const std::string& subrequestor); public: diff --git a/src/systemc/tests/tlm/multi_sockets/MultiSocketSimpleSwitchAT.h b/src/systemc/tests/tlm/multi_sockets/MultiSocketSimpleSwitchAT.h index 3e03db312..f43218fe8 100644 --- a/src/systemc/tests/tlm/multi_sockets/MultiSocketSimpleSwitchAT.h +++ b/src/systemc/tests/tlm/multi_sockets/MultiSocketSimpleSwitchAT.h @@ -34,7 +34,7 @@ This class is a simple crossbar switch through which an arbitrary number of initiators may communicate in parallel as long as they do not talk to the same target. -If two masters address the same target at the same point of time, +If two requestors address the same target at the same point of time, the choice who will be allowed to communicate is done non-deterministically (based on the SystemC process exectution order). diff --git a/src/systemc/tlm_bridge/TlmBridge.py b/src/systemc/tlm_bridge/TlmBridge.py index 185146470..0a2aaa719 100644 --- a/src/systemc/tlm_bridge/TlmBridge.py +++ b/src/systemc/tlm_bridge/TlmBridge.py @@ -37,7 +37,7 @@ class Gem5ToTlmBridgeBase(SystemC_ScModule): system = Param.System(Parent.any, "system") - gem5 = ResponsePort('gem5 slave port') + gem5 = ResponsePort('gem5 response port') addr_ranges = VectorParam.AddrRange([], 'Addresses served by this port\'s TLM side') @@ -49,7 +49,7 @@ class TlmToGem5BridgeBase(SystemC_ScModule): system = Param.System(Parent.any, "system") - gem5 = RequestPort('gem5 master port') + gem5 = RequestPort('gem5 request port') class Gem5ToTlmBridge32(Gem5ToTlmBridgeBase): diff --git a/src/systemc/tlm_bridge/gem5_to_tlm.cc b/src/systemc/tlm_bridge/gem5_to_tlm.cc index f3bc1d5ca..ffcd53127 100644 --- a/src/systemc/tlm_bridge/gem5_to_tlm.cc +++ b/src/systemc/tlm_bridge/gem5_to_tlm.cc @@ -128,7 +128,7 @@ Gem5ToTlmBridge::pec( // Did another request arrive while blocked, schedule a retry. 
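The retry handling around the renamed bridgeResponsePort follows the usual gem5 handshake: a refused request sets a flag, and the retry is sent once the blocking transaction completes. A standalone sketch of that handshake, with an illustrative ToyBridge class rather than the real bridge:

    #include <iostream>
    #include <string>

    class ToyBridge {
        bool blocked = false;
        bool needToSendRequestRetry = false;
      public:
        bool recvTimingReq(const std::string &req) {
            if (blocked) {
                needToSendRequestRetry = true;   // refuse, but promise a retry
                return false;
            }
            blocked = true;                      // one outstanding transaction
            std::cout << "accepted " << req << "\n";
            return true;
        }
        void transactionDone() {
            blocked = false;
            if (needToSendRequestRetry) {
                needToSendRequestRetry = false;
                std::cout << "sendRetryReq()\n"; // peer will resend its request
            }
        }
    };

    int main() {
        ToyBridge bridge;
        bridge.recvTimingReq("req A");   // accepted
        bridge.recvTimingReq("req B");   // refused while blocked
        bridge.transactionDone();        // unblocks and issues the retry
        return 0;
    }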
         if (needToSendRequestRetry) {
             needToSendRequestRetry = false;
-            bsp.sendRetryReq();
+            bridgeResponsePort.sendRetryReq();
         }
     }
     if (phase == tlm::BEGIN_RESP) {
@@ -147,11 +147,11 @@
          */
         if (extension.isPipeThrough()) {
             if (packet->isResponse()) {
-                need_retry = !bsp.sendTimingResp(packet);
+                need_retry = !bridgeResponsePort.sendTimingResp(packet);
             }
         } else if (packet->needsResponse()) {
             packet->makeResponse();
-            need_retry = !bsp.sendTimingResp(packet);
+            need_retry = !bridgeResponsePort.sendTimingResp(packet);
         }

         if (need_retry) {
@@ -381,7 +381,7 @@ Gem5ToTlmBridge::recvRespRetry()
     PacketPtr packet =
         Gem5SystemC::Gem5Extension::getExtension(trans).getPacket();

-    bool need_retry = !bsp.sendTimingResp(packet);
+    bool need_retry = !bridgeResponsePort.sendTimingResp(packet);

     sc_assert(!need_retry);

@@ -442,7 +442,8 @@ Gem5ToTlmBridge::invalidate_direct_mem_ptr(
 template
 Gem5ToTlmBridge::Gem5ToTlmBridge(
     Params *params, const sc_core::sc_module_name &mn) :
-    Gem5ToTlmBridgeBase(mn), bsp(std::string(name()) + ".gem5", *this),
+    Gem5ToTlmBridgeBase(mn),
+    bridgeResponsePort(std::string(name()) + ".gem5", *this),
     socket("tlm_socket"),
     wrapper(socket, std::string(name()) + ".tlm", InvalidPortID),
     system(params->system), blockingRequest(nullptr),
@@ -456,7 +457,7 @@ template
 Gem5ToTlmBridge::gem5_getPort(const std::string &if_name, int idx)
 {
     if (if_name == "gem5")
-        return bsp;
+        return bridgeResponsePort;
     else if (if_name == "tlm")
         return wrapper;

@@ -467,7 +468,7 @@ template
 void
 Gem5ToTlmBridge::before_end_of_elaboration()
 {
-    bsp.sendRangeChange();
+    bridgeResponsePort.sendRangeChange();

     socket.register_nb_transport_bw(this, &Gem5ToTlmBridge::nb_transport_bw);
     socket.register_invalidate_direct_mem_ptr(
diff --git a/src/systemc/tlm_bridge/gem5_to_tlm.hh b/src/systemc/tlm_bridge/gem5_to_tlm.hh
index 7e69e3c64..1fe084028 100644
--- a/src/systemc/tlm_bridge/gem5_to_tlm.hh
+++ b/src/systemc/tlm_bridge/gem5_to_tlm.hh
@@ -85,7 +85,7 @@ template
 class Gem5ToTlmBridge : public Gem5ToTlmBridgeBase
 {
   private:
-    class BridgeSlavePort : public ResponsePort
+    class BridgeResponsePort : public ResponsePort
     {
       protected:
         Gem5ToTlmBridge &bridge;
@@ -128,13 +128,13 @@ class Gem5ToTlmBridge : public Gem5ToTlmBridgeBase
         void recvRespRetry() override { bridge.recvRespRetry(); }

       public:
-        BridgeSlavePort(const std::string &name_,
+        BridgeResponsePort(const std::string &name_,
                         Gem5ToTlmBridge &bridge_) :
             ResponsePort(name_, nullptr), bridge(bridge_)
         {}
     };

-    BridgeSlavePort bsp;
+    BridgeResponsePort bridgeResponsePort;
     tlm_utils::simple_initiator_socket<
         Gem5ToTlmBridge, BITWIDTH> socket;
     sc_gem5::TlmInitiatorWrapper wrapper;
diff --git a/src/systemc/tlm_bridge/tlm_to_gem5.cc b/src/systemc/tlm_bridge/tlm_to_gem5.cc
index 2bfbcc495..3891f58af 100644
--- a/src/systemc/tlm_bridge/tlm_to_gem5.cc
+++ b/src/systemc/tlm_bridge/tlm_to_gem5.cc
@@ -67,7 +67,7 @@ namespace sc_gem5
 {

 PacketPtr
-payload2packet(MasterID masterId, tlm::tlm_generic_payload &trans)
+payload2packet(RequestorID _id, tlm::tlm_generic_payload &trans)
 {
     MemCmd cmd;

@@ -87,7 +87,7 @@ payload2packet(MasterID masterId, tlm::tlm_generic_payload &trans)

     Request::Flags flags;
     auto req = std::make_shared(
-        trans.get_address(), trans.get_data_length(), flags, masterId);
+        trans.get_address(), trans.get_data_length(), flags, _id);

     /*
      * Allocate a new Packet. The packet will be deleted when it returns from
@@ -156,7 +156,7 @@ TlmToGem5Bridge::handleBeginReq(tlm::tlm_generic_payload &trans)
         extension->setPipeThrough();
         pkt = extension->getPacket();
     } else {
-        pkt = payload2packet(masterId, trans);
+        pkt = payload2packet(_id, trans);
     }

     auto tlmSenderState = new TlmSenderState(trans);
@@ -281,7 +281,7 @@ TlmToGem5Bridge::b_transport(tlm::tlm_generic_payload &trans,
         extension->setPipeThrough();
         pkt = extension->getPacket();
     } else {
-        pkt = payload2packet(masterId, trans);
+        pkt = payload2packet(_id, trans);
     }

     MemBackdoorPtr backdoor = nullptr;
@@ -318,7 +318,7 @@ TlmToGem5Bridge::transport_dbg(tlm::tlm_generic_payload &trans)
         extension->setPipeThrough();
         bmp.sendFunctional(extension->getPacket());
     } else {
-        auto pkt = payload2packet(masterId, trans);
+        auto pkt = payload2packet(_id, trans);
         if (pkt) {
             bmp.sendFunctional(pkt);
             destroyPacket(pkt);
@@ -344,7 +344,7 @@ TlmToGem5Bridge::get_direct_mem_ptr(tlm::tlm_generic_payload &trans,
         extension->setPipeThrough();
         pkt = extension->getPacket();
     } else {
-        pkt = payload2packet(masterId, trans);
+        pkt = payload2packet(_id, trans);
         pkt->req->setFlags(Request::NO_ACCESS);
     }

@@ -484,7 +484,7 @@ TlmToGem5Bridge::TlmToGem5Bridge(
     bmp(std::string(name()) + "master", *this), socket("tlm_socket"),
     wrapper(socket, std::string(name()) + ".tlm", InvalidPortID),
     system(params->system),
-    masterId(params->system->getGlobalMasterId(
+    _id(params->system->getGlobalRequestorId(
         std::string("[systemc].") + name()))
 {
 }
diff --git a/src/systemc/tlm_bridge/tlm_to_gem5.hh b/src/systemc/tlm_bridge/tlm_to_gem5.hh
index e2e7540f5..f1e3e081a 100644
--- a/src/systemc/tlm_bridge/tlm_to_gem5.hh
+++ b/src/systemc/tlm_bridge/tlm_to_gem5.hh
@@ -89,7 +89,7 @@ class TlmToGem5Bridge : public TlmToGem5BridgeBase
         TlmSenderState(tlm::tlm_generic_payload &trans) : trans(trans) {}
     };

-    class BridgeMasterPort : public RequestPort
+    class BridgeRequestPort : public RequestPort
     {
       protected:
         TlmToGem5Bridge &bridge;
@@ -103,7 +103,7 @@ class TlmToGem5Bridge : public TlmToGem5BridgeBase
         void recvRangeChange() override { bridge.recvRangeChange(); }

       public:
-        BridgeMasterPort(const std::string &name_,
+        BridgeRequestPort(const std::string &name_,
                          TlmToGem5Bridge &bridge_) :
             RequestPort(name_, nullptr), bridge(bridge_)
         {}
@@ -119,7 +119,7 @@ class TlmToGem5Bridge : public TlmToGem5BridgeBase

     bool responseInProgress;

-    BridgeMasterPort bmp;
+    BridgeRequestPort bmp;
     tlm_utils::simple_target_socket<
         TlmToGem5Bridge, BITWIDTH> socket;
     sc_gem5::TlmTargetWrapper wrapper;
@@ -171,7 +171,7 @@ class TlmToGem5Bridge : public TlmToGem5BridgeBase

     void before_end_of_elaboration() override;

-    const MasterID masterId;
+    const RequestorID _id;
 };

 } // namespace sc_gem5
diff --git a/tests/gem5/x86-boot-tests/system/system.py b/tests/gem5/x86-boot-tests/system/system.py
index bffd08a72..b050b6f3a 100755
--- a/tests/gem5/x86-boot-tests/system/system.py
+++ b/tests/gem5/x86-boot-tests/system/system.py
@@ -318,7 +318,7 @@ class MySystem(System):
 class CowDisk(IdeDisk):
     def __init__(self, filename):
         super(CowDisk, self).__init__()
-        self.driveID = 'master'
+        self.driveID = 'device0'
         self.image = CowDiskImage(child=RawDiskImage(read_only=True),
                                   read_only=False)
         self.image.child.image_file = filename
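
For out-of-tree models that have to follow this rename, the sketch below
shows how a SimObject can obtain its ids through the renamed System
interface and use one of them to build a Request. It is a minimal
illustration only, not code from this patch: MyDevice, MyDeviceParams and
buildDataRequest are invented names, and the sketch assumes the object's
Python description declares a Param.System member called "system".

    // Hypothetical device (not part of this patch) registering two
    // requestor ids, mirroring the getRequestorId() example documented
    // in src/sim/system.hh above.
    #include <memory>

    #include "base/types.hh"
    #include "mem/request.hh"
    #include "sim/sim_object.hh"
    #include "sim/system.hh"

    class MyDevice : public SimObject
    {
      private:
        // These would have been MasterID members before this change.
        RequestorID instRequestorId;
        RequestorID dataRequestorId;

      public:
        MyDevice(const MyDeviceParams *p) :
            SimObject(p),
            // Registered names become "<object name>.inst" and
            // "<object name>.data".
            instRequestorId(p->system->getRequestorId(this, "inst")),
            dataRequestorId(p->system->getRequestorId(this, "data"))
        {}

        RequestPtr
        buildDataRequest(Addr addr, unsigned size)
        {
            // The last constructor argument is now a RequestorID
            // rather than a MasterID.
            return std::make_shared<Request>(
                addr, size, Request::Flags(), dataRequestorId);
        }
    };

Code with no SimObject of its own, such as the TlmToGem5Bridge changed
above, registers a free-form name with getGlobalRequestorId() instead,
exactly as its constructor now does.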