RubyPort::RubyPort(const Params *p)
: MemObject(p), m_version(p->version), m_controller(NULL),
- m_mandatory_q_ptr(NULL),
- pio_port(csprintf("%s-pio-port", name()), this),
- m_usingRubyTester(p->using_ruby_tester),
- drainManager(NULL), ruby_system(p->ruby_system), system(p->system),
+ m_mandatory_q_ptr(NULL), m_usingRubyTester(p->using_ruby_tester),
+ pioMasterPort(csprintf("%s.pio-master-port", name()), this),
+ pioSlavePort(csprintf("%s.pio-slave-port", name()), this),
+ memMasterPort(csprintf("%s.mem-master-port", name()), this),
+ memSlavePort(csprintf("%s.mem-slave-port", name()), this,
+ p->ruby_system, p->access_phys_mem, -1),
+ gotAddrRanges(p->port_master_connection_count), drainManager(NULL),
+ ruby_system(p->ruby_system), system(p->system),
access_phys_mem(p->access_phys_mem)
{
assert(m_version != -1);
// create the slave ports based on the number of connected ports
for (size_t i = 0; i < p->port_slave_connection_count; ++i) {
- slave_ports.push_back(new M5Port(csprintf("%s-slave%d", name(), i),
- this, ruby_system,
- access_phys_mem, i));
+ slave_ports.push_back(new MemSlavePort(csprintf("%s.slave%d", name(),
+ i), this, ruby_system, access_phys_mem, i));
}
// create the master ports based on the number of connected ports
for (size_t i = 0; i < p->port_master_connection_count; ++i) {
- master_ports.push_back(new PioPort(csprintf("%s-master%d", name(), i),
- this));
+ master_ports.push_back(new PioMasterPort(csprintf("%s.master%d",
+ name(), i), this));
}
}
BaseMasterPort &
RubyPort::getMasterPort(const std::string &if_name, PortID idx)
{
- if (if_name == "pio_port") {
- return pio_port;
+ if (if_name == "mem_master_port") {
+ return memMasterPort;
+ }
+
+ if (if_name == "pio_master_port") {
+ return pioMasterPort;
}
// used by the x86 CPUs to connect the interrupt PIO and interrupt slave
BaseSlavePort &
RubyPort::getSlavePort(const std::string &if_name, PortID idx)
{
+ if (if_name == "mem_slave_port") {
+ return memSlavePort;
+ }
+
+ if (if_name == "pio_slave_port")
+ return pioSlavePort;
+
// used by the CPUs to connect the caches to the interconnect, and
// for the x86 case also the interrupt master
if (if_name != "slave") {
}
}
-RubyPort::PioPort::PioPort(const std::string &_name,
+RubyPort::PioMasterPort::PioMasterPort(const std::string &_name,
+ RubyPort *_port)
+ : QueuedMasterPort(_name, _port, queue), queue(*_port, *this)
+{
+ DPRINTF(RubyPort, "Created master pioport on sequencer %s\n", _name);
+}
+
+RubyPort::PioSlavePort::PioSlavePort(const std::string &_name,
+ RubyPort *_port)
+ : QueuedSlavePort(_name, _port, queue), queue(*_port, *this)
+{
+ DPRINTF(RubyPort, "Created slave pioport on sequencer %s\n", _name);
+}
+
+RubyPort::MemMasterPort::MemMasterPort(const std::string &_name,
RubyPort *_port)
- : QueuedMasterPort(_name, _port, queue), queue(*_port, *this),
- ruby_port(_port)
+ : QueuedMasterPort(_name, _port, queue), queue(*_port, *this)
{
- DPRINTF(RubyPort, "creating master port on ruby sequencer %s\n", _name);
+ DPRINTF(RubyPort, "Created master memport on ruby sequencer %s\n", _name);
}
-RubyPort::M5Port::M5Port(const std::string &_name, RubyPort *_port,
+RubyPort::MemSlavePort::MemSlavePort(const std::string &_name, RubyPort *_port,
RubySystem *_system, bool _access_phys_mem, PortID id)
: QueuedSlavePort(_name, _port, queue, id), queue(*_port, *this),
- ruby_port(_port), ruby_system(_system),
- access_phys_mem(_access_phys_mem)
+ ruby_system(_system), access_phys_mem(_access_phys_mem)
{
- DPRINTF(RubyPort, "creating slave port on ruby sequencer %s\n", _name);
+ DPRINTF(RubyPort, "Created slave memport on ruby sequencer %s\n", _name);
}
-Tick
-RubyPort::M5Port::recvAtomic(PacketPtr pkt)
+bool
+RubyPort::PioMasterPort::recvTimingResp(PacketPtr pkt)
{
- panic("RubyPort::M5Port::recvAtomic() not implemented!\n");
- return 0;
+ RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
+ DPRINTF(RubyPort, "Response for address: %#x\n", pkt->getAddr());
+
+ // send next cycle
+ ruby_port->pioSlavePort.schedTimingResp(
+ pkt, curTick() + g_system_ptr->clockPeriod());
+ return true;
}
-bool
-RubyPort::recvTimingResp(PacketPtr pkt, PortID master_port_id)
+bool RubyPort::MemMasterPort::recvTimingResp(PacketPtr pkt)
{
// got a response from a device
assert(pkt->isResponse());
DPRINTF(RubyPort, "Pio response for address %#x, going to %d\n",
pkt->getAddr(), pkt->getDest());
- // Retrieve the port from the destination field
- assert(pkt->getDest() < slave_ports.size());
+ // First we must retrieve the request port from the sender State
+ RubyPort::SenderState *senderState =
+ safe_cast<RubyPort::SenderState *>(pkt->popSenderState());
+ MemSlavePort *port = senderState->port;
+ assert(port != NULL);
+ delete senderState;
// attempt to send the response in the next cycle
- slave_ports[pkt->getDest()]->schedTimingResp(pkt, curTick() +
- g_system_ptr->clockPeriod());
+ port->schedTimingResp(pkt, curTick() + g_system_ptr->clockPeriod());
return true;
}
bool
-RubyPort::M5Port::recvTimingReq(PacketPtr pkt)
+RubyPort::PioSlavePort::recvTimingReq(PacketPtr pkt)
{
- DPRINTF(RubyPort,
- "Timing access for address %#x on port %d\n", pkt->getAddr(),
- id);
+ RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
+
+ for (size_t i = 0; i < ruby_port->master_ports.size(); ++i) {
+ AddrRangeList l = ruby_port->master_ports[i]->getAddrRanges();
+ for (auto it = l.begin(); it != l.end(); ++it) {
+ if (it->contains(pkt->getAddr())) {
+ ruby_port->master_ports[i]->sendTimingReq(pkt);
+ return true;
+ }
+ }
+ }
+ panic("Should never reach here!\n");
+}
+
+bool
+RubyPort::MemSlavePort::recvTimingReq(PacketPtr pkt)
+{
+ DPRINTF(RubyPort, "Timing request for address %#x on port %d\n",
+ pkt->getAddr(), id);
+ RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
if (pkt->memInhibitAsserted())
panic("RubyPort should never see an inhibited request\n");
- // Save the port id to be used later to route the response
- pkt->setSrc(id);
-
// Check for pio requests and directly send them to the dedicated
// pio port.
if (!isPhysMemAddress(pkt->getAddr())) {
- assert(ruby_port->pio_port.isConnected());
- DPRINTF(RubyPort,
- "Request for address 0x%#x is assumed to be a pio request\n",
+ assert(ruby_port->memMasterPort.isConnected());
+ DPRINTF(RubyPort, "Request address %#x assumed to be a pio address\n",
pkt->getAddr());
+ // Save the port in the sender state object to be used later to
+ // route the response
+ pkt->pushSenderState(new SenderState(this));
+
// send next cycle
- ruby_port->pio_port.schedTimingReq(pkt,
+ ruby_port->memMasterPort.schedTimingReq(pkt,
curTick() + g_system_ptr->clockPeriod());
return true;
}
+ // Save the port id to be used later to route the response
+ pkt->setSrc(id);
+
assert(Address(pkt->getAddr()).getOffset() + pkt->getSize() <=
RubySystem::getBlockSizeBytes());
ruby_port->addToRetryList(this);
}
- DPRINTF(RubyPort,
- "Request for address %#x did not issue because %s\n",
+ DPRINTF(RubyPort, "Request for address %#x did not issue because %s\n",
pkt->getAddr(), RequestStatus_to_string(requestStatus));
return false;
}
void
-RubyPort::M5Port::recvFunctional(PacketPtr pkt)
+RubyPort::MemSlavePort::recvFunctional(PacketPtr pkt)
{
- DPRINTF(RubyPort, "Functional access caught for address %#x\n",
- pkt->getAddr());
+ DPRINTF(RubyPort, "Functional access for address: %#x\n", pkt->getAddr());
+ RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
// Check for pio requests and directly send them to the dedicated
// pio port.
if (!isPhysMemAddress(pkt->getAddr())) {
- assert(ruby_port->pio_port.isConnected());
- DPRINTF(RubyPort, "Request for address 0x%#x is a pio request\n",
- pkt->getAddr());
- panic("RubyPort::PioPort::recvFunctional() not implemented!\n");
+ assert(ruby_port->memMasterPort.isConnected());
+ DPRINTF(RubyPort, "Pio Request for address: %#x\n", pkt->getAddr());
+ panic("RubyPort::PioMasterPort::recvFunctional() not implemented!\n");
}
assert(pkt->getAddr() + pkt->getSize() <=
} else if (pkt->isWrite()) {
accessSucceeded = ruby_system->functionalWrite(pkt);
} else {
- panic("RubyPort: unsupported functional command %s\n",
- pkt->cmdString());
+ panic("Unsupported functional command %s\n", pkt->cmdString());
}
// Unless the requester explicitly said otherwise, generate an error if
slave_ports[pkt->getSrc()]->hitCallback(pkt);
//
- // If we had to stall the M5Ports, wake them up because the sequencer
+ // If we had to stall the MemSlavePorts, wake them up because the sequencer
// likely has free resources now.
//
if (!retryList.empty()) {
// list. Therefore we want to clear the retryList before calling
// sendRetry.
//
- std::vector<M5Port*> curRetryList(retryList);
+ std::vector<MemSlavePort *> curRetryList(retryList);
retryList.clear();
{
int count = 0;
- if (pio_port.isConnected()) {
- count += pio_port.drain(dm);
+ if (memMasterPort.isConnected()) {
+ count += memMasterPort.drain(dm);
DPRINTF(Config, "count after pio check %d\n", count);
}
DPRINTF(Config, "count after slave port check %d\n", count);
}
- for (std::vector<PioPort*>::iterator p = master_ports.begin();
+ for (std::vector<PioMasterPort *>::iterator p = master_ports.begin();
p != master_ports.end(); ++p) {
count += (*p)->drain(dm);
DPRINTF(Config, "count after master port check %d\n", count);
}
DPRINTF(Config, "final count %d\n", count);
-
return count;
}
}
void
-RubyPort::M5Port::hitCallback(PacketPtr pkt)
+RubyPort::MemSlavePort::hitCallback(PacketPtr pkt)
{
bool needsResponse = pkt->needsResponse();
DPRINTF(RubyPort, "Hit callback needs response %d\n", needsResponse);
if (accessPhysMem) {
+ RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
ruby_port->system->getPhysMem().access(pkt);
} else if (needsResponse) {
pkt->makeResponse();
}
AddrRangeList
-RubyPort::M5Port::getAddrRanges() const
+RubyPort::PioSlavePort::getAddrRanges() const
{
// at the moment the assumption is that the master does not care
AddrRangeList ranges;
+ RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
+
+ for (size_t i = 0; i < ruby_port->master_ports.size(); ++i) {
+ ranges.splice(ranges.begin(),
+ ruby_port->master_ports[i]->getAddrRanges());
+ }
+ for (AddrRangeConstIter r = ranges.begin(); r != ranges.end(); ++r)
+ DPRINTF(RubyPort, "%s\n", r->to_string());
return ranges;
}
bool
-RubyPort::M5Port::isPhysMemAddress(Addr addr) const
+RubyPort::MemSlavePort::isPhysMemAddress(Addr addr) const
{
+ RubyPort *ruby_port = static_cast<RubyPort *>(&owner);
return ruby_port->system->isMemAddr(addr);
}
}
}
}
+
+void
+RubyPort::PioMasterPort::recvRangeChange()
+{
+ RubyPort &r = static_cast<RubyPort &>(owner);
+ r.gotAddrRanges--;
+ if (r.gotAddrRanges == 0) {
+ r.pioSlavePort.sendRangeChange();
+ }
+}
class RubyPort : public MemObject
{
public:
- class M5Port : public QueuedSlavePort
+ class MemMasterPort : public QueuedMasterPort
+ {
+ private:
+ MasterPacketQueue queue;
+
+ public:
+ MemMasterPort(const std::string &_name, RubyPort *_port);
+
+ protected:
+ bool recvTimingResp(PacketPtr pkt);
+ void recvRangeChange() {}
+ };
+
+ class MemSlavePort : public QueuedSlavePort
{
private:
SlavePacketQueue queue;
- RubyPort *ruby_port;
RubySystem* ruby_system;
bool access_phys_mem;
public:
- M5Port(const std::string &_name, RubyPort *_port,
+ MemSlavePort(const std::string &_name, RubyPort *_port,
RubySystem*_system, bool _access_phys_mem, PortID id);
void hitCallback(PacketPtr pkt);
void evictionCallback(const Address& address);
protected:
bool recvTimingReq(PacketPtr pkt);
- Tick recvAtomic(PacketPtr pkt);
+
+ Tick recvAtomic(PacketPtr pkt)
+ { panic("RubyPort::MemSlavePort::recvAtomic() not implemented!\n"); }
+
void recvFunctional(PacketPtr pkt);
- AddrRangeList getAddrRanges() const;
+
+ AddrRangeList getAddrRanges() const
+ { AddrRangeList ranges; return ranges; }
private:
bool isPhysMemAddress(Addr addr) const;
};
- class PioPort : public QueuedMasterPort
+ class PioMasterPort : public QueuedMasterPort
{
private:
-
MasterPacketQueue queue;
- RubyPort *ruby_port;
public:
- PioPort(const std::string &_name, RubyPort *_port);
+ PioMasterPort(const std::string &_name, RubyPort *_port);
protected:
- bool recvTimingResp(PacketPtr pkt)
- { return ruby_port->recvTimingResp(pkt, id); }
+ bool recvTimingResp(PacketPtr pkt);
+ void recvRangeChange();
};
+ class PioSlavePort : public QueuedSlavePort
+ {
+ private:
+ SlavePacketQueue queue;
+
+ public:
+ PioSlavePort(const std::string &_name, RubyPort *_port);
+
+ protected:
+ bool recvTimingReq(PacketPtr pkt);
+
+ Tick recvAtomic(PacketPtr pkt)
+ { panic("recvAtomic not supported with ruby!"); }
+
+ void recvFunctional(PacketPtr pkt)
+ { panic("recvFunctional should never be called on pio slave port!"); }
+
+ AddrRangeList getAddrRanges() const;
+ };
+
+ struct SenderState : public Packet::SenderState
+ {
+ MemSlavePort *port;
+ SenderState(MemSlavePort * _port) : port(_port)
+ {}
+ };
+
typedef RubyPortParams Params;
RubyPort(const Params *p);
virtual ~RubyPort() {}
unsigned int drain(DrainManager *dm);
protected:
- const std::string m_name;
void ruby_hit_callback(PacketPtr pkt);
void testDrainComplete();
void ruby_eviction_callback(const Address& address);
uint32_t m_version;
AbstractController* m_controller;
MessageBuffer* m_mandatory_q_ptr;
- PioPort pio_port;
bool m_usingRubyTester;
private:
- void addToRetryList(M5Port * port)
+ void addToRetryList(MemSlavePort * port)
{
assert(std::find(retryList.begin(), retryList.end(), port) ==
retryList.end());
unsigned int getChildDrainCount(DrainManager *dm);
+ PioMasterPort pioMasterPort;
+ PioSlavePort pioSlavePort;
+ MemMasterPort memMasterPort;
+ MemSlavePort memSlavePort;
+ unsigned int gotAddrRanges;
+
/** Vector of M5 Ports attached to this Ruby port. */
- typedef std::vector<M5Port*>::iterator CpuPortIter;
- std::vector<M5Port*> slave_ports;
- std::vector<PioPort*> master_ports;
+ typedef std::vector<MemSlavePort *>::iterator CpuPortIter;
+ std::vector<MemSlavePort *> slave_ports;
+ std::vector<PioMasterPort *> master_ports;
DrainManager *drainManager;
// Based on similar code in the M5 bus. Stores pointers to those ports
// that should be called when the Sequencer becomes available after a stall.
//
- std::vector<M5Port*> retryList;
+ std::vector<MemSlavePort *> retryList;
bool access_phys_mem;
};