/*
- * Copyright (c) 2012 ARM Limited
+ * Copyright (c) 2012-2013 ARM Limited
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
: MemObject(p), m_version(p->version), m_controller(NULL),
m_mandatory_q_ptr(NULL),
pio_port(csprintf("%s-pio-port", name()), this),
- m_usingRubyTester(p->using_ruby_tester), m_request_cnt(0),
+ m_usingRubyTester(p->using_ruby_tester),
drainManager(NULL), ruby_system(p->ruby_system), system(p->system),
- waitingOnSequencer(false), access_phys_mem(p->access_phys_mem)
+ access_phys_mem(p->access_phys_mem)
{
assert(m_version != -1);
// create the slave ports based on the number of connected ports
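+ // the index of each slave port in slave_ports doubles as its PortID,
+ // which is later used to route responses back to the issuing port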
for (size_t i = 0; i < p->port_slave_connection_count; ++i) {
slave_ports.push_back(new M5Port(csprintf("%s-slave%d", name(), i),
- this, ruby_system, access_phys_mem));
+ this, ruby_system,
+ access_phys_mem, i));
}
// create the master ports based on the number of connected ports
RubyPort::PioPort::PioPort(const std::string &_name,
RubyPort *_port)
- : QueuedMasterPort(_name, _port, queue), queue(*_port, *this)
+ : QueuedMasterPort(_name, _port, queue), queue(*_port, *this),
+ ruby_port(_port)
{
DPRINTF(RubyPort, "creating master port on ruby sequencer %s\n", _name);
}
RubyPort::M5Port::M5Port(const std::string &_name, RubyPort *_port,
- RubySystem *_system, bool _access_phys_mem)
- : QueuedSlavePort(_name, _port, queue), queue(*_port, *this),
+ RubySystem *_system, bool _access_phys_mem, PortID id)
+ : QueuedSlavePort(_name, _port, queue, id), queue(*_port, *this),
ruby_port(_port), ruby_system(_system),
- _onRetryList(false), access_phys_mem(_access_phys_mem)
+ access_phys_mem(_access_phys_mem)
{
DPRINTF(RubyPort, "creating slave port on ruby sequencer %s\n", _name);
}
return 0;
}
-
bool
-RubyPort::PioPort::recvTimingResp(PacketPtr pkt)
+RubyPort::recvTimingResp(PacketPtr pkt, PortID master_port_id)
{
+ // got a response from a device
+ assert(pkt->isResponse());
+
// In FS mode, ruby memory will receive pio responses from devices
// and it must forward these responses back to the particular CPU.
- DPRINTF(RubyPort, "Pio response for address %#x\n", pkt->getAddr());
+ DPRINTF(RubyPort, "Pio response for address %#x, going to %d\n",
+ pkt->getAddr(), pkt->getDest());
- // First we must retrieve the request port from the sender State
- RubyPort::SenderState *senderState =
- safe_cast<RubyPort::SenderState *>(pkt->popSenderState());
- M5Port *port = senderState->port;
- assert(port != NULL);
- delete senderState;
+ // Retrieve the port from the destination field
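+ // (the request's source was set to the slave port id in recvTimingReq,
+ // and it became the destination when the device created the response)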
+ assert(pkt->getDest() < slave_ports.size());
- port->sendTimingResp(pkt);
+ // attempt to send the response in the next cycle
+ slave_ports[pkt->getDest()]->schedTimingResp(pkt, curTick() +
+ g_system_ptr->clockPeriod());
return true;
}
RubyPort::M5Port::recvTimingReq(PacketPtr pkt)
{
DPRINTF(RubyPort,
- "Timing access caught for address %#x\n", pkt->getAddr());
-
- //dsm: based on SimpleTimingPort::recvTimingReq(pkt);
+ "Timing access for address %#x on port %d\n", pkt->getAddr(),
+ id);
if (pkt->memInhibitAsserted())
panic("RubyPort should never see an inhibited request\n");
- // Save the port in the sender state object to be used later to
- // route the response
- pkt->pushSenderState(new SenderState(this));
+ // Save the port id to be used later to route the response
+ pkt->setSrc(id);
// Check for pio requests and directly send them to the dedicated
// pio port.
RequestStatus requestStatus = ruby_port->makeRequest(pkt);
// If the request successfully issued then we should return true.
- // Otherwise, we need to delete the senderStatus we just created and return
- // false.
+ // Otherwise, we need to tell the port to retry at a later point
+ // and return false.
if (requestStatus == RequestStatus_Issued) {
- DPRINTF(RubyPort, "Request %#x issued\n", pkt->getAddr());
+ DPRINTF(RubyPort, "Request %s 0x%x issued\n", pkt->cmdString(),
+ pkt->getAddr());
return true;
}
"Request for address %#x did not issue because %s\n",
pkt->getAddr(), RequestStatus_to_string(requestStatus));
- SenderState* senderState = safe_cast<SenderState*>(pkt->senderState);
- pkt->senderState = senderState->predecessor;
- delete senderState;
return false;
}
void
RubyPort::ruby_hit_callback(PacketPtr pkt)
{
- // Retrieve the request port from the sender State
- RubyPort::SenderState *senderState =
- safe_cast<RubyPort::SenderState *>(pkt->senderState);
- M5Port *port = senderState->port;
- assert(port != NULL);
+ DPRINTF(RubyPort, "Hit callback for %s 0x%x\n", pkt->cmdString(),
+ pkt->getAddr());
+
+ // The packet was destined for memory and has not yet been turned
+ // into a response
+ assert(system->isMemAddr(pkt->getAddr()));
+ assert(pkt->isRequest());
- // pop the sender state from the packet
- pkt->senderState = senderState->predecessor;
- delete senderState;
+ // As it has not yet been turned around, the source field tells us
+ // which port it came from.
+ assert(pkt->getSrc() < slave_ports.size());
- port->hitCallback(pkt);
+ slave_ports[pkt->getSrc()]->hitCallback(pkt);
//
// If we had to stall the M5Ports, wake them up because the sequencer
// likely has free resources now.
//
- if (waitingOnSequencer) {
+ if (!retryList.empty()) {
//
// Record the current list of ports to retry on a temporary list before
// calling sendRetry on those ports. sendRetry will cause an immediate
// retry, which may result in the ports being put back on the
// list. Therefore we want to clear the retryList before calling
// sendRetry.
//
- std::list<M5Port*> curRetryList(retryList);
+ std::vector<M5Port*> curRetryList(retryList);
retryList.clear();
- waitingOnSequencer = false;
-
- for (std::list<M5Port*>::iterator i = curRetryList.begin();
- i != curRetryList.end(); ++i) {
+
+ for (auto i = curRetryList.begin(); i != curRetryList.end(); ++i) {
DPRINTF(RubyPort,
"Sequencer may now be free. SendRetry to port %s\n",
(*i)->name());
- (*i)->onRetryList(false);
(*i)->sendRetry();
}
}
}
bool
-RubyPort::M5Port::isPhysMemAddress(Addr addr)
+RubyPort::M5Port::isPhysMemAddress(Addr addr) const
{
return ruby_port->system->isMemAddr(addr);
}
/*
- * Copyright (c) 2012 ARM Limited
+ * Copyright (c) 2012-2013 ARM Limited
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
SlavePacketQueue queue;
RubyPort *ruby_port;
RubySystem* ruby_system;
- bool _onRetryList;
bool access_phys_mem;
public:
M5Port(const std::string &_name, RubyPort *_port,
- RubySystem*_system, bool _access_phys_mem);
+ RubySystem *_system, bool _access_phys_mem, PortID id);
void hitCallback(PacketPtr pkt);
void evictionCallback(const Address& address);
-
- bool onRetryList()
- { return _onRetryList; }
-
- void onRetryList(bool newVal)
- { _onRetryList = newVal; }
protected:
- virtual bool recvTimingReq(PacketPtr pkt);
- virtual Tick recvAtomic(PacketPtr pkt);
- virtual void recvFunctional(PacketPtr pkt);
- virtual AddrRangeList getAddrRanges() const;
+ bool recvTimingReq(PacketPtr pkt);
+ Tick recvAtomic(PacketPtr pkt);
+ void recvFunctional(PacketPtr pkt);
+ AddrRangeList getAddrRanges() const;
private:
- bool isPhysMemAddress(Addr addr);
+ bool isPhysMemAddress(Addr addr) const;
};
- friend class M5Port;
-
class PioPort : public QueuedMasterPort
{
private:
MasterPacketQueue queue;
+ RubyPort *ruby_port;
public:
PioPort(const std::string &_name, RubyPort *_port);
protected:
- virtual bool recvTimingResp(PacketPtr pkt);
- };
-
- friend class PioPort;
-
- struct SenderState : public Packet::SenderState
- {
- M5Port* port;
-
- SenderState(M5Port* _port) : port(_port)
- {}
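+ // forward the response to the RubyPort, which routes it back to
+ // the appropriate slave port based on the packet's destination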
+ bool recvTimingResp(PacketPtr pkt)
+ { return ruby_port->recvTimingResp(pkt, id); }
};
typedef RubyPortParams Params;
void testDrainComplete();
void ruby_eviction_callback(const Address& address);
+ /**
+ * Called by the PIO port when receiving a timing response.
+ *
+ * @param pkt Response packet
+ * @param master_port_id Port id of the PIO port
+ *
+ * @return Whether the response was successfully sent
+ */
+ bool recvTimingResp(PacketPtr pkt, PortID master_port_id);
+
uint32_t m_version;
AbstractController* m_controller;
MessageBuffer* m_mandatory_q_ptr;
private:
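+ /**
+ * Record a stalled M5Port so it can be retried later, once the
+ * sequencer has resources available again.
+ */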
void addToRetryList(M5Port * port)
{
- if (!port->onRetryList()) {
- port->onRetryList(true);
- retryList.push_back(port);
- waitingOnSequencer = true;
- }
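+ // a stalled port can only be on the list once; it will not issue
+ // another request until it has been sent a retry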
+ assert(std::find(retryList.begin(), retryList.end(), port) ==
+ retryList.end());
+ retryList.push_back(port);
}
unsigned int getChildDrainCount(DrainManager *dm);
- uint16_t m_port_id;
- uint64_t m_request_cnt;
-
/** Vector of M5 Ports attached to this Ruby port. */
typedef std::vector<M5Port*>::iterator CpuPortIter;
std::vector<M5Port*> slave_ports;
// Based on similar code in the M5 bus. Stores pointers to those ports
// that should be called when the Sequencer becomes available after a stall.
//
- std::list<M5Port*> retryList;
+ std::vector<M5Port*> retryList;
- bool waitingOnSequencer;
bool access_phys_mem;
};