# consistent with the NetDest list. Therefore the l1 controller nodes must be
# listed before the directory nodes and directory nodes before dma nodes, etc.
#
-
-# net_nodes = []
l1_cntrl_nodes = []
dir_cntrl_nodes = []
# controller constructors are called before the network constructor
#
for (i, cpu) in enumerate(cpus):
- l1_cntrl = L1Cache_Controller()
- cpu_seq = RubySequencer(controller = l1_cntrl,
- icache = L1Cache(controller = l1_cntrl),
- dcache = L1Cache(controller = l1_cntrl))
- cpu.controller = l1_cntrl
- cpu.sequencer = cpu_seq
- cpu.test = cpu_seq.port
- cpu_seq.funcmem_port = system.physmem.port
- cpu.functional = system.funcmem.port
+ #
+ # First create the Ruby objects associated with this cpu
+ # Eventually this code should go in a python file specific to the
+ # MOESI_hammer protocol
+ #
+ l1i_cache = L1Cache()
+ l1d_cache = L1Cache()
+ l2_cache = L2Cache()
+
+ cpu_seq = RubySequencer(icache = l1i_cache,
+ dcache = l1d_cache,
+ funcmem_port = system.physmem.port)
+
+ l1_cntrl = L1Cache_Controller(version = i,
+ sequencer = cpu_seq,
+ L1IcacheMemory = l1i_cache,
+ L1DcacheMemory = l1d_cache,
+ L2cacheMemory = l2_cache)
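+    #
+    # Note: the sequencer no longer takes a controller parameter; the
+    # generated L1Cache_Controller passes itself to the sequencer via
+    # setController() during construction (see the SLICC and RubyPort
+    # changes below).
+    #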
+
dir_cntrl = Directory_Controller(version = i,
directory = RubyDirectoryMemory(),
- memory_control = RubyMemoryControl())
+ memBuffer = RubyMemoryControl())
- # net_nodes += [l1_cntrl, dir_cntrl]
+ #
+    # As noted above: two independent lists are tracked to maintain the
+    # order of nodes/controllers assumed by the ruby network
+ #
l1_cntrl_nodes.append(l1_cntrl)
dir_cntrl_nodes.append(dir_cntrl)
+ #
+ # Finally tie the memtester ports to the correct system ports
+ #
+ cpu.test = cpu_seq.port
+ cpu.functional = system.funcmem.port
+
+
#
# Important: the topology constructor must be called before the network
# constructor.
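#
# A minimal sketch of that ordering (constructor and helper names here are
# assumptions for illustration, not part of this change):
#
#   all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes
#   topology = makeCrossbar(all_cntrls)           # hypothetical helper
#   network = SimpleNetwork(topology = topology)
#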
*/
machine(L1Cache, "AMD Hammer-like protocol")
-: int cache_response_latency = 12,
+: Sequencer * sequencer,
+ CacheMemory * L1IcacheMemory,
+ CacheMemory * L1DcacheMemory,
+ CacheMemory * L2cacheMemory,
+ int cache_response_latency = 12,
int issue_latency = 2
{
// STRUCTURE DEFINITIONS
MessageBuffer mandatoryQueue, ordered="false";
- Sequencer sequencer, factory='RubySystem::getSequencer(m_cfg["sequencer"])';
// CacheEntry
structure(Entry, desc="...", interface="AbstractCacheEntry") {
bool Sharers, desc="On a GetS, did we find any other sharers in the system";
}
- external_type(CacheMemory) {
- bool cacheAvail(Address);
- Address cacheProbe(Address);
- void allocate(Address, Entry);
- void deallocate(Address);
- Entry lookup(Address);
- void changePermission(Address, AccessPermission);
- bool isTagPresent(Address);
- void profileMiss(CacheMsg);
- }
-
external_type(TBETable) {
TBE lookup(Address);
void allocate(Address);
}
TBETable TBEs, template_hack="<L1Cache_TBE>";
- CacheMemory L1IcacheMemory, factory='RubySystem::getCache(m_cfg["icache"])';
- CacheMemory L1DcacheMemory, factory='RubySystem::getCache(m_cfg["dcache"])';
- CacheMemory L2cacheMemory, factory='RubySystem::getCache(m_cfg["l2cache"])';
Entry getCacheEntry(Address addr), return_by_ref="yes" {
if (L2cacheMemory.isTagPresent(addr)) {
- return L2cacheMemory[addr];
+ return static_cast(Entry, L2cacheMemory[addr]);
} else if (L1DcacheMemory.isTagPresent(addr)) {
- return L1DcacheMemory[addr];
+ return static_cast(Entry, L1DcacheMemory[addr]);
} else {
- return L1IcacheMemory[addr];
+ return static_cast(Entry, L1IcacheMemory[addr]);
}
}
action(ss_copyFromL1toL2, "\s", desc="Copy data block from L1 (I or D) to L2") {
if (L1DcacheMemory.isTagPresent(address)) {
- L2cacheMemory[address] := L1DcacheMemory[address];
+ static_cast(Entry, L2cacheMemory[address]).Dirty := static_cast(Entry, L1DcacheMemory[address]).Dirty;
+ static_cast(Entry, L2cacheMemory[address]).DataBlk := static_cast(Entry, L1DcacheMemory[address]).DataBlk;
} else {
- L2cacheMemory[address] := L1IcacheMemory[address];
+ static_cast(Entry, L2cacheMemory[address]).Dirty := static_cast(Entry, L1IcacheMemory[address]).Dirty;
+ static_cast(Entry, L2cacheMemory[address]).DataBlk := static_cast(Entry, L1IcacheMemory[address]).DataBlk;
}
}
action(tt_copyFromL2toL1, "\t", desc="Copy data block from L2 to L1 (I or D)") {
if (L1DcacheMemory.isTagPresent(address)) {
- L1DcacheMemory[address] := L2cacheMemory[address];
+ static_cast(Entry, L1DcacheMemory[address]).Dirty := static_cast(Entry, L2cacheMemory[address]).Dirty;
+ static_cast(Entry, L1DcacheMemory[address]).DataBlk := static_cast(Entry, L2cacheMemory[address]).DataBlk;
} else {
- L1IcacheMemory[address] := L2cacheMemory[address];
+ static_cast(Entry, L1IcacheMemory[address]).Dirty := static_cast(Entry, L2cacheMemory[address]).Dirty;
+ static_cast(Entry, L1IcacheMemory[address]).DataBlk := static_cast(Entry, L2cacheMemory[address]).DataBlk;
}
}
*/
machine(Directory, "AMD Hammer-like protocol")
-: int memory_controller_latency = 12
+: DirectoryMemory * directory,
+ MemoryControl * memBuffer,
+ int memory_controller_latency = 12
{
MessageBuffer forwardFromDir, network="To", virtual_network="2", ordered="false";
// TYPES
// DirectoryEntry
- structure(Entry, desc="...") {
+ structure(Entry, desc="...", interface="AbstractEntry") {
State DirectoryState, desc="Directory state";
DataBlock DataBlk, desc="data for the block";
}
- external_type(DirectoryMemory) {
- Entry lookup(Address);
- bool isPresent(Address);
- }
-
- external_type(MemoryControl, inport="yes", outport="yes") {
-
- }
-
// TBE entries for DMA requests
structure(TBE, desc="TBE entries for outstanding DMA requests") {
Address PhysicalAddress, desc="physical address";
// ** OBJECTS **
- DirectoryMemory directory, factory='RubySystem::getDirectory(m_cfg["directory_name"])';
-
- MemoryControl memBuffer, factory='RubySystem::getMemoryControl(m_cfg["memory_controller_name"])';
-
TBETable TBEs, template_hack="<Directory_TBE>";
+ Entry getDirectoryEntry(Address addr), return_by_ref="yes" {
+ return static_cast(Entry, directory[addr]);
+ }
+
State getState(Address addr) {
if (TBEs.isPresent(addr)) {
return TBEs[addr].TBEState;
} else {
- return directory[addr].DirectoryState;
+ return getDirectoryEntry(addr).DirectoryState;
}
}
if (TBEs.isPresent(addr)) {
TBEs[addr].TBEState := state;
}
- directory[addr].DirectoryState := state;
+ getDirectoryEntry(addr).DirectoryState := state;
}
MessageBuffer triggerQueue, ordered="true";
out_msg.Sender := machineID;
out_msg.OriginalRequestorMachId := in_msg.Requestor;
out_msg.MessageSize := in_msg.MessageSize;
- out_msg.DataBlk := directory[address].DataBlk;
+ out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
DEBUG_EXPR(out_msg);
}
}
out_msg.Sender := machineID;
out_msg.OriginalRequestorMachId := in_msg.Requestor;
out_msg.MessageSize := in_msg.MessageSize;
- out_msg.DataBlk := directory[address].DataBlk;
+ out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
DEBUG_EXPR(out_msg);
}
}
peek(unblockNetwork_in, ResponseMsg) {
assert(in_msg.Dirty);
assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
- directory[address].DataBlk := in_msg.DataBlk;
+ getDirectoryEntry(address).DataBlk := in_msg.DataBlk;
DEBUG_EXPR(in_msg.Address);
DEBUG_EXPR(in_msg.DataBlk);
}
}
action(dwt_writeDmaDataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
- directory[address].DataBlk := TBEs[address].DataBlk;
- directory[address].DataBlk.copyPartial(TBEs[address].DmaDataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
+ getDirectoryEntry(address).DataBlk := TBEs[address].DataBlk;
+ getDirectoryEntry(address).DataBlk.copyPartial(TBEs[address].DmaDataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
}
action(a_assertCacheData, "ac", desc="Assert that a cache provided the data") {
// implementation. We include the data in the "dataless"
// message so we can assert the clean data matches the datablock
// in memory
- assert(directory[address].DataBlk == in_msg.DataBlk);
+ assert(getDirectoryEntry(address).DataBlk == in_msg.DataBlk);
}
}
void profileNack(Address, int, int, uint64);
}
+external_type(AbstractEntry, primitive="yes");
+
+external_type(DirectoryMemory) {
+ AbstractEntry lookup(Address);
+ bool isPresent(Address);
+}
+
+external_type(AbstractCacheEntry, primitive="yes");
+
+external_type(CacheMemory) {
+ bool cacheAvail(Address);
+ Address cacheProbe(Address);
+ void allocate(Address, AbstractCacheEntry);
+ void deallocate(Address);
+ AbstractCacheEntry lookup(Address);
+ void changePermission(Address, AccessPermission);
+ bool isTagPresent(Address);
+ void profileMiss(CacheMsg);
+}
+
+external_type(MemoryControl, inport="yes", outport="yes") {
+
+}
+
external_type(TimerTable, inport="yes") {
bool isReady();
Address readyAddress();
}
+
+
target = generated_dir.File(basename(source))
env.Command(target, source, MakeIncludeAction)
+MakeInclude('slicc_interface/AbstractEntry.hh')
MakeInclude('slicc_interface/AbstractCacheEntry.hh')
MakeInclude('slicc_interface/AbstractProtocol.hh')
MakeInclude('slicc_interface/Message.hh')
#include "mem/ruby/common/Global.hh"
#include "mem/ruby/common/Address.hh"
#include "mem/protocol/AccessPermission.hh"
+#include "mem/ruby/slicc_interface/AbstractEntry.hh"
class DataBlock;
-class AbstractCacheEntry {
+class AbstractCacheEntry : public AbstractEntry {
public:
// Constructors
AbstractCacheEntry();
// Destructor, pure virtual to prevent direct instantiation
virtual ~AbstractCacheEntry() = 0;
- // Public Methods
-
- // The methods below are those called by ruby runtime, add when it is
- // absolutely necessary and should all be virtual function.
- virtual DataBlock& getDataBlk() = 0;
-
-
- virtual void print(ostream& out) const = 0;
-
// Data Members (m_ prefix)
Address m_Address; // Address of this block, required by CacheMemory
Time m_LastRef; // Last time this block was referenced, required by CacheMemory
--- /dev/null
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "mem/ruby/slicc_interface/AbstractEntry.hh"
+
+// Must define constructor and destructor in subclasses
+AbstractEntry::AbstractEntry() {
+}
+
+AbstractEntry::~AbstractEntry() {
+}
+
--- /dev/null
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AbstractEntry_H
+#define AbstractEntry_H
+
+#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/common/Address.hh"
+#include "mem/protocol/AccessPermission.hh"
+
+class DataBlock;
+
+class AbstractEntry {
+public:
+ // Constructors
+ AbstractEntry();
+
+  // Destructor, pure virtual to prevent direct instantiation
+ virtual ~AbstractEntry() = 0;
+
+ // Public Methods
+
+  // The methods below are called by the ruby runtime. Add methods only
+  // when absolutely necessary, and make them all virtual.
+ virtual DataBlock& getDataBlk() = 0;
+
+
+ virtual void print(ostream& out) const = 0;
+
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const AbstractEntry& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const AbstractEntry& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //AbstractEntry_H
+
SimObject('Controller.py')
+Source('AbstractEntry.cc')
Source('AbstractCacheEntry.cc')
Source('RubySlicc_Profiler_interface.cc')
Source('RubySlicc_ComponentMapping.cc')
latency = Param.Int("");
assoc = Param.Int("");
replacement_policy = Param.String("PSEUDO_LRU", "");
- controller = Param.RubyController("");
int CacheMemory::m_num_last_level_caches = 0;
MachineType CacheMemory::m_last_level_machine_type = MachineType_FIRST;
-// Output operator declaration
-//ostream& operator<<(ostream& out, const CacheMemory<ENTRY>& obj);
-
// ******************* Definitions *******************
// Output operator definition
CacheMemory::CacheMemory(const Params *p)
: SimObject(p)
{
- int cache_size = p->size;
+ m_cache_size = p->size;
m_latency = p->latency;
m_cache_assoc = p->assoc;
- string policy = p->replacement_policy;
- m_controller = p->controller;
-
- int num_lines = cache_size/RubySystem::getBlockSizeBytes();
- m_cache_num_sets = num_lines / m_cache_assoc;
- m_cache_num_set_bits = log_int(m_cache_num_sets);
- assert(m_cache_num_set_bits > 0);
-
- if(policy == "PSEUDO_LRU")
- m_replacementPolicy_ptr = new PseudoLRUPolicy(m_cache_num_sets, m_cache_assoc);
- else if (policy == "LRU")
- m_replacementPolicy_ptr = new LRUPolicy(m_cache_num_sets, m_cache_assoc);
- else
- assert(false);
-
+ m_policy = p->replacement_policy;
}
void CacheMemory::init()
{
+ m_cache_num_sets = (m_cache_size / m_cache_assoc) / RubySystem::getBlockSizeBytes();
+ assert(m_cache_num_sets > 1);
+ m_cache_num_set_bits = log_int(m_cache_num_sets);
+ assert(m_cache_num_set_bits > 0);
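+
+  // Worked example (figures assumed for illustration): a 32 KB cache with
+  // 4-way associativity and 64-byte blocks gives (32768 / 4) / 64 = 128
+  // sets, hence 7 set index bits.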
+
+ if(m_policy == "PSEUDO_LRU")
+ m_replacementPolicy_ptr = new PseudoLRUPolicy(m_cache_num_sets, m_cache_assoc);
+ else if (m_policy == "LRU")
+ m_replacementPolicy_ptr = new LRUPolicy(m_cache_num_sets, m_cache_assoc);
+ else
+ assert(false);
+
m_num_last_level_caches =
MachineType_base_count(MachineType_FIRST);
#if 0
void CacheMemory::printConfig(ostream& out)
{
out << "Cache config: " << m_cache_name << endl;
- if (m_controller != NULL)
- out << " controller: " << m_controller->getName() << endl;
out << " cache_associativity: " << m_cache_assoc << endl;
out << " num_cache_sets_bits: " << m_cache_num_set_bits << endl;
const int cache_num_sets = 1 << m_cache_num_set_bits;
// Destructor
~CacheMemory();
- // factory
- // static CacheMemory* createCache(int level, int num, char split_type, AbstractCacheEntry* (*entry_factory)());
- // static CacheMemory* getCache(int cache_id);
-
// Public Methods
void printConfig(ostream& out);
private:
const string m_cache_name;
- AbstractController* m_controller;
int m_latency;
// Data Members (m_prefix)
CacheProfiler* m_profiler_ptr;
+ int m_cache_size;
+ string m_policy;
int m_cache_num_sets;
int m_cache_num_set_bits;
int m_cache_assoc;
#include "mem/ruby/system/System.hh"
#include "mem/ruby/system/DirectoryMemory.hh"
#include "mem/ruby/slicc_interface/RubySlicc_Util.hh"
-#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/gems_common/util.hh"
int DirectoryMemory::m_num_directories = 0;
m_version = p->version;
m_size_bytes = p->size_mb * static_cast<uint64>(1<<20);
m_size_bits = log_int(m_size_bytes);
- m_controller = p->controller;
}
void DirectoryMemory::init()
void DirectoryMemory::printConfig(ostream& out) const
{
out << "DirectoryMemory module config: " << m_name << endl;
- out << " controller: " << m_controller->getName() << endl;
out << " version: " << m_version << endl;
out << " memory_bits: " << m_size_bits << endl;
out << " memory_size_bytes: " << m_size_bytes << endl;
#include "sim/sim_object.hh"
#include "params/RubyDirectoryMemory.hh"
-class AbstractController;
-
class DirectoryMemory : public SimObject {
public:
// Constructors
private:
const string m_name;
- AbstractController* m_controller;
// Data Members (m_ prefix)
Directory_Entry **m_entries;
// int m_size; // # of memory module blocks this directory is responsible for
cxx_class = 'DirectoryMemory'
version = Param.Int(0, "")
size_mb = Param.Int(1024, "")
- controller = Param.RubyController(Parent.any, "")
#include "mem/ruby/system/RubyPort.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
-//void (*RubyPort::m_hit_callback)(int64_t) = NULL;
uint16_t RubyPort::m_num_ports = 0;
+RubyPort::RequestMap RubyPort::pending_cpu_requests;
+
RubyPort::RubyPort(const Params *p)
- : MemObject(p)
+ : MemObject(p),
+ funcMemPort(csprintf("%s-funcmem_port", name()), this)
{
m_version = p->version;
assert(m_version != -1);
- m_controller = p->controller;
- assert(m_controller != NULL);
- m_mandatory_q_ptr = m_controller->getMandatoryQueue();
+ m_controller = NULL;
+ m_mandatory_q_ptr = NULL;
m_port_id = m_num_ports++;
m_request_cnt = 0;
- m_hit_callback = NULL;
+ m_hit_callback = ruby_hit_callback;
+ pio_port = NULL;
assert(m_num_ports <= 2048); // see below for reason
}
+void RubyPort::init()
+{
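+    //
+    // m_controller is filled in later by setController(), which the
+    // generated controller constructor calls before init() runs.
+    //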
+ assert(m_controller != NULL);
+ m_mandatory_q_ptr = m_controller->getMandatoryQueue();
+}
+
Port *
RubyPort::getPort(const std::string &if_name, int idx)
{
+ if (if_name == "port") {
+ return new M5Port(csprintf("%s-port%d", name(), idx), this);
+ } else if (if_name == "pio_port") {
+ //
+ // ensure there is only one pio port
+ //
+ assert(pio_port == NULL);
+
+ pio_port = new PioPort(csprintf("%s-pio-port%d", name(), idx),
+ this);
+
+ return pio_port;
+ } else if (if_name == "funcmem_port") {
+ return &funcMemPort;
+ }
return NULL;
}
+
+RubyPort::PioPort::PioPort(const std::string &_name,
+ RubyPort *_port)
+ : SimpleTimingPort(_name, _port)
+{
+    DPRINTF(Ruby, "creating pio port for ruby sequencer %s\n", _name);
+ ruby_port = _port;
+}
+
+RubyPort::M5Port::M5Port(const std::string &_name,
+ RubyPort *_port)
+ : SimpleTimingPort(_name, _port)
+{
+    DPRINTF(Ruby, "creating port from ruby sequencer to cpu %s\n", _name);
+ ruby_port = _port;
+}
+
+Tick
+RubyPort::PioPort::recvAtomic(PacketPtr pkt)
+{
+ panic("RubyPort::PioPort::recvAtomic() not implemented!\n");
+ return 0;
+}
+
+
+Tick
+RubyPort::M5Port::recvAtomic(PacketPtr pkt)
+{
+ panic("RubyPort::M5Port::recvAtomic() not implemented!\n");
+ return 0;
+}
+
+
+bool
+RubyPort::PioPort::recvTiming(PacketPtr pkt)
+{
+ //
+ // In FS mode, ruby memory will receive pio responses from devices and
+ // it must forward these responses back to the particular CPU.
+ //
+ DPRINTF(MemoryAccess,
+ "Pio response for address %#x\n",
+ pkt->getAddr());
+
+ assert(pkt->isResponse());
+
+ //
+    // First we must retrieve the request port from the packet's sender state
+ //
+ RubyPort::SenderState *senderState =
+ safe_cast<RubyPort::SenderState *>(pkt->senderState);
+ M5Port *port = senderState->port;
+ assert(port != NULL);
+
+ // pop the sender state from the packet
+ pkt->senderState = senderState->saved;
+ delete senderState;
+
+ port->sendTiming(pkt);
+
+ return true;
+}
+
+bool
+RubyPort::M5Port::recvTiming(PacketPtr pkt)
+{
+ DPRINTF(MemoryAccess,
+ "Timing access caught for address %#x\n",
+ pkt->getAddr());
+
+ //dsm: based on SimpleTimingPort::recvTiming(pkt);
+
+ //
+ // After checking for pio responses, the remainder of packets
+ // received by ruby should only be M5 requests, which should never
+    // get nacked. There used to be code to handle nacks here, but
+ // I'm pretty sure it didn't work correctly with the drain code,
+ // so that would need to be fixed if we ever added it back.
+ //
+ assert(pkt->isRequest());
+
+ if (pkt->memInhibitAsserted()) {
+ warn("memInhibitAsserted???");
+ // snooper will supply based on copy of packet
+ // still target's responsibility to delete packet
+ delete pkt;
+ return true;
+ }
+
+ //
+ // Check for pio requests and directly send them to the dedicated
+ // pio port.
+ //
+ if (!isPhysMemAddress(pkt->getAddr())) {
+ assert(ruby_port->pio_port != NULL);
+
+ //
+ // Save the port in the sender state object to be used later to
+ // route the response
+ //
+ pkt->senderState = new SenderState(this, pkt->senderState);
+
+ return ruby_port->pio_port->sendTiming(pkt);
+ }
+
+ //
+ // For DMA and CPU requests, translate them to ruby requests before
+ // sending them to our assigned ruby port.
+ //
+ RubyRequestType type = RubyRequestType_NULL;
+ Addr pc = 0;
+ if (pkt->isRead()) {
+ if (pkt->req->isInstFetch()) {
+ type = RubyRequestType_IFETCH;
+ pc = pkt->req->getPC();
+ } else {
+ type = RubyRequestType_LD;
+ }
+ } else if (pkt->isWrite()) {
+ type = RubyRequestType_ST;
+ } else if (pkt->isReadWrite()) {
+ type = RubyRequestType_RMW_Write;
+ }
+
+ RubyRequest ruby_request(pkt->getAddr(), pkt->getPtr<uint8_t>(),
+ pkt->getSize(), pc, type,
+ RubyAccessMode_Supervisor);
+
+ // Submit the ruby request
+ int64_t req_id = ruby_port->makeRequest(ruby_request);
+ if (req_id == -1) {
+ return false;
+ }
+
+ // Save the request for the callback
+ RubyPort::pending_cpu_requests[req_id] = new RequestCookie(pkt, this);
+
+ return true;
+}
+
+void
+RubyPort::ruby_hit_callback(int64_t req_id)
+{
+ //
+    // Note: This single function can be called by cpu and dma ports,
+ // as well as the functional port.
+ //
+ RequestMap::iterator i = pending_cpu_requests.find(req_id);
+ if (i == pending_cpu_requests.end())
+ panic("could not find pending request %d\n", req_id);
+
+ RequestCookie *cookie = i->second;
+ pending_cpu_requests.erase(i);
+
+ Packet *pkt = cookie->pkt;
+ M5Port *port = cookie->m5Port;
+ delete cookie;
+
+ port->hitCallback(pkt);
+}
+
+void
+RubyPort::M5Port::hitCallback(PacketPtr pkt)
+{
+
+ bool needsResponse = pkt->needsResponse();
+
+ DPRINTF(MemoryAccess, "Hit callback needs response %d\n",
+ needsResponse);
+
+ ruby_port->funcMemPort.sendFunctional(pkt);
+
+ // turn packet around to go back to requester if response expected
+ if (needsResponse) {
+ // recvAtomic() should already have turned packet into
+ // atomic response
+ assert(pkt->isResponse());
+ DPRINTF(MemoryAccess, "Sending packet back over port\n");
+ sendTiming(pkt);
+ } else {
+ delete pkt;
+ }
+ DPRINTF(MemoryAccess, "Hit callback done!\n");
+}
+
+bool
+RubyPort::M5Port::sendTiming(PacketPtr pkt)
+{
+ schedSendTiming(pkt, curTick + 1); //minimum latency, must be > 0
+ return true;
+}
+
+bool
+RubyPort::PioPort::sendTiming(PacketPtr pkt)
+{
+ schedSendTiming(pkt, curTick + 1); //minimum latency, must be > 0
+ return true;
+}
+
+bool
+RubyPort::M5Port::isPhysMemAddress(Addr addr)
+{
+ AddrRangeList physMemAddrList;
+ bool snoop = false;
+ ruby_port->funcMemPort.getPeerAddressRanges(physMemAddrList, snoop);
+ for(AddrRangeIter iter = physMemAddrList.begin();
+ iter != physMemAddrList.end();
+ iter++) {
+ if (addr >= iter->start && addr <= iter->end) {
+ DPRINTF(MemoryAccess, "Request found in %#llx - %#llx range\n",
+ iter->start, iter->end);
+ return true;
+ }
+ }
+ assert(isPioAddress(addr));
+ return false;
+}
+
+bool
+RubyPort::M5Port::isPioAddress(Addr addr)
+{
+ AddrRangeList pioAddrList;
+ bool snoop = false;
+ if (ruby_port->pio_port == NULL) {
+ return false;
+ }
+
+ ruby_port->pio_port->getPeerAddressRanges(pioAddrList, snoop);
+ for(AddrRangeIter iter = pioAddrList.begin();
+ iter != pioAddrList.end();
+ iter++) {
+ if (addr >= iter->start && addr <= iter->end) {
+ DPRINTF(MemoryAccess, "Pio request found in %#llx - %#llx range\n",
+ iter->start, iter->end);
+ return true;
+ }
+ }
+ return false;
+}
+
class RubyPort : public MemObject {
public:
+
+ class M5Port : public SimpleTimingPort
+ {
+
+ RubyPort *ruby_port;
+
+ public:
+ M5Port(const std::string &_name,
+ RubyPort *_port);
+ bool sendTiming(PacketPtr pkt);
+ void hitCallback(PacketPtr pkt);
+
+ protected:
+ virtual bool recvTiming(PacketPtr pkt);
+ virtual Tick recvAtomic(PacketPtr pkt);
+
+ private:
+ bool isPioAddress(Addr addr);
+ bool isPhysMemAddress(Addr addr);
+ };
+
+ friend class M5Port;
+
+ class PioPort : public SimpleTimingPort
+ {
+
+ RubyPort *ruby_port;
+
+ public:
+ PioPort(const std::string &_name,
+ RubyPort *_port);
+ bool sendTiming(PacketPtr pkt);
+
+ protected:
+ virtual bool recvTiming(PacketPtr pkt);
+ virtual Tick recvAtomic(PacketPtr pkt);
+ };
+
+ friend class PioPort;
+
+ struct SenderState : public Packet::SenderState
+ {
+ M5Port* port;
+ Packet::SenderState *saved;
+
+ SenderState(M5Port* _port,
+ Packet::SenderState *sender_state = NULL)
+ : port(_port), saved(sender_state)
+ {}
+ };
+
typedef RubyPortParams Params;
RubyPort(const Params *p);
- virtual ~RubyPort() {}
+ virtual ~RubyPort() {}
+
+ void init();
Port *getPort(const std::string &if_name, int idx);
- virtual int64_t makeRequest(const RubyRequest & request) = 0;
+ virtual int64_t makeRequest(const RubyRequest & request) = 0;
- void registerHitCallback(void (*hit_callback)(int64_t request_id)) {
- assert(m_hit_callback == NULL); // can't assign hit_callback twice
- m_hit_callback = hit_callback;
- }
+ void registerHitCallback(void (*hit_callback)(int64_t request_id)) {
+ //
+    // hit_callback can only be assigned once; by default it is set to the
+    // RubyPort's own callback function.
+ //
+ assert(m_hit_callback == ruby_hit_callback);
+ m_hit_callback = hit_callback;
+ }
+
+ //
+ // Called by the controller to give the sequencer a pointer.
+ // A pointer to the controller is needed for atomic support.
+ //
+ void setController(AbstractController* _cntrl) { m_controller = _cntrl; }
protected:
const string m_name;
int m_version;
AbstractController* m_controller;
MessageBuffer* m_mandatory_q_ptr;
+ PioPort* pio_port;
private:
static uint16_t m_num_ports;
uint16_t m_port_id;
uint64_t m_request_cnt;
+
+ struct RequestCookie {
+ Packet *pkt;
+ M5Port *m5Port;
+ RequestCookie(Packet *p, M5Port *m5p)
+ : pkt(p), m5Port(m5p)
+ {}
+ };
+
+ typedef std::map<int64_t, RequestCookie*> RequestMap;
+ static RequestMap pending_cpu_requests;
+ static void ruby_hit_callback(int64_t req_id);
+
+ FunctionalPort funcMemPort;
};
#endif
CacheMemory* m_dataCache_ptr;
CacheMemory* m_instCache_ptr;
- // indicates what processor on the chip this sequencer is associated with
- int m_controller_type;
-
Map<Address, SequencerRequest*> m_writeRequestTable;
Map<Address, SequencerRequest*> m_readRequestTable;
// Global outstanding request count, across all request tables
from m5.params import *
+from m5.proxy import *
from MemObject import MemObject
class RubyPort(MemObject):
type = 'RubyPort'
abstract = True
port = VectorPort("M5 port")
- controller = Param.RubyController("")
version = Param.Int(0, "")
+ pio_port = Port("Ruby_pio_port")
class RubySequencer(RubyPort):
type = 'RubySequencer'
from slicc.symbols import Var
class FormalParamAST(AST):
- def __init__(self, slicc, type_ast, ident, default = None):
+ def __init__(self, slicc, type_ast, ident, default = None, pointer = False):
super(FormalParamAST, self).__init__(slicc)
self.type_ast = type_ast
self.ident = ident
self.default = default
+ self.pointer = pointer
def __repr__(self):
return "[FormalParamAST: %s]" % self.ident
for actual_type, expected_type in \
zip(paramTypes, obj_type.methods[methodId].param_types):
- if actual_type != expected_type:
+ if actual_type != expected_type and \
+ str(actual_type["interface"]) != str(expected_type):
self.error("Type mismatch: expected: %s actual: %s",
expected_type, actual_type)
methodId = obj_type.methodId(self.proc_name, paramTypes)
prefix = ""
+ implements_interface = False
if methodId not in obj_type.methods:
- self.error("Invalid method call: Type '%s' does not have a method '%s'",
- obj_type, methodId)
+ #
+ # The initial method check has failed, but before generating an
+ # error we must check whether any of the paramTypes implement
+ # an interface. If so, we must check if the method ids using
+ # the inherited types exist.
+ #
+ # This code is a temporary fix and only checks for the methodId
+ # where all paramTypes are converted to their inherited type. The
+            # right way to do this is to replace slicc's simple string
+            # comparison for determining the correct overloaded method with
+            # a more robust param-by-param check.
+ #
+ implemented_paramTypes = []
+ for paramType in paramTypes:
+ implemented_paramType = paramType
+ if paramType.isInterface:
+ implements_interface = True
+ implemented_paramType.abstract_ident = paramType["interface"]
+ else:
+ implemented_paramType.abstract_ident = paramType.c_ident
+
+ implemented_paramTypes.append(implemented_paramType)
+
+ if implements_interface:
+ implementedMethodId = obj_type.methodIdAbstract(self.proc_name,
+ implemented_paramTypes)
+ else:
+ implementedMethodId = ""
+
+ if implementedMethodId not in obj_type.methods:
+ self.error("Invalid method call: " \
+ "Type '%s' does not have a method '%s' nor '%s'",
+ obj_type, methodId, implementedMethodId)
+ else:
+ #
+ # Replace the methodId with the implementedMethodId found in
+ # the method list.
+ #
+ methodId = implementedMethodId
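+
+            # For example, a call allocate(addr, entry) where entry is a
+            # protocol Entry misses the exact id "allocate_Address_Entry"
+            # but matches "allocate_Address_AbstractCacheEntry", because
+            # Entry declares interface="AbstractCacheEntry" and
+            # CacheMemory.allocate() takes an AbstractCacheEntry.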
+
return_type = obj_type.methods[methodId].return_type
if return_type.isInterface:
prefix = "static_cast<%s &>" % return_type.c_ident
--- /dev/null
+# Copyright (c) 2009 Advanced Micro Devices, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from slicc.ast.ExprAST import ExprAST
+
+class StaticCastAST(ExprAST):
+ def __init__(self, slicc, type_ast, expr_ast):
+ super(StaticCastAST, self).__init__(slicc)
+
+ self.type_ast = type_ast
+ self.expr_ast = expr_ast
+
+ def __repr__(self):
+ return "[StaticCastAST: %r]" % self.expr_ast
+
+ def generate(self, code):
+ actual_type, ecode = self.expr_ast.inline(True)
+ code('static_cast<${{self.type_ast.type.c_ident}} &>($ecode)')
+
+        if "interface" not in self.type_ast.type:
+            self.expr_ast.error("static cast only permitted for types " \
+                                "that inherit an interface")
+
+ # The interface type should match
+ if str(actual_type) != str(self.type_ast.type["interface"]):
+            self.expr_ast.error("static cast mismatch: type is '%s', " \
+                                "but inherited type is '%s'",
+ actual_type, self.type_ast.type["interface"])
+
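+        # For example, the SLICC expression
+        # static_cast(Entry, L1DcacheMemory[address]) generates C++
+        # "static_cast<Entry &>(...)"; the check above requires that Entry
+        # declare an interface matching the expression's actual type.
+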
+ return self.type_ast.type
+
from slicc.ast.ReturnStatementAST import *
from slicc.ast.StatementAST import *
from slicc.ast.StatementListAST import *
+from slicc.ast.StaticCastAST import *
from slicc.ast.TransitionDeclAST import *
from slicc.ast.TypeAST import *
from slicc.ast.TypeDeclAST import *
'copy_head' : 'COPY_HEAD',
'check_allocate' : 'CHECK_ALLOCATE',
'check_stop_slots' : 'CHECK_STOP_SLOTS',
+ 'static_cast' : 'STATIC_CAST',
'if' : 'IF',
'else' : 'ELSE',
'return' : 'RETURN',
"param : type ident"
p[0] = ast.FormalParamAST(self, p[1], p[2])
+ def p_param__pointer(self, p):
+ "param : type STAR ident"
+ p[0] = ast.FormalParamAST(self, p[1], p[3], None, True)
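+        # matches pointer-valued machine parameters such as
+        # "Sequencer * sequencer" in the L1Cache declaration above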
+
def p_param__default(self, p):
"param : type ident '=' NUMBER"
p[0] = ast.FormalParamAST(self, p[1], p[2], p[4])
"statement : CHECK_STOP_SLOTS '(' var ',' STRING ',' STRING ')' SEMI"
p[0] = ast.CheckStopStatementAST(self, p[3], p[5], p[7])
+ def p_statement__static_cast(self, p):
+ "aexpr : STATIC_CAST '(' type ',' expr ')'"
+ p[0] = ast.StaticCastAST(self, p[3], p[5])
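+        # matches expressions such as
+        # static_cast(Entry, L2cacheMemory[address])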
+
def p_statement__return(self, p):
"statement : RETURN expr SEMI"
p[0] = ast.ReturnStatementAST(self, p[2])
from slicc.symbols.Var import Var
import slicc.generate.html as html
+python_class_map = {"int": "Int",
+ "string": "String",
+ "bool": "Bool",
+ "CacheMemory": "RubyCache",
+ "Sequencer": "RubySequencer",
+ "DirectoryMemory": "RubyDirectoryMemory",
+ "MemoryControl": "RubyMemoryControl",
+ }
+
class StateMachine(Symbol):
def __init__(self, symtab, ident, location, pairs, config_parameters):
super(StateMachine, self).__init__(symtab, ident, location, pairs)
self.table = None
self.config_parameters = config_parameters
for param in config_parameters:
- var = Var(symtab, param.name, location, param.type_ast.type,
- "m_%s" % param.name, {}, self)
+ if param.pointer:
+ var = Var(symtab, param.name, location, param.type_ast.type,
+ "(*m_%s_ptr)" % param.name, {}, self)
+ else:
+ var = Var(symtab, param.name, location, param.type_ast.type,
+ "m_%s" % param.name, {}, self)
self.symtab.registerSym(param.name, var)
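            # a pointer param named "sequencer" is thus referenced in the
            # generated C++ as "(*m_sequencer_ptr)", so member accesses in
            # generated code work unchanged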
self.states = orderdict()
dflt_str = ''
if param.default is not None:
dflt_str = str(param.default) + ', '
- code('${{param.name}} = Param.Int(${dflt_str}"")')
+ if python_class_map.has_key(param.type_ast.type.c_ident):
+ python_type = python_class_map[param.type_ast.type.c_ident]
+ code('${{param.name}} = Param.${{python_type}}(${dflt_str}"")')
+ else:
+ self.error("Unknown c++ to python class conversion for c++ " \
+ "type: '%s'. Please update the python_class_map " \
+ "in StateMachine.py", param.type_ast.type.c_ident)
code.dedent()
code.write(path, '%s.py' % py_ident)
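
        # With the L1Cache machine parameters shown earlier, the generated
        # L1Cache_Controller.py would carry params along these lines (a
        # sketch; surrounding boilerplate is elided):
        #
        #   sequencer = Param.RubySequencer("")
        #   L1IcacheMemory = Param.RubyCache("")
        #   L1DcacheMemory = Param.RubyCache("")
        #   L2cacheMemory = Param.RubyCache("")
        #   cache_response_latency = Param.Int(12, "")
        #   issue_latency = Param.Int(2, "")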
code.indent()
# added by SS
for param in self.config_parameters:
- code('int m_${{param.ident}};')
+ if param.pointer:
+ code('${{param.type_ast.type}}* m_${{param.ident}}_ptr;')
+ else:
+ code('${{param.type_ast.type}} m_${{param.ident}};')
code('''
int m_number_of_TBEs;
m_number_of_TBEs = p->number_of_TBEs;
''')
code.indent()
+
+ #
+        # After initializing the universal machine parameters, initialize
+        # this machine's config parameters. Also determine whether these
+        # configuration params include a sequencer. This information will be
+        # used later for connecting the sequencer back to the L1 cache
+        # controller.
+ #
+ contains_sequencer = False
for param in self.config_parameters:
- code('m_${{param.name}} = p->${{param.name}};')
+ if param.name == "sequencer":
+ contains_sequencer = True
+ if param.pointer:
+ code('m_${{param.name}}_ptr = p->${{param.name}};')
+ else:
+ code('m_${{param.name}} = p->${{param.name}};')
+
+ #
+ # For the l1 cache controller, add the special atomic support which
+ # includes passing the sequencer a pointer to the controller.
+ #
+ if self.ident == "L1Cache":
+ if not contains_sequencer:
+ self.error("The L1Cache controller must include the sequencer " \
+ "configuration parameter")
+
+ code('''
+m_sequencer_ptr->setController(this);
+''')
code('m_num_controllers++;')
for var in self.objects:
def __init__(self, table, ident, location, pairs, machine=None):
super(Type, self).__init__(table, ident, location, pairs)
self.c_ident = ident
+ self.abstract_ident = ""
if machine:
if self.isExternal or self.isPrimitive:
if "external_name" in self:
def methodId(self, name, param_type_vec):
return '_'.join([name] + [ pt.c_ident for pt in param_type_vec ])
+ def methodIdAbstract(self, name, param_type_vec):
+ return '_'.join([name] + [ pt.abstract_ident for pt in param_type_vec ])
+
def methodAdd(self, name, return_type, param_type_vec):
ident = self.methodId(name, param_type_vec)
if ident in self.methods: