--- /dev/null
+# Copyright (c) 2006-2007 The Regents of The University of Michigan
+# Copyright (c) 2009 Advanced Micro Devices, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Brad Beckmann
+
+import m5
+from m5.objects import *
+from m5.defines import buildEnv
+from m5.util import addToPath
+
+#
+# Note: the cache latency is only used by the sequencer on fast path hits
+#
+class Cache(RubyCache):
+ latency = 3
+
+def create_system(options, phys_mem, piobus, dma_devices):
+
+ if buildEnv['PROTOCOL'] != 'MI_example':
+ panic("This script requires the MI_example protocol to be built.")
+
+ cpu_sequencers = []
+
+ #
+ # The ruby network creation expects the list of nodes in the system to be
+# consistent with the NetDest list. Therefore the L1 controller nodes must be
+# listed before the directory nodes, and the directory nodes before the DMA
+# nodes, etc.
+ #
+ l1_cntrl_nodes = []
+ dir_cntrl_nodes = []
+ dma_cntrl_nodes = []
+
+ #
+ # Must create the individual controllers before the network to ensure the
+ # controller constructors are called before the network constructor
+ #
+
+ for i in xrange(options.num_cpus):
+ #
+ # First create the Ruby objects associated with this cpu
+ # Only one cache exists for this protocol, so by default use the L1D
+ # config parameters.
+ #
+ cache = Cache(size = options.l1d_size,
+ assoc = options.l1d_assoc)
+
+ #
+        # Only one unified L1 cache exists; it caches both instructions and
+        # data.
+ #
+ cpu_seq = RubySequencer(icache = cache,
+ dcache = cache,
+ physMemPort = phys_mem.port,
+ physmem = phys_mem)
+
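+        # If a piobus was passed in, connect the sequencer's pio port to it.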
+        if piobus is not None:
+ cpu_seq.pio_port = piobus.port
+
+ l1_cntrl = L1Cache_Controller(version = i,
+ sequencer = cpu_seq,
+ cacheMemory = cache)
+ #
+ # Add controllers and sequencers to the appropriate lists
+ #
+ cpu_sequencers.append(cpu_seq)
+ l1_cntrl_nodes.append(l1_cntrl)
+
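+    # Divide physical memory evenly among the directory controllers; e.g. a
+    # 512 MB range split across num_dirs == 2 gives each directory a 256 MB
+    # module.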
+ phys_mem_size = long(phys_mem.range.second) - long(phys_mem.range.first) + 1
+ mem_module_size = phys_mem_size / options.num_dirs
+
+ for i in xrange(options.num_dirs):
+ #
+ # Create the Ruby objects associated with the directory controller
+ #
+
+ mem_cntrl = RubyMemoryControl(version = i)
+
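+        # MemorySize is constructed from a size string; start at '0B' and
+        # then set the raw byte count directly, since mem_module_size is
+        # already a byte count.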
+ dir_size = MemorySize('0B')
+ dir_size.value = mem_module_size
+
+ dir_cntrl = Directory_Controller(version = i,
+ directory = \
+ RubyDirectoryMemory(version = i,
+ size = dir_size),
+ memBuffer = mem_cntrl)
+
+ dir_cntrl_nodes.append(dir_cntrl)
+
+ for i, dma_device in enumerate(dma_devices):
+ #
+ # Create the Ruby objects associated with the dma controller
+ #
+ dma_seq = DMASequencer(version = i,
+ physMemPort = phys_mem.port,
+ physmem = phys_mem)
+
+ dma_cntrl = DMA_Controller(version = i,
+ dma_sequencer = dma_seq)
+
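+        # Wire the DMA sequencer's port to the device's dma port so that
+        # device requests enter the Ruby memory system.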
+ dma_cntrl.dma_sequencer.port = dma_device.dma
+ dma_cntrl_nodes.append(dma_cntrl)
+
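+    # Concatenate in the order required by the NetDest mapping noted above:
+    # L1 controllers, then directories, then DMA controllers.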
+ all_cntrls = l1_cntrl_nodes + dir_cntrl_nodes + dma_cntrl_nodes
+
+ return (cpu_sequencers, dir_cntrl_nodes, all_cntrls)
from m5.util import addToPath
import MOESI_hammer
+import MI_example
def create_system(options, physmem, piobus = None, dma_devices = []):
physmem, \
piobus, \
dma_devices)
+ elif protocol == "MI_example":
+ (cpu_sequencers, dir_cntrls, all_cntrls) = \
+ MI_example.create_system(options, \
+ physmem, \
+ piobus, \
+ dma_devices)
else:
print "Error: unsupported ruby protocol"
sys.exit(1)
machine(L1Cache, "MI Example L1 Cache")
-: int cache_response_latency,
- int issue_latency
+: Sequencer * sequencer,
+ CacheMemory * cacheMemory,
+ int cache_response_latency = 12,
+ int issue_latency = 2
{
// NETWORK BUFFERS
// STRUCTURE DEFINITIONS
MessageBuffer mandatoryQueue, ordered="false";
- Sequencer sequencer, factory='RubySystem::getSequencer(m_cfg["sequencer"])';
// CacheEntry
structure(Entry, desc="...", interface="AbstractCacheEntry") {
}
- external_type(CacheMemory) {
- bool cacheAvail(Address);
- Address cacheProbe(Address);
- void allocate(Address, Entry);
- void deallocate(Address);
- Entry lookup(Address);
- void changePermission(Address, AccessPermission);
- bool isTagPresent(Address);
- void profileMiss(CacheMsg);
- }
-
// TBE fields
structure(TBE, desc="...") {
State TBEState, desc="Transient state";
// STRUCTURES
- CacheMemory cacheMemory, factory='RubySystem::getCache(m_cfg["cache"])';
-
TBETable TBEs, template_hack="<L1Cache_TBE>";
}
}
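+  // Look up addr in the cache and downcast the stored AbstractCacheEntry
+  // to this protocol's Entry type.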
+ Entry getCacheEntry(Address addr), return_by_ref="yes" {
+ return static_cast(Entry, cacheMemory[addr]);
+ }
State getState(Address addr) {
return TBEs[addr].TBEState;
}
else if (cacheMemory.isTagPresent(addr)) {
- return cacheMemory[addr].CacheState;
+ return getCacheEntry(addr).CacheState;
}
else {
return State:I;
}
if (cacheMemory.isTagPresent(addr)) {
- cacheMemory[addr].CacheState := state;
+ getCacheEntry(addr).CacheState := state;
if (state == State:M) {
cacheMemory.changePermission(addr, AccessPermission:Read_Write);
} else {
out_msg.Type := CoherenceRequestType:PUTX;
out_msg.Requestor := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));
- out_msg.DataBlk := cacheMemory[address].DataBlk;
+ out_msg.DataBlk := getCacheEntry(address).DataBlk;
out_msg.MessageSize := MessageSizeType:Data;
}
}
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := cacheMemory[address].DataBlk;
+ out_msg.DataBlk := getCacheEntry(address).DataBlk;
out_msg.MessageSize := MessageSizeType:Response_Data;
}
}
}
action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
- DEBUG_EXPR(cacheMemory[address].DataBlk);
- sequencer.readCallback(address, cacheMemory[address].DataBlk);
+ DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ sequencer.readCallback(address, getCacheEntry(address).DataBlk);
}
action(s_store_hit, "s", desc="Notify sequencer that store completed.") {
- DEBUG_EXPR(cacheMemory[address].DataBlk);
- sequencer.writeCallback(address, cacheMemory[address].DataBlk);
+ DEBUG_EXPR(getCacheEntry(address).DataBlk);
+ sequencer.writeCallback(address, getCacheEntry(address).DataBlk);
}
action(u_writeDataToCache, "u", desc="Write data to the cache") {
peek(responseNetwork_in, ResponseMsg) {
- cacheMemory[address].DataBlk := in_msg.DataBlk;
+ getCacheEntry(address).DataBlk := in_msg.DataBlk;
}
}
}
action(x_copyDataFromCacheToTBE, "x", desc="Copy data from cache to TBE") {
- TBEs[address].DataBlk := cacheMemory[address].DataBlk;
+ TBEs[address].DataBlk := getCacheEntry(address).DataBlk;
}
action(z_stall, "z", desc="stall") {
machine(Directory, "Directory protocol")
-: int directory_latency
+: DirectoryMemory * directory,
+ MemoryControl * memBuffer,
+ int directory_latency = 12
{
MessageBuffer forwardFromDir, network="To", virtual_network="2", ordered="false";
// TYPES
// DirectoryEntry
- structure(Entry, desc="...") {
+ structure(Entry, desc="...", interface="AbstractEntry") {
State DirectoryState, desc="Directory state";
DataBlock DataBlk, desc="data for the block";
NetDest Sharers, desc="Sharers for this block";
NetDest Owner, desc="Owner of this block";
}
- external_type(DirectoryMemory) {
- Entry lookup(Address);
- bool isPresent(Address);
- void invalidateBlock(Address);
- }
-
- external_type(MemoryControl, inport="yes", outport="yes") {
-
- }
-
-
// TBE entries for DMA requests
structure(TBE, desc="TBE entries for outstanding DMA requests") {
Address PhysicalAddress, desc="physical address";
}
// ** OBJECTS **
- DirectoryMemory directory, factory='RubySystem::getDirectory(m_cfg["directory"])';
-
- MemoryControl memBuffer, factory='RubySystem::getMemoryControl(m_cfg["memory_control"])';
-
TBETable TBEs, template_hack="<Directory_TBE>";
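+
+  // Downcast the AbstractEntry stored in the DirectoryMemory to this
+  // protocol's directory Entry type.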
+ Entry getDirectoryEntry(Address addr), return_by_ref="yes" {
+ return static_cast(Entry, directory[addr]);
+ }
+
State getState(Address addr) {
if (TBEs.isPresent(addr)) {
return TBEs[addr].TBEState;
} else if (directory.isPresent(addr)) {
- return directory[addr].DirectoryState;
+ return getDirectoryEntry(addr).DirectoryState;
} else {
return State:I;
}
if (directory.isPresent(addr)) {
if (state == State:I) {
- assert(directory[addr].Owner.count() == 0);
- assert(directory[addr].Sharers.count() == 0);
+ assert(getDirectoryEntry(addr).Owner.count() == 0);
+ assert(getDirectoryEntry(addr).Sharers.count() == 0);
} else if (state == State:M) {
- assert(directory[addr].Owner.count() == 1);
- assert(directory[addr].Sharers.count() == 0);
+ assert(getDirectoryEntry(addr).Owner.count() == 1);
+ assert(getDirectoryEntry(addr).Sharers.count() == 0);
}
- directory[addr].DirectoryState := state;
+ getDirectoryEntry(addr).DirectoryState := state;
}
}
} else if (in_msg.Type == CoherenceRequestType:GETX) {
trigger(Event:GETX, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:PUTX) {
- if (directory[in_msg.Address].Owner.isElement(in_msg.Requestor)) {
+ if (getDirectoryEntry(in_msg.Address).Owner.isElement(in_msg.Requestor)) {
trigger(Event:PUTX, in_msg.Address);
} else {
trigger(Event:PUTX_NotOwner, in_msg.Address);
}
action(c_clearOwner, "c", desc="Clear the owner field") {
- directory[address].Owner.clear();
+ getDirectoryEntry(address).Owner.clear();
}
action(d_sendData, "d", desc="Send data to requestor") {
action(e_ownerIsRequestor, "e", desc="The owner is now the requestor") {
peek(requestQueue_in, RequestMsg) {
- directory[address].Owner.clear();
- directory[address].Owner.add(in_msg.Requestor);
+ getDirectoryEntry(address).Owner.clear();
+ getDirectoryEntry(address).Owner.add(in_msg.Requestor);
}
}
action(f_forwardRequest, "f", desc="Forward request to owner") {
peek(requestQueue_in, RequestMsg) {
APPEND_TRANSITION_COMMENT("Own: ");
- APPEND_TRANSITION_COMMENT(directory[in_msg.Address].Owner);
+ APPEND_TRANSITION_COMMENT(getDirectoryEntry(in_msg.Address).Owner);
APPEND_TRANSITION_COMMENT("Req: ");
APPEND_TRANSITION_COMMENT(in_msg.Requestor);
enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
out_msg.Address := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination := directory[in_msg.Address].Owner;
+ out_msg.Destination := getDirectoryEntry(in_msg.Address).Owner;
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:INV;
out_msg.Requestor := machineID;
- out_msg.Destination := directory[in_msg.PhysicalAddress].Owner;
+ out_msg.Destination := getDirectoryEntry(in_msg.PhysicalAddress).Owner;
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
peek(requestQueue_in, RequestMsg) {
// assert(in_msg.Dirty);
// assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
- directory[in_msg.Address].DataBlk := in_msg.DataBlk;
- //directory[in_msg.Address].DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.Address), in_msg.Len);
+ getDirectoryEntry(in_msg.Address).DataBlk := in_msg.DataBlk;
+ //getDirectoryEntry(in_msg.Address).DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.Address), in_msg.Len);
}
}
action(dwt_writeDMADataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
- directory[address].DataBlk.copyPartial(TBEs[address].DataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
+ getDirectoryEntry(address).DataBlk.copyPartial(TBEs[address].DataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
}
action(v_allocateTBE, "v", desc="Allocate TBE") {
out_msg.Sender := machineID;
out_msg.OriginalRequestorMachId := in_msg.Requestor;
out_msg.MessageSize := in_msg.MessageSize;
- out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+ out_msg.DataBlk := getDirectoryEntry(in_msg.Address).DataBlk;
DEBUG_EXPR(out_msg);
}
}
out_msg.Sender := machineID;
//out_msg.OriginalRequestorMachId := machineID;
out_msg.MessageSize := in_msg.MessageSize;
- out_msg.DataBlk := directory[address].DataBlk;
+ out_msg.DataBlk := getDirectoryEntry(address).DataBlk;
DEBUG_EXPR(out_msg);
}
}
}
action(w_writeDataToMemoryFromTBE, "\w", desc="Write data to directory memory from TBE") {
- //directory[address].DataBlk := TBEs[address].DataBlk;
- directory[address].DataBlk.copyPartial(TBEs[address].DataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
+ //getDirectoryEntry(address).DataBlk := TBEs[address].DataBlk;
+ getDirectoryEntry(address).DataBlk.copyPartial(TBEs[address].DataBlk,
+ addressOffset(TBEs[address].PhysicalAddress),
+ TBEs[address].Len);
}
machine(DMA, "DMA Controller")
-: int request_latency
+: DMASequencer * dma_sequencer,
+ int request_latency
{
MessageBuffer responseFromDir, network="From", virtual_network="4", ordered="true", no_vector="true";
Ack, desc="DMA write to memory completed";
}
- external_type(DMASequencer) {
- void ackCallback();
- void dataCallback(DataBlock);
- }
-
MessageBuffer mandatoryQueue, ordered="false", no_vector="true";
- DMASequencer dma_sequencer, factory='RubySystem::getDMASequencer(m_cfg["dma_sequencer"])', no_vector="true";
State cur_state, no_vector="true";
State getState(Address addr) {
external_type(DirectoryMemory) {
AbstractEntry lookup(Address);
bool isPresent(Address);
+ void invalidateBlock(Address);
}
external_type(AbstractCacheEntry, primitive="yes");