template <class KEY_TYPE, class VALUE_TYPE>
VALUE_TYPE& Map<KEY_TYPE, VALUE_TYPE>::lookup(const KEY_TYPE& key) const
{
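+  // Debug aid: dump the whole map and the missing key before the assert fires.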
+ if (!exist(key))
+ cerr << *this << " is looking for " << key << endl;
assert(exist(key));
return m_map[key];
}
-machine(L1Cache, "MI Example L1 Cache"): LATENCY_CACHE_RESPONSE_LATENCY LATENCY_ISSUE_LATENCY {
+machine(L1Cache, "MI Example L1 Cache")
+: int cache_response_latency,
+ int issue_latency
+{
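+  // cache_response_latency and issue_latency are now per-controller parameters supplied by the Ruby config, replacing the global LATENCY_* macros.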
// NETWORK BUFFERS
MessageBuffer requestFromCache, network="To", virtual_network="0", ordered="true";
// ACTIONS
action(a_issueRequest, "a", desc="Issue a request") {
- enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
- out_msg.Address := address;
+ enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
+ out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETX;
out_msg.Requestor := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));
}
action(b_issuePUT, "b", desc="Issue a PUT request") {
- enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
+ enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:PUTX;
out_msg.Requestor := machineID;
action(e_sendData, "e", desc="Send data from cache to requestor") {
peek(forwardRequestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
action(ee_sendDataFromTBE, "\e", desc="Send data from TBE to requestor") {
peek(forwardRequestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
-machine(Directory, "Directory protocol") : LATENCY_TO_MEM_CTRL_LATENCY LATENCY_DIRECTORY_LATENCY LATENCY_MEMORY_LATENCY {
+machine(Directory, "Directory protocol")
+: int directory_latency
+{
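+  // Only directory_latency remains a parameter: the old TO_MEM_CTRL_LATENCY and MEMORY_LATENCY enqueues become fixed single-cycle hops below, since detailed memory timing is modeled by the memory controller behind memQueue.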
MessageBuffer forwardFromDir, network="To", virtual_network="2", ordered="false";
MessageBuffer responseFromDir, network="To", virtual_network="1", ordered="false";
// TBE entries for DMA requests
structure(TBE, desc="TBE entries for outstanding DMA requests") {
+ Address PhysicalAddress, desc="physical address";
State TBEState, desc="Transient State";
DataBlock DataBlk, desc="Data to be written (DMA write only)";
- int Offset, desc="...";
int Len, desc="...";
}
action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:WB_ACK;
out_msg.Requestor := in_msg.Requestor;
action(l_sendWriteBackAck, "la", desc="Send writeback ack to requestor") {
peek(memQueue_in, MemoryMsg) {
- enqueue(forwardNetwork_out, RequestMsg, latency="TO_MEM_CTRL_LATENCY") {
+ enqueue(forwardNetwork_out, RequestMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:WB_ACK;
out_msg.Requestor := in_msg.OriginalRequestorMachId;
action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:WB_NACK;
out_msg.Requestor := in_msg.Requestor;
action(d_sendData, "d", desc="Send data to requestor") {
peek(memQueue_in, MemoryMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="TO_MEM_CTRL_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
peek(memQueue_in, MemoryMsg) {
- enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="MEMORY_LATENCY") {
+ enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
out_msg.PhysicalAddress := address;
out_msg.LineAddress := address;
out_msg.Type := DMAResponseType:DATA;
action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
peek(requestQueue_in, RequestMsg) {
- enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="MEMORY_LATENCY") {
+ enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
out_msg.PhysicalAddress := address;
out_msg.LineAddress := address;
out_msg.Type := DMAResponseType:DATA;
}
action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
- enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="MEMORY_LATENCY") {
+ enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
out_msg.PhysicalAddress := address;
out_msg.LineAddress := address;
out_msg.Type := DMAResponseType:ACK;
APPEND_TRANSITION_COMMENT(directory[in_msg.Address].Owner);
APPEND_TRANSITION_COMMENT("Req: ");
APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
out_msg.Address := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
peek(dmaRequestQueue_in, DMARequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:INV;
out_msg.Requestor := machineID;
}
action(dwt_writeDMADataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
- directory[address].DataBlk.copyPartial(TBEs[address].DataBlk, TBEs[address].Offset, TBEs[address].Len);
+ directory[address].DataBlk.copyPartial(TBEs[address].DataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
}
action(v_allocateTBE, "v", desc="Allocate TBE") {
peek(dmaRequestQueue_in, DMARequestMsg) {
TBEs.allocate(address);
TBEs[address].DataBlk := in_msg.DataBlk;
- TBEs[address].Offset := in_msg.Offset;
+ TBEs[address].PhysicalAddress := in_msg.PhysicalAddress;
TBEs[address].Len := in_msg.Len;
}
}
action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
peek(requestQueue_in, RequestMsg) {
- enqueue(memQueue_out, MemoryMsg, latency="TO_MEM_CTRL_LATENCY") {
+ enqueue(memQueue_out, MemoryMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := MemoryRequestType:MEMORY_READ;
out_msg.Sender := machineID;
action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request") {
peek(dmaRequestQueue_in, DMARequestMsg) {
- enqueue(memQueue_out, MemoryMsg, latency="TO_MEM_CTRL_LATENCY") {
+ enqueue(memQueue_out, MemoryMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := MemoryRequestType:MEMORY_READ;
out_msg.Sender := machineID;
}
// action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
// peek(dmaRequestQueue_in, DMARequestMsg) {
-// enqueue(memQueue_out, MemoryMsg, latency="TO_MEM_CTRL_LATENCY") {
+// enqueue(memQueue_out, MemoryMsg, latency="1") {
// out_msg.Address := address;
// out_msg.Type := MemoryRequestType:MEMORY_WB;
// out_msg.OriginalRequestorMachId := machineID;
action(qw_queueMemoryWBRequest_partial, "qwp", desc="Queue off-chip writeback request") {
peek(dmaRequestQueue_in, DMARequestMsg) {
- enqueue(memQueue_out, MemoryMsg, latency="TO_MEM_CTRL_LATENCY") {
+ enqueue(memQueue_out, MemoryMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := MemoryRequestType:MEMORY_WB;
//out_msg.OriginalRequestorMachId := machineID;
//out_msg.DataBlk := in_msg.DataBlk;
- out_msg.DataBlk.copyPartial(in_msg.DataBlk, in_msg.Offset, in_msg.Len);
+ out_msg.DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.PhysicalAddress), in_msg.Len);
out_msg.MessageSize := in_msg.MessageSize;
//out_msg.Prefetch := in_msg.Prefetch;
action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue off-chip writeback request") {
peek(requestQueue_in, RequestMsg) {
- enqueue(memQueue_out, MemoryMsg, latency="TO_MEM_CTRL_LATENCY") {
+ enqueue(memQueue_out, MemoryMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := MemoryRequestType:MEMORY_WB;
out_msg.OriginalRequestorMachId := in_msg.Requestor;
//out_msg.DataBlk := in_msg.DataBlk;
- out_msg.DataBlk.copyPartial(TBEs[address].DataBlk, TBEs[address].Offset, TBEs[address].Len);
+ out_msg.DataBlk.copyPartial(TBEs[address].DataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
out_msg.MessageSize := in_msg.MessageSize;
//out_msg.Prefetch := in_msg.Prefetch;
action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
peek(requestQueue_in, RequestMsg) {
- enqueue(memQueue_out, MemoryMsg, latency="TO_MEM_CTRL_LATENCY") {
+ enqueue(memQueue_out, MemoryMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := MemoryRequestType:MEMORY_WB;
out_msg.OriginalRequestorMachId := in_msg.Requestor;
-machine(DMA, "DMA Controller") {
+machine(DMA, "DMA Controller")
+: int request_latency
+{
MessageBuffer responseFromDir, network="From", virtual_network="4", ordered="true", no_vector="true";
MessageBuffer reqToDirectory, network="To", virtual_network="5", ordered="false", no_vector="true";
out_port(reqToDirectory_out, DMARequestMsg, reqToDirectory, desc="...");
- in_port(dmaRequestQueue_in, DMARequestMsg, mandatoryQueue, desc="...") {
+ in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
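+    // The mandatory queue now carries SequencerMsg entries from the DMASequencer; LD/ST requests map onto the DMA read/write events below.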
if (dmaRequestQueue_in.isReady()) {
- peek(dmaRequestQueue_in, DMARequestMsg) {
- if (in_msg.Type == DMARequestType:READ ) {
+ peek(dmaRequestQueue_in, SequencerMsg) {
+ if (in_msg.Type == SequencerRequestType:LD ) {
trigger(Event:ReadRequest, in_msg.LineAddress);
- } else if (in_msg.Type == DMARequestType:WRITE) {
+ } else if (in_msg.Type == SequencerRequestType:ST) {
trigger(Event:WriteRequest, in_msg.LineAddress);
} else {
error("Invalid request type");
}
action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
- peek(dmaRequestQueue_in, DMARequestMsg) {
- enqueue(reqToDirectory_out, DMARequestMsg) {
- out_msg.PhysicalAddress := address;
+ peek(dmaRequestQueue_in, SequencerMsg) {
+ enqueue(reqToDirectory_out, DMARequestMsg, latency=request_latency) {
+ out_msg.PhysicalAddress := in_msg.PhysicalAddress;
out_msg.LineAddress := in_msg.LineAddress;
out_msg.Type := DMARequestType:READ;
out_msg.DataBlk := in_msg.DataBlk;
}
action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
- peek(dmaRequestQueue_in, DMARequestMsg) {
- enqueue(reqToDirectory_out, DMARequestMsg) {
- out_msg.PhysicalAddress := address;
+ peek(dmaRequestQueue_in, SequencerMsg) {
+ enqueue(reqToDirectory_out, DMARequestMsg, latency=request_latency) {
+ out_msg.PhysicalAddress := in_msg.PhysicalAddress;
out_msg.LineAddress := in_msg.LineAddress;
out_msg.Type := DMARequestType:WRITE;
out_msg.DataBlk := in_msg.DataBlk;
Address LineAddress, desc="Line address for this request";
NetDest Destination, desc="Destination";
DataBlock DataBlk, desc="DataBlk attached to this request";
- int Offset, desc="The offset into the datablock";
int Len, desc="The length of the request";
MessageSizeType MessageSize, desc="size category of the message";
}
*
*/
-machine(L1Cache, "Directory protocol") {
+machine(L1Cache, "Directory protocol")
+ : int request_latency,
+ int l2_select_low_bit,
+ int l2_select_high_bit
+{
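+  // request_latency replaces L1_REQUEST_LATENCY; l2_select_low_bit/high_bit name the address bits that pick the destination L2 bank via mapAddressToRange.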
// NODE L1 CACHE
// From this node's L1 cache TO the network
external_type(CacheMemory) {
bool cacheAvail(Address);
Address cacheProbe(Address);
- void allocate(Address);
+ void allocate(Address, Entry);
void deallocate(Address);
Entry lookup(Address);
void changePermission(Address, AccessPermission);
MessageBuffer mandatoryQueue, ordered="false", abstract_chip_ptr="true";
- Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
+ Sequencer sequencer, factory='RubySystem::getSequencer(m_cfg["sequencer"])';
TBETable TBEs, template_hack="<L1Cache_TBE>";
- CacheMemory L1IcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1I"', abstract_chip_ptr="true";
- CacheMemory L1DcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1D"', abstract_chip_ptr="true";
+ CacheMemory L1IcacheMemory, factory='RubySystem::getCache(m_cfg["icache"])';
+ CacheMemory L1DcacheMemory, factory='RubySystem::getCache(m_cfg["dcache"])';
TimerTable useTimerTable;
Entry getCacheEntry(Address addr), return_by_ref="yes" {
assert(in_msg.Destination.isElement(machineID));
DEBUG_EXPR("MRM_DEBUG: L1 received");
DEBUG_EXPR(in_msg.Type);
- if (in_msg.Type == CoherenceRequestType:GETX) {
+      if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_READ || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
if (in_msg.Requestor == machineID && in_msg.RequestorMachine == MachineType:L1Cache) {
trigger(Event:Own_GETX, in_msg.Address);
} else {
// *** INSTRUCTION ACCESS ***
// Check to see if it is in the OTHER L1
- if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
+ if (L1DcacheMemory.isTagPresent(in_msg.LineAddress)) {
// The block is in the wrong L1, put the request on the queue to the shared L2
- trigger(Event:L1_Replacement, in_msg.Address);
+ trigger(Event:L1_Replacement, in_msg.LineAddress);
}
- if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
+ if (L1IcacheMemory.isTagPresent(in_msg.LineAddress)) {
// The tag matches for the L1, so the L1 asks the L2 for it.
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
} else {
- if (L1IcacheMemory.cacheAvail(in_msg.Address)) {
+ if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
// L1 doesn't have the line, but we have space for it in the L1 so let's see if the L2 has it
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
} else {
// No room in the L1, so we need to make room in the L1
- trigger(Event:L1_Replacement, L1IcacheMemory.cacheProbe(in_msg.Address));
+ trigger(Event:L1_Replacement, L1IcacheMemory.cacheProbe(in_msg.LineAddress));
}
}
} else {
// *** DATA ACCESS ***
// Check to see if it is in the OTHER L1
- if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
+ if (L1IcacheMemory.isTagPresent(in_msg.LineAddress)) {
// The block is in the wrong L1, put the request on the queue to the shared L2
- trigger(Event:L1_Replacement, in_msg.Address);
+ trigger(Event:L1_Replacement, in_msg.LineAddress);
}
- if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
+ if (L1DcacheMemory.isTagPresent(in_msg.LineAddress)) {
// The tag matches for the L1, so the L1 asks the L2 for it
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
} else {
- if (L1DcacheMemory.cacheAvail(in_msg.Address)) {
+ if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
// L1 doesn't have the line, but we have space for it in the L1, so let's see if the L2 has it
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
} else {
// No room in the L1, so we need to make room in the L1
- trigger(Event:L1_Replacement, L1DcacheMemory.cacheProbe(in_msg.Address));
+ trigger(Event:L1_Replacement, L1DcacheMemory.cacheProbe(in_msg.LineAddress));
}
}
}
action(a_issueGETS, "a", desc="Issue GETS") {
peek(mandatoryQueue_in, CacheMsg) {
- enqueue(requestNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETS;
out_msg.Requestor := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_high_bit));
out_msg.MessageSize := MessageSizeType:Request_Control;
out_msg.AccessMode := in_msg.AccessMode;
out_msg.Prefetch := in_msg.Prefetch;
action(b_issueGETX, "b", desc="Issue GETX") {
peek(mandatoryQueue_in, CacheMsg) {
- enqueue(requestNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETX;
out_msg.Requestor := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_high_bit));
out_msg.MessageSize := MessageSizeType:Request_Control;
out_msg.AccessMode := in_msg.AccessMode;
out_msg.Prefetch := in_msg.Prefetch;
}
action(d_issuePUTX, "d", desc="Issue PUTX") {
- // enqueue(writebackNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
- enqueue(requestNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+ // enqueue(writebackNetwork_out, RequestMsg, latency=request_latency) {
+ enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:PUTX;
out_msg.Requestor := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_high_bit));
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
action(dd_issuePUTO, "\d", desc="Issue PUTO") {
- // enqueue(writebackNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
- enqueue(requestNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+ // enqueue(writebackNetwork_out, RequestMsg, latency=request_latency) {
+ enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:PUTO;
out_msg.Requestor := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_high_bit));
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
action(dd_issuePUTS, "\ds", desc="Issue PUTS") {
- // enqueue(writebackNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
- enqueue(requestNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+ // enqueue(writebackNetwork_out, RequestMsg, latency=request_latency) {
+ enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:PUTS;
out_msg.Requestor := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_high_bit));
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
action(e_sendData, "e", desc="Send data from cache to requestor") {
peek(requestNetwork_in, RequestMsg) {
if (in_msg.RequestorMachine == MachineType:L2Cache) {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(in_msg.Address, machineID));
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_high_bit));
out_msg.DataBlk := getCacheEntry(address).DataBlk;
// out_msg.Dirty := getCacheEntry(address).Dirty;
out_msg.Dirty := false;
DEBUG_EXPR(in_msg.Address);
}
else {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
}
action(e_sendDataToL2, "ee", desc="Send data from cache to requestor") {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_high_bit));
out_msg.DataBlk := getCacheEntry(address).DataBlk;
out_msg.Dirty := getCacheEntry(address).Dirty;
out_msg.Acks := 0; // irrelevant
action(ee_sendDataExclusive, "\e", desc="Send data from cache to requestor, don't keep a shared copy") {
peek(requestNetwork_in, RequestMsg) {
if (in_msg.RequestorMachine == MachineType:L2Cache) {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L1Cache;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(in_msg.Address, machineID));
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_high_bit));
out_msg.DataBlk := getCacheEntry(address).DataBlk;
out_msg.Dirty := getCacheEntry(address).Dirty;
out_msg.Acks := in_msg.Acks;
DEBUG_EXPR("Sending exclusive data to L2");
}
else {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
action(f_sendAck, "f", desc="Send ack from cache to requestor") {
peek(requestNetwork_in, RequestMsg) {
if (in_msg.RequestorMachine == MachineType:L1Cache) {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
}
}
else {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L1Cache;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(in_msg.Address, machineID));
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_high_bit));
out_msg.Acks := 0 - 1; // -1
out_msg.MessageSize := MessageSizeType:Response_Control;
}
}
action(g_sendUnblock, "g", desc="Send unblock to memory") {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:UNBLOCK;
out_msg.Sender := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_high_bit));
out_msg.MessageSize := MessageSizeType:Unblock_Control;
}
}
action(gg_sendUnblockExclusive, "\g", desc="Send unblock exclusive to memory") {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
out_msg.Sender := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_high_bit));
out_msg.MessageSize := MessageSizeType:Unblock_Control;
}
}
action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
peek(responseToL1Cache_in, ResponseMsg) {
DEBUG_EXPR("MRM_DEBUG: L1 decrementNumberOfMessages");
- DEBUG_EXPR(id);
DEBUG_EXPR(in_msg.Acks);
TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - in_msg.Acks;
}
action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
peek(requestNetwork_in, RequestMsg) {
if (in_msg.RequestorMachine == MachineType:L1Cache) {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
}
}
else {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address,machineID));
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_high_bit));
out_msg.DataBlk := TBEs[address].DataBlk;
// out_msg.Dirty := TBEs[address].Dirty;
out_msg.Dirty := false;
action(q_sendExclusiveDataFromTBEToCache, "qq", desc="Send data from TBE to cache") {
peek(requestNetwork_in, RequestMsg) {
if (in_msg.RequestorMachine == MachineType:L1Cache) {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
}
}
else {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address,machineID));
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_high_bit));
out_msg.DataBlk := TBEs[address].DataBlk;
out_msg.Dirty := TBEs[address].Dirty;
out_msg.Acks := in_msg.Acks;
// L2 will usually request data for a writeback
action(qq_sendWBDataFromTBEToL2, "\q", desc="Send data from TBE to L2") {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L1Cache;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_high_bit));
out_msg.Dirty := TBEs[address].Dirty;
if (TBEs[address].Dirty) {
out_msg.Type := CoherenceResponseType:WRITEBACK_DIRTY_DATA;
action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
if (L1DcacheMemory.isTagPresent(address) == false) {
- L1DcacheMemory.allocate(address);
+ L1DcacheMemory.allocate(address, new Entry);
}
}
action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
if (L1IcacheMemory.isTagPresent(address) == false) {
- L1IcacheMemory.allocate(address);
+ L1IcacheMemory.allocate(address, new Entry);
}
}
action(uu_profileMiss, "\u", desc="Profile the demand miss") {
peek(mandatoryQueue_in, CacheMsg) {
- profile_miss(in_msg, id);
+ // profile_miss(in_msg);
}
}
*
*/
-machine(L2Cache, "Token protocol") {
+machine(L2Cache, "Token protocol")
+: int response_latency,
+ int request_latency
+{
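+  // response_latency and request_latency replace the L2_RESPONSE_LATENCY and L2_REQUEST_LATENCY macros, set per controller from the Ruby config.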
// L2 BANK QUEUES
// From local bank of L2 cache TO the network
external_type(CacheMemory) {
bool cacheAvail(Address);
Address cacheProbe(Address);
- void allocate(Address);
+ void allocate(Address, Entry);
void deallocate(Address);
Entry lookup(Address);
void changePermission(Address, AccessPermission);
TBETable L2_TBEs, template_hack="<L2Cache_TBE>";
- CacheMemory L2cacheMemory, template_hack="<L2Cache_Entry>", constructor_hack='L2_CACHE_NUM_SETS_BITS,L2_CACHE_ASSOC,MachineType_L2Cache,int_to_string(i)+"_L2"';
+ CacheMemory L2cacheMemory, factory='RubySystem::getCache(m_cfg["cache"])';
PerfectCacheMemory localDirectory, template_hack="<L2Cache_DirEntry>";
Entry getL2CacheEntry(Address addr), return_by_ref="yes" {
if (L2cacheMemory.isTagPresent(addr)) {
return L2cacheMemory[addr];
+ } else {
+ return L2cacheMemory[addr]; // same lookup on both paths; kept so every path returns an entry reference
}
}
in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
if (requestNetwork_in.isReady()) {
peek(requestNetwork_in, RequestMsg) {
- if (in_msg.Type == CoherenceRequestType:GETX) {
+ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_READ || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
if (in_msg.Requestor == machineID) {
trigger(Event:Own_GETX, in_msg.Address);
} else {
action(a_issueGETS, "a", desc="issue local request globally") {
peek(L1requestNetwork_in, RequestMsg) {
- enqueue(globalRequestNetwork_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
+ enqueue(globalRequestNetwork_out, RequestMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETS;
out_msg.RequestorMachine := MachineType:L2Cache;
action(a_issueGETX, "\a", desc="issue local request globally") {
peek(L1requestNetwork_in, RequestMsg) {
- enqueue(globalRequestNetwork_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
+ enqueue(globalRequestNetwork_out, RequestMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETX;
out_msg.RequestorMachine := MachineType:L2Cache;
}
action(b_issuePUTX, "b", desc="Issue PUTX") {
- enqueue(globalRequestNetwork_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
+ enqueue(globalRequestNetwork_out, RequestMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:PUTX;
out_msg.RequestorMachine := MachineType:L2Cache;
}
action(b_issuePUTO, "\b", desc="Issue PUTO") {
- enqueue(globalRequestNetwork_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
+ enqueue(globalRequestNetwork_out, RequestMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:PUTO;
out_msg.Requestor := machineID;
/* PUTO, but local sharers exist */
action(b_issuePUTO_ls, "\bb", desc="Issue PUTO") {
- enqueue(globalRequestNetwork_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
+ enqueue(globalRequestNetwork_out, RequestMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:PUTO_SHARERS;
out_msg.Requestor := machineID;
}
action(c_sendDataFromTBEToL1GETS, "c", desc="Send data from TBE to L1 requestors in TBE") {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
}
action(c_sendDataFromTBEToL1GETX, "\c", desc="Send data from TBE to L1 requestors in TBE") {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
}
action(c_sendExclusiveDataFromTBEToL1GETS, "\cc", desc="Send data from TBE to L1 requestors in TBE") {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
}
action(c_sendDataFromTBEToFwdGETX, "cc", desc="Send data from TBE to external GETX") {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
}
action(c_sendDataFromTBEToFwdGETS, "ccc", desc="Send data from TBE to external GETS") {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
}
action(c_sendExclusiveDataFromTBEToFwdGETS, "\ccc", desc="Send data from TBE to external GETS") {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
action(d_sendDataToL1GETS, "d", desc="Send data directly to L1 requestor") {
peek(L1requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
action(d_sendDataToL1GETX, "\d", desc="Send data and a token from TBE to L1 requestor") {
peek(L1requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
action(dd_sendDataToFwdGETX, "dd", desc="send data") {
peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
action(dd_sendDataToFwdGETS, "\dd", desc="send data") {
peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
action(dd_sendExclusiveDataToFwdGETS, "\d\d", desc="send data") {
peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
}
action(e_sendAck, "e", desc="Send ack with the tokens we've collected thus far.") {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
action(e_sendAckToL1Requestor, "\e", desc="Send ack with the tokens we've collected thus far.") {
peek(L1requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
}
action(e_sendAckToL1RequestorFromTBE, "eee", desc="Send ack with the tokens we've collected thus far.") {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
L2_TBEs[address].NumIntPendingAcks := countLocalSharers(address);
DEBUG_EXPR(address);
DEBUG_EXPR(getLocalSharers(address));
- DEBUG_EXPR(id);
DEBUG_EXPR(L2_TBEs[address].NumIntPendingAcks);
if (isLocalOwnerValid(address)) {
L2_TBEs[address].NumIntPendingAcks := L2_TBEs[address].NumIntPendingAcks + 1;
DEBUG_EXPR(getLocalOwner(address));
}
- enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+ enqueue( localRequestNetwork_out, RequestMsg, latency=response_latency ) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:INV;
out_msg.Requestor := machineID;
L2_TBEs[address].NumIntPendingAcks := countLocalSharers(address);
if (countLocalSharers(address) > 0) {
- enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+ enqueue( localRequestNetwork_out, RequestMsg, latency=response_latency ) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:INV;
out_msg.Requestor := machineID;
L2_TBEs[address].NumIntPendingAcks := countLocalSharers(address);
}
- enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+ enqueue( localRequestNetwork_out, RequestMsg, latency=response_latency ) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:INV;
out_msg.Requestor := in_msg.Requestor;
L2_TBEs[address].NumIntPendingAcks := countLocalSharers(address);
}
}
- enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+ enqueue( localRequestNetwork_out, RequestMsg, latency=response_latency ) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:INV;
out_msg.Requestor := L2_TBEs[address].L1_GetX_ID;
action(f_sendUnblock, "f", desc="Send unblock to global directory") {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:UNBLOCK;
out_msg.Destination.add(map_Address_to_Directory(address));
action(f_sendExclusiveUnblock, "\f", desc="Send unblock to global directory") {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
out_msg.Destination.add(map_Address_to_Directory(address));
action(j_forwardGlobalRequestToLocalOwner, "j", desc="Forward external request to local owner") {
peek(requestNetwork_in, RequestMsg) {
- enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+ enqueue( localRequestNetwork_out, RequestMsg, latency=response_latency ) {
out_msg.Address := in_msg.Address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := machineID;
action(k_forwardLocalGETSToLocalSharer, "k", desc="Forward local request to local sharer/owner") {
peek(L1requestNetwork_in, RequestMsg) {
- enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+ enqueue( localRequestNetwork_out, RequestMsg, latency=response_latency ) {
out_msg.Address := in_msg.Address;
out_msg.Type := CoherenceRequestType:GETS;
out_msg.Requestor := in_msg.Requestor;
}
action(k_forwardLocalGETXToLocalOwner, "\k", desc="Forward local request to local owner") {
- enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+ enqueue( localRequestNetwork_out, RequestMsg, latency=response_latency ) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETX;
out_msg.Requestor := L2_TBEs[address].L1_GetX_ID;
// same as previous except that it assumes the TBE is present to get the number of acks
action(kk_forwardLocalGETXToLocalExclusive, "kk", desc="Forward local request to local owner") {
peek(L1requestNetwork_in, RequestMsg) {
- enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+ enqueue( localRequestNetwork_out, RequestMsg, latency=response_latency ) {
out_msg.Address := in_msg.Address;
out_msg.Type := CoherenceRequestType:GETX;
out_msg.Requestor := in_msg.Requestor;
action(kk_forwardLocalGETSToLocalOwner, "\kk", desc="Forward local request to local owner") {
peek(L1requestNetwork_in, RequestMsg) {
- enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+ enqueue( localRequestNetwork_out, RequestMsg, latency=response_latency ) {
out_msg.Address := in_msg.Address;
out_msg.Type := CoherenceRequestType:GETS;
out_msg.Requestor := in_msg.Requestor;
action(l_writebackAckNeedData, "l", desc="Send writeback ack to L1 requesting data") {
peek(L1requestNetwork_in, RequestMsg) {
- enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+ enqueue( localRequestNetwork_out, RequestMsg, latency=response_latency ) {
out_msg.Address := in_msg.Address;
// out_msg.Type := CoherenceResponseType:WRITEBACK_SEND_DATA;
out_msg.Type := CoherenceRequestType:WB_ACK_DATA;
action(l_writebackAckDropData, "\l", desc="Send writeback ack to L1 indicating to drop data") {
peek(L1requestNetwork_in, RequestMsg) {
- enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+ enqueue( localRequestNetwork_out, RequestMsg, latency=response_latency ) {
out_msg.Address := in_msg.Address;
// out_msg.Type := CoherenceResponseType:WRITEBACK_ACK;
out_msg.Type := CoherenceRequestType:WB_ACK;
action(ll_writebackNack, "\ll", desc="Send writeback nack to L1") {
peek(L1requestNetwork_in, RequestMsg) {
- enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+ enqueue( localRequestNetwork_out, RequestMsg, latency=response_latency ) {
out_msg.Address := in_msg.Address;
out_msg.Type := CoherenceRequestType:WB_NACK;
out_msg.Requestor := machineID;
action( qq_sendDataFromTBEToMemory, "qq", desc="Send data from TBE to directory") {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L2Cache;
}
action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
- L2cacheMemory.allocate(address);
+ L2cacheMemory.allocate(address, new Entry);
}
action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
action(uu_profileMiss, "\u", desc="Profile the demand miss") {
peek(L1requestNetwork_in, RequestMsg) {
// AccessModeType not implemented
- profile_L2Cache_miss(convertToGenericType(in_msg.Type), in_msg.AccessMode, MessageSizeTypeToInt(in_msg.MessageSize), in_msg.Prefetch, machineIDToNodeID(in_msg.Requestor));
+ // profile_L2Cache_miss(convertToGenericType(in_msg.Type), in_msg.AccessMode, MessageSizeTypeToInt(in_msg.MessageSize), in_msg.Prefetch, machineIDToNodeID(in_msg.Requestor));
}
}
* $Id$
*/
-machine(Directory, "Directory protocol") {
+machine(Directory, "Directory protocol")
+: int directory_latency
+{
// ** IN QUEUES **
MessageBuffer foo1, network="From", virtual_network="0", ordered="false"; // a mod-L2 bank -> this Dir
MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false"; // a mod-L2 bank -> this Dir
MessageBuffer responseToDir, network="From", virtual_network="2", ordered="false"; // a mod-L2 bank -> this Dir
-
+
MessageBuffer goo1, network="To", virtual_network="0", ordered="false";
MessageBuffer forwardFromDir, network="To", virtual_network="1", ordered="false";
MessageBuffer responseFromDir, network="To", virtual_network="2", ordered="false"; // Dir -> mod-L2 bank
OO, desc="Blocked, was in owned";
MO, desc="Blocked, going to owner or maybe modified";
MM, desc="Blocked, going to modified";
+ MM_DMA, desc="Blocked, going to I";
MI, desc="Blocked on a writeback";
MIS, desc="Blocked on a writeback, but don't remove from sharers when received";
OS, desc="Blocked on a writeback";
OSS, desc="Blocked on a writeback, but don't remove from sharers when received";
+
+ XI_M, desc="In a stable state, going to I, waiting for the memory controller";
+ XI_U, desc="In a stable state, going to I, waiting for an unblock";
+ OI_D, desc="In O, going to I, waiting for data";
}
// Events
Exclusive_Unblock, desc="The processor becomes the exclusive owner (E or M) of the line";
Clean_Writeback, desc="The final message as part of a PutX/PutS, no data";
Dirty_Writeback, desc="The final message as part of a PutX/PutS, contains data";
+ Memory_Data, desc="Fetched data from memory arrives";
+ Memory_Ack, desc="Writeback Ack from memory arrives";
+ DMA_READ, desc="DMA Read";
+ DMA_WRITE, desc="DMA Write";
+ Data, desc="Data to directory";
}
// TYPES
int WaitingUnblocks, desc="Number of acks we're waiting for";
}
+ structure(TBE, desc="...") {
+ Address address, desc="Address for this entry";
+ int Len, desc="Length of request";
+ DataBlock DataBlk, desc="DataBlk";
+ MachineID Requestor, desc="original requestor";
+ }
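+  // A TBE buffers the DMA write data, length, and requestor while the directory collects the owner's copy (see the OI_D transitions below).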
+
external_type(DirectoryMemory) {
Entry lookup(Address);
bool isPresent(Address);
}
+ external_type(TBETable) {
+ TBE lookup(Address);
+ void allocate(Address);
+ void deallocate(Address);
+ bool isPresent(Address);
+ }
+
+ // to simulate detailed DRAM
+ external_type(MemoryControl, inport="yes", outport="yes") {
+
+ }
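+  // The directory talks to MemoryControl through memQueue_out / memQueue_in: MEMORY_READ and MEMORY_WB requests go out, and Memory_Data / Memory_Ack events come back.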
+
// ** OBJECTS **
- DirectoryMemory directory, constructor_hack="i";
+ DirectoryMemory directory, factory='RubySystem::getDirectory(m_cfg["directory_name"])';
+ MemoryControl memBuffer, factory='RubySystem::getMemoryControl(m_cfg["memory_controller_name"])';
+ TBETable TBEs, template_hack="<Directory_TBE>";
State getState(Address addr) {
return directory[addr].DirectoryState;
out_port(responseNetwork_out, ResponseMsg, responseFromDir);
// out_port(requestQueue_out, ResponseMsg, requestFromDir); // For recycling requests
out_port(goo1_out, ResponseMsg, goo1);
+ out_port(memQueue_out, MemoryMsg, memBuffer);
// ** IN_PORTS **
trigger(Event:Dirty_Writeback, in_msg.Address);
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_ACK) {
trigger(Event:Clean_Writeback, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
+ trigger(Event:Data, in_msg.Address);
} else {
error("Invalid message");
}
trigger(Event:PUTO, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
trigger(Event:PUTO_SHARERS, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
+ trigger(Event:DMA_READ, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
+ trigger(Event:DMA_WRITE, in_msg.Address);
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ // off-chip memory request/response is done
+ in_port(memQueue_in, MemoryMsg, memBuffer) {
+ if (memQueue_in.isReady()) {
+ peek(memQueue_in, MemoryMsg) {
+ if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
+ trigger(Event:Memory_Data, in_msg.Address);
+ } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
+ trigger(Event:Memory_Ack, in_msg.Address);
} else {
+ DEBUG_EXPR(in_msg.Type);
error("Invalid message");
}
}
action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:WB_ACK;
out_msg.Requestor := in_msg.Requestor;
action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:WB_NACK;
out_msg.Requestor := in_msg.Requestor;
directory[address].Sharers.clear();
}
- action(d_sendData, "d", desc="Send data to requestor") {
- peek(requestQueue_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
- // enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ action(d_sendDataMsg, "d", desc="Send data to requestor") {
+ peek(memQueue_in, MemoryMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="1") {
out_msg.Address := address;
-
- if (in_msg.Type == CoherenceRequestType:GETS && directory[address].Sharers.count() == 0) {
- out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
- } else {
- out_msg.Type := CoherenceResponseType:DATA;
- }
-
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:Directory;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+ out_msg.Destination.add(in_msg.OriginalRequestorMachId);
+ //out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+ out_msg.DataBlk := in_msg.DataBlk;
out_msg.Dirty := false; // By definition, the block is now clean
- out_msg.Acks := directory[address].Sharers.count();
- if (directory[address].Sharers.isElement(in_msg.Requestor)) {
- out_msg.Acks := out_msg.Acks - 1;
+ out_msg.Acks := in_msg.Acks;
+ if (in_msg.ReadX) {
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ } else {
+ out_msg.Type := CoherenceResponseType:DATA;
}
out_msg.MessageSize := MessageSizeType:Response_Data;
}
action(f_forwardRequest, "f", desc="Forward request to owner") {
peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
out_msg.Address := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
}
}
+ action(f_forwardRequestDirIsRequestor, "\f", desc="Forward request to owner") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
+ out_msg.Address := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.addNetDest(directory[in_msg.Address].Owner);
+ out_msg.Acks := directory[address].Sharers.count();
+ if (directory[address].Sharers.isElement(in_msg.Requestor)) {
+ out_msg.Acks := out_msg.Acks - 1;
+ }
+ out_msg.MessageSize := MessageSizeType:Forwarded_Control;
+ }
+ }
+ }
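+  // Naming the directory itself as requestor makes the owner send its data back here rather than to the DMA, so the modified block can be merged before the memory writeback.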
+
action(g_sendInvalidations, "g", desc="Send invalidations to sharers, not including the requester") {
peek(requestQueue_in, RequestMsg) {
if ((directory[in_msg.Address].Sharers.count() > 1) ||
((directory[in_msg.Address].Sharers.count() > 0) && (directory[in_msg.Address].Sharers.isElement(in_msg.Requestor) == false))) {
- enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:INV;
out_msg.Requestor := in_msg.Requestor;
}
}
- action(ll_checkDataInMemory, "\l", desc="Check PUTX/PUTO data is same as in the memory") {
+ action(ll_checkDataInMemory, "\ld", desc="Check PUTX/PUTO data is same as in the memory") {
peek(unblockNetwork_in, ResponseMsg) {
assert(in_msg.Dirty == false);
assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
assert(directory[address].WaitingUnblocks >= 0);
}
+ action(q_popMemQueue, "q", desc="Pop off-chip request queue") {
+ memQueue_in.dequeue();
+ }
+
+ action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(memQueue_out, MemoryMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Type := MemoryRequestType:MEMORY_READ;
+ out_msg.Sender := machineID;
+ out_msg.OriginalRequestorMachId := in_msg.Requestor;
+ out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+ out_msg.MessageSize := in_msg.MessageSize;
+ //out_msg.Prefetch := false;
+ // These are not used by memory but are passed back here with the read data:
+ out_msg.ReadX := (in_msg.Type == CoherenceRequestType:GETS && directory[address].Sharers.count() == 0);
+ out_msg.Acks := directory[address].Sharers.count();
+ if (directory[address].Sharers.isElement(in_msg.Requestor)) {
+ out_msg.Acks := out_msg.Acks - 1;
+ }
+ DEBUG_EXPR(out_msg);
+ }
+ }
+ }
+
+ action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
+ peek(unblockNetwork_in, ResponseMsg) {
+ enqueue(memQueue_out, MemoryMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Type := MemoryRequestType:MEMORY_WB;
+ out_msg.Sender := machineID;
+ if (TBEs.isPresent(address)) {
+ out_msg.OriginalRequestorMachId := TBEs[address].Requestor;
+ }
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.MessageSize := in_msg.MessageSize;
+ //out_msg.Prefetch := false;
+ // Not used:
+ out_msg.ReadX := false;
+ out_msg.Acks := directory[address].Sharers.count(); // for dma requests
+ DEBUG_EXPR(out_msg);
+ }
+ }
+ }
+
+ action(qw_queueMemoryWBRequest2, "/qw", desc="Queue off-chip writeback request") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(memQueue_out, MemoryMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Type := MemoryRequestType:MEMORY_WB;
+ out_msg.Sender := machineID;
+ out_msg.OriginalRequestorMachId := in_msg.Requestor;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.MessageSize := in_msg.MessageSize;
+ //out_msg.Prefetch := false;
+ // Not used:
+ out_msg.ReadX := false;
+ out_msg.Acks := directory[address].Sharers.count(); // for dma requests
+ DEBUG_EXPR(out_msg);
+ }
+ }
+ }
+
+
// action(z_stall, "z", desc="Cannot be handled right now.") {
// Special name recognized as do nothing case
// }
requestQueue_in.recycle();
}
+ action(a_sendDMAAck, "\a", desc="Send DMA Ack that write completed, along with Inv Ack count") {
+ peek(memQueue_in, MemoryMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:Directory;
+ out_msg.Destination.add(in_msg.OriginalRequestorMachId);
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Acks := in_msg.Acks;
+ out_msg.Type := CoherenceResponseType:DMA_ACK;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(l_writeDMADataToMemory, "\l", desc="Write data from a DMA_WRITE to memory") {
+ peek(requestQueue_in, RequestMsg) {
+ directory[address].DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.Address), in_msg.Len);
+ }
+ }
+
+ action(l_writeDMADataToMemoryFromTBE, "\ll", desc="Write data from a DMA_WRITE to memory") {
+ directory[address].DataBlk.copyPartial(TBEs[address].DataBlk, addressOffset(address), TBEs[address].Len);
+ }
+
+ action(v_allocateTBE, "v", desc="Allocate TBE entry") {
+ peek (requestQueue_in, RequestMsg) {
+ TBEs.allocate(address);
+ TBEs[address].Len := in_msg.Len;
+ TBEs[address].DataBlk := in_msg.DataBlk;
+ TBEs[address].Requestor := in_msg.Requestor;
+ }
+ }
+
+ action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
+ TBEs.deallocate(address);
+ }
+
+
+
// TRANSITIONS
transition(I, GETX, MM) {
- d_sendData;
+ qf_queueMemoryFetchRequest;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(I, DMA_READ, XI_M) {
+ qf_queueMemoryFetchRequest;
i_popIncomingRequestQueue;
}
+ transition(I, DMA_WRITE, XI_M) {
+ qw_queueMemoryWBRequest2;
+ l_writeDMADataToMemory;
+ i_popIncomingRequestQueue;
+ }
+
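+  // XI_M waits for the memory controller's response; XI_U then waits for the requester's exclusive unblock before the line settles back to I.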
+ transition(XI_M, Memory_Data, XI_U) {
+ d_sendDataMsg; // ack count may be zero
+ q_popMemQueue;
+ }
+
+ transition(XI_M, Memory_Ack, XI_U) {
+ a_sendDMAAck; // ack count may be zero
+ q_popMemQueue;
+ }
+
+ transition(XI_U, Exclusive_Unblock, I) {
+ cc_clearSharers;
+ c_clearOwner;
+ j_popIncomingUnblockQueue;
+ }
+
transition(S, GETX, MM) {
- d_sendData;
+ qf_queueMemoryFetchRequest;
g_sendInvalidations;
i_popIncomingRequestQueue;
}
+ transition(S, DMA_READ, XI_M) {
+ qf_queueMemoryFetchRequest;
+ g_sendInvalidations; // the DMA will collect the invalidation acks, then send an Unblock Exclusive
+ i_popIncomingRequestQueue;
+ }
+
+ transition(S, DMA_WRITE, XI_M) {
+ qw_queueMemoryWBRequest2;
+ l_writeDMADataToMemory;
+ g_sendInvalidations; // the DMA will collect the invalidation acks
+ i_popIncomingRequestQueue;
+ }
+
transition(I, GETS, IS) {
- d_sendData;
+ qf_queueMemoryFetchRequest;
i_popIncomingRequestQueue;
}
transition({S, SS}, GETS, SS) {
- d_sendData;
+ qf_queueMemoryFetchRequest;
n_incrementOutstanding;
i_popIncomingRequestQueue;
}
i_popIncomingRequestQueue;
}
+ transition(O, DMA_READ, XI_U) {
+ f_forwardRequest; // this will cause the data to go to the DMA directly
+ g_sendInvalidations; // this will cause acks to be sent to the DMA
+ i_popIncomingRequestQueue;
+ }
+
+ transition({O,M}, DMA_WRITE, OI_D) {
+ f_forwardRequestDirIsRequestor; // need the modified data before we can proceed
+ g_sendInvalidations; // these go to the DMA Controller
+ v_allocateTBE;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(OI_D, Data, XI_M) {
+ qw_queueMemoryWBRequest;
+ l_writeDataToMemory;
+ l_writeDMADataToMemoryFromTBE;
+ w_deallocateTBE;
+ j_popIncomingUnblockQueue;
+ }
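+  // The owner's full block is written back first, then the buffered partial DMA data is merged over it from the TBE.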
+
transition({O, OO}, GETS, OO) {
f_forwardRequest;
n_incrementOutstanding;
i_popIncomingRequestQueue;
}
+ // no Exclusive_Unblock will arrive at the directory
+ transition(M, DMA_READ, XI_U) {
+ f_forwardRequest; // this will cause the data to go to DMA directly
+ i_popIncomingRequestQueue;
+ }
+
transition(M, GETS, MO) {
f_forwardRequest;
i_popIncomingRequestQueue;
}
- transition({MM, MO, MI, MIS, OS, OSS}, {GETS, GETX, PUTO, PUTO_SHARERS, PUTX}) {
+ transition({MM, MO, MI, MIS, OS, OSS}, {GETS, GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ}) {
zz_recycleRequest;
}
j_popIncomingUnblockQueue;
}
- transition({IS, SS, OO}, {GETX, PUTO, PUTO_SHARERS, PUTX}) {
+ transition({IS, SS, OO}, {GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ}) {
zz_recycleRequest;
}
c_clearOwner;
cc_clearSharers;
l_writeDataToMemory;
+ qw_queueMemoryWBRequest;
j_popIncomingUnblockQueue;
}
transition(MIS, Dirty_Writeback, S) {
c_moveOwnerToSharer;
l_writeDataToMemory;
+ qw_queueMemoryWBRequest;
j_popIncomingUnblockQueue;
}
transition(OS, Dirty_Writeback, S) {
c_clearOwner;
l_writeDataToMemory;
+ qw_queueMemoryWBRequest;
j_popIncomingUnblockQueue;
}
transition(OSS, Dirty_Writeback, S) {
c_moveOwnerToSharer;
l_writeDataToMemory;
+ qw_queueMemoryWBRequest;
j_popIncomingUnblockQueue;
}
transition({OS, OSS}, Unblock, O) {
j_popIncomingUnblockQueue;
}
+
+ transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS}, Memory_Data) {
+ d_sendDataMsg;
+ q_popMemQueue;
+ }
+
+ transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS}, Memory_Ack) {
+ //a_sendAck;
+ q_popMemQueue;
+ }
+
}
--- /dev/null
+
+machine(DMA, "DMA Controller")
+: int request_latency,
+ int response_latency
+{
+
+ MessageBuffer goo1, network="From", virtual_network="0", ordered="false";
+ MessageBuffer goo2, network="From", virtual_network="1", ordered="false";
+ MessageBuffer responseFromDir, network="From", virtual_network="2", ordered="false";
+
+ MessageBuffer foo1, network="To", virtual_network="0", ordered="false";
+ MessageBuffer reqToDir, network="To", virtual_network="1", ordered="false";
+ MessageBuffer respToDir, network="To", virtual_network="2", ordered="false";
+
+ enumeration(State, desc="DMA states", default="DMA_State_READY") {
+ READY, desc="Ready to accept a new request";
+ BUSY_RD, desc="Busy: currently processing a read request";
+ BUSY_WR, desc="Busy: currently processing a write request";
+ }
+
+ enumeration(Event, desc="DMA events") {
+ ReadRequest, desc="A new read request";
+ WriteRequest, desc="A new write request";
+ Data, desc="Data from a DMA memory read";
+ DMA_Ack, desc="DMA write to memory completed";
+ Inv_Ack, desc="Invalidation Ack from a sharer";
+ All_Acks, desc="All acks received";
+ }
+
+ structure(TBE, desc="...") {
+ Address address, desc="Physical address";
+ int NumAcks, default="0", desc="Number of Acks pending";
+ DataBlock DataBlk, desc="Data";
+ }
+
+ external_type(DMASequencer) {
+ void ackCallback();
+ void dataCallback(DataBlock);
+ }
+
+ external_type(TBETable) {
+ TBE lookup(Address);
+ void allocate(Address);
+ void deallocate(Address);
+ bool isPresent(Address);
+ }
+
+ MessageBuffer mandatoryQueue, ordered="false";
+ MessageBuffer triggerQueue, ordered="true";
+ DMASequencer dma_sequencer, factory='RubySystem::getDMASequencer(m_cfg["dma_sequencer"])';
+ TBETable TBEs, template_hack="<DMA_TBE>";
+ State cur_state;
+
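+ // The DMA controller services one request at a time (READY -> BUSY_*),
+ // so a single state variable suffices and the address argument is ignored.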
+ State getState(Address addr) {
+ return cur_state;
+ }
+ void setState(Address addr, State state) {
+ cur_state := state;
+ }
+
+ out_port(reqToDirectory_out, RequestMsg, reqToDir, desc="...");
+ out_port(respToDirectory_out, ResponseMsg, respToDir, desc="...");
+ out_port(foo1_out, ResponseMsg, foo1, desc="...");
+ out_port(triggerQueue_out, TriggerMsg, triggerQueue, desc="...");
+
+ in_port(goo1_in, RequestMsg, goo1) {
+ if (goo1_in.isReady()) {
+ peek(goo1_in, RequestMsg) {
+ assert(false);
+ }
+ }
+ }
+
+ in_port(goo2_in, RequestMsg, goo2) {
+ if (goo2_in.isReady()) {
+ peek(goo2_in, RequestMsg) {
+ assert(false);
+ }
+ }
+ }
+
+ in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
+ if (dmaRequestQueue_in.isReady()) {
+ peek(dmaRequestQueue_in, SequencerMsg) {
+ if (in_msg.Type == SequencerRequestType:LD) {
+ trigger(Event:ReadRequest, in_msg.PhysicalAddress);
+ } else if (in_msg.Type == SequencerRequestType:ST) {
+ trigger(Event:WriteRequest, in_msg.PhysicalAddress);
+ } else {
+ error("Invalid request type");
+ }
+ }
+ }
+ }
+
+ in_port(dmaResponseQueue_in, ResponseMsg, responseFromDir, desc="...") {
+ if (dmaResponseQueue_in.isReady()) {
+ peek(dmaResponseQueue_in, ResponseMsg) {
+ if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
+ trigger(Event:DMA_Ack, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
+ trigger(Event:Data, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:ACK) {
+ trigger(Event:Inv_Ack, in_msg.Address);
+ } else {
+ error("Invalid response type");
+ }
+ }
+ }
+ }
+
+ // Trigger Queue
+ in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
+ if (triggerQueue_in.isReady()) {
+ peek(triggerQueue_in, TriggerMsg) {
+ if (in_msg.Type == TriggerType:ALL_ACKS) {
+ trigger(Event:All_Acks, in_msg.Address);
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
+ action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
+ peek(dmaRequestQueue_in, SequencerMsg) {
+ enqueue(reqToDirectory_out, RequestMsg, latency=request_latency) {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:DMA_READ;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Len := in_msg.Len;
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.Requestor := machineID;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
+ peek(dmaRequestQueue_in, SequencerMsg) {
+ enqueue(reqToDirectory_out, RequestMsg, latency=request_latency) {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:DMA_WRITE;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Len := in_msg.Len;
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.Requestor := machineID;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(a_ackCallback, "a", desc="Notify dma controller that write request completed") {
+ dma_sequencer.ackCallback();
+ }
+
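+ // Completion detection: once the pending ack count drops to zero, the
+ // controller enqueues an ALL_ACKS message to itself on the trigger queue,
+ // which fires the All_Acks event and finishes the transaction.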
+ action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
+ if (TBEs[address].NumAcks == 0) {
+ enqueue(triggerQueue_out, TriggerMsg) {
+ out_msg.Address := address;
+ out_msg.Type := TriggerType:ALL_ACKS;
+ }
+ }
+ }
+
+ action(u_updateAckCount, "u", desc="Update ack count") {
+ peek(dmaResponseQueue_in, ResponseMsg) {
+ TBEs[address].NumAcks := TBEs[address].NumAcks - in_msg.Acks;
+ }
+ }
+
+ action(u_sendExclusiveUnblockToDir, "\u", desc="Send exclusive unblock to directory") {
+ enqueue(respToDirectory_out, ResponseMsg, latency=response_latency) {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+
+ action(p_popRequestQueue, "p", desc="Pop request queue") {
+ dmaRequestQueue_in.dequeue();
+ }
+
+ action(p_popResponseQueue, "\p", desc="Pop response queue") {
+ dmaResponseQueue_in.dequeue();
+ }
+
+ action(p_popTriggerQueue, "pp", desc="Pop trigger queue") {
+ triggerQueue_in.dequeue();
+ }
+
+ action(t_updateTBEData, "t", desc="Update TBE Data") {
+ peek(dmaResponseQueue_in, ResponseMsg) {
+ TBEs[address].DataBlk := in_msg.DataBlk;
+ }
+ }
+
+ action(d_dataCallbackFromTBE, "/d", desc="data callback with data from TBE") {
+ dma_sequencer.dataCallback(TBEs[address].DataBlk);
+ }
+
+ action(v_allocateTBE, "v", desc="Allocate TBE entry") {
+ TBEs.allocate(address);
+ }
+
+ action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
+ TBEs.deallocate(address);
+ }
+
+ action(z_stall, "z", desc="DMA is busy; stall") {
+ // do nothing
+ }
+
+
+
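+ // Read flow: READY -> BUSY_RD collects Data and Inv_Acks, then All_Acks
+ // returns the data to the sequencer and unblocks the directory.
+ // Write flow: READY -> BUSY_WR collects DMA_Ack and Inv_Acks, then
+ // All_Acks notifies the sequencer and unblocks the directory.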
+ transition(READY, ReadRequest, BUSY_RD) {
+ s_sendReadRequest;
+ v_allocateTBE;
+ p_popRequestQueue;
+ }
+
+ transition(BUSY_RD, Inv_Ack) {
+ u_updateAckCount;
+ o_checkForCompletion;
+ p_popResponseQueue;
+ }
+
+ transition(BUSY_RD, Data) {
+ t_updateTBEData;
+ u_updateAckCount;
+ o_checkForCompletion;
+ p_popResponseQueue;
+ }
+
+ transition(BUSY_RD, All_Acks, READY) {
+ d_dataCallbackFromTBE;
+ u_sendExclusiveUnblockToDir;
+ w_deallocateTBE;
+ p_popTriggerQueue;
+ }
+
+ transition(READY, WriteRequest, BUSY_WR) {
+ s_sendWriteRequest;
+ v_allocateTBE;
+ p_popRequestQueue;
+ }
+
+ transition(BUSY_WR, Inv_Ack) {
+ u_updateAckCount;
+ o_checkForCompletion;
+ p_popResponseQueue;
+ }
+
+ transition(BUSY_WR, DMA_Ack) {
+ u_updateAckCount; // actually increases
+ o_checkForCompletion;
+ p_popResponseQueue;
+ }
+
+ transition(BUSY_WR, All_Acks, READY) {
+ a_ackCallback;
+ u_sendExclusiveUnblockToDir;
+ w_deallocateTBE;
+ p_popTriggerQueue;
+ }
+}
WB_ACK_DATA, desc="Writeback ack";
WB_NACK, desc="Writeback neg. ack";
INV, desc="Invalidation";
+
+ DMA_READ, desc="DMA Read";
+ DMA_WRITE, desc="DMA Write";
}
// CoherenceResponseType
WRITEBACK_CLEAN_DATA, desc="Clean writeback (contains data)";
WRITEBACK_CLEAN_ACK, desc="Clean writeback (contains no data)";
WRITEBACK_DIRTY_DATA, desc="Dirty writeback (contains data)";
+
+ DMA_ACK, desc="Ack that a DMA write completed";
}
// TriggerType
// RequestMsg (and also forwarded requests)
structure(RequestMsg, desc="...", interface="NetworkMessage") {
Address Address, desc="Physical address for this request";
+ int Len, desc="Length of Request";
CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
MachineID Requestor, desc="Node who initiated the request";
MachineType RequestorMachine, desc="type of component";
NetDest Destination, desc="Multicast destination mask";
+ DataBlock DataBlk, desc="data for the cache line (DMA WRITE request)";
int Acks, desc="How many acks to expect";
MessageSizeType MessageSize, desc="size category of the message";
AccessModeType AccessMode, desc="user/supervisor access type";
}
-GenericRequestType convertToGenericType(CoherenceRequestType type) {
- if(type == CoherenceRequestType:PUTX) {
- return GenericRequestType:PUTX;
- } else if(type == CoherenceRequestType:GETS) {
- return GenericRequestType:GETS;
- } else if(type == CoherenceRequestType:GETX) {
- return GenericRequestType:GETX;
- } else if(type == CoherenceRequestType:PUTS) {
- return GenericRequestType:PUTS;
- } else if(type == CoherenceRequestType:PUTX) {
- return GenericRequestType:PUTS;
- } else if(type == CoherenceRequestType:PUTO) {
- return GenericRequestType:PUTO;
- } else if(type == CoherenceRequestType:PUTO_SHARERS) {
- return GenericRequestType:PUTO;
- } else if(type == CoherenceRequestType:INV) {
- return GenericRequestType:INV;
- } else if(type == CoherenceRequestType:WB_ACK) {
- return GenericRequestType:WB_ACK;
- } else if(type == CoherenceRequestType:WB_ACK_DATA) {
- return GenericRequestType:WB_ACK;
- } else if(type == CoherenceRequestType:WB_NACK) {
- return GenericRequestType:NACK;
- } else {
- DEBUG_EXPR(type);
- error("invalid CoherenceRequestType");
- }
-}
MOESI_CMP_directory-msg.sm
MOESI_CMP_directory-L2cache.sm
MOESI_CMP_directory-L1cache.sm
+MOESI_CMP_directory-dma.sm
MOESI_CMP_directory-dir.sm
standard_CMP-protocol.sm
+++ /dev/null
-
-/*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id$
- */
-
-machine(Directory, "Directory protocol") {
-
- // ** IN QUEUES **
- MessageBuffer foo1, network="From", virtual_network="0", ordered="false"; // a mod-L2 bank -> this Dir
- MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false"; // a mod-L2 bank -> this Dir
- MessageBuffer responseToDir, network="From", virtual_network="2", ordered="false"; // a mod-L2 bank -> this Dir
-
- MessageBuffer goo1, network="To", virtual_network="0", ordered="false";
- MessageBuffer forwardFromDir, network="To", virtual_network="1", ordered="false";
- MessageBuffer responseFromDir, network="To", virtual_network="2", ordered="false"; // Dir -> mod-L2 bank
-
-
- // STATES
- enumeration(State, desc="Directory states", default="Directory_State_I") {
- // Base states
- I, desc="Invalid";
- S, desc="Shared";
- O, desc="Owner";
- M, desc="Modified";
-
- IS, desc="Blocked, was in idle";
- SS, desc="Blocked, was in shared";
- OO, desc="Blocked, was in owned";
- MO, desc="Blocked, going to owner or maybe modified";
- MM, desc="Blocked, going to modified";
-
- MI, desc="Blocked on a writeback";
- MIS, desc="Blocked on a writeback, but don't remove from sharers when received";
- OS, desc="Blocked on a writeback";
- OSS, desc="Blocked on a writeback, but don't remove from sharers when received";
- }
-
- // Events
- enumeration(Event, desc="Directory events") {
- GETX, desc="A GETX arrives";
- GETS, desc="A GETS arrives";
- PUTX, desc="A PUTX arrives";
- PUTO, desc="A PUTO arrives";
- PUTO_SHARERS, desc="A PUTO arrives, but don't remove from sharers list";
- Unblock, desc="An unblock message arrives";
- Last_Unblock, desc="An unblock message arrives, we're not waiting for any additional unblocks";
- Exclusive_Unblock, desc="The processor become the exclusive owner (E or M) of the line";
- Clean_Writeback, desc="The final message as part of a PutX/PutS, no data";
- Dirty_Writeback, desc="The final message as part of a PutX/PutS, contains data";
- Memory_Data, desc="Fetched data from memory arrives";
- Memory_Ack, desc="Writeback Ack from memory arrives";
- }
-
- // TYPES
-
- // DirectoryEntry
- structure(Entry, desc="...") {
- State DirectoryState, desc="Directory state";
- DataBlock DataBlk, desc="data for the block";
- NetDest Sharers, desc="Sharers for this block";
- NetDest Owner, desc="Owner of this block";
- int WaitingUnblocks, desc="Number of acks we're waiting for";
- }
-
- external_type(DirectoryMemory) {
- Entry lookup(Address);
- bool isPresent(Address);
- }
-
- // to simulate detailed DRAM
- external_type(MemoryControl, inport="yes", outport="yes") {
-
- }
-
-
- // ** OBJECTS **
-
- DirectoryMemory directory, constructor_hack="i";
- MemoryControl memBuffer, constructor_hack="i";
-
- State getState(Address addr) {
- return directory[addr].DirectoryState;
- }
-
- void setState(Address addr, State state) {
- if (directory.isPresent(addr)) {
-
- if (state == State:I) {
- assert(directory[addr].Owner.count() == 0);
- assert(directory[addr].Sharers.count() == 0);
- }
-
- if (state == State:S) {
- assert(directory[addr].Owner.count() == 0);
- }
-
- if (state == State:O) {
- assert(directory[addr].Owner.count() == 1);
- assert(directory[addr].Sharers.isSuperset(directory[addr].Owner) == false);
- }
-
- if (state == State:M) {
- assert(directory[addr].Owner.count() == 1);
- assert(directory[addr].Sharers.count() == 0);
- }
-
- if ((state != State:SS) && (state != State:OO)) {
- assert(directory[addr].WaitingUnblocks == 0);
- }
-
- if ( (directory[addr].DirectoryState != State:I) && (state == State:I) ) {
- directory[addr].DirectoryState := state;
- // disable coherence checker
- // sequencer.checkCoherence(addr);
- }
- else {
- directory[addr].DirectoryState := state;
- }
- }
- }
-
- // if no sharers, then directory can be considered both a sharer and exclusive w.r.t. coherence checking
- bool isBlockShared(Address addr) {
- if (directory.isPresent(addr)) {
- if (directory[addr].DirectoryState == State:I) {
- return true;
- }
- }
- return false;
- }
-
- bool isBlockExclusive(Address addr) {
- if (directory.isPresent(addr)) {
- if (directory[addr].DirectoryState == State:I) {
- return true;
- }
- }
- return false;
- }
-
-
- // ** OUT_PORTS **
- out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
- out_port(responseNetwork_out, ResponseMsg, responseFromDir);
-// out_port(requestQueue_out, ResponseMsg, requestFromDir); // For recycling requests
- out_port(goo1_out, ResponseMsg, goo1);
- out_port(memQueue_out, MemoryMsg, memBuffer);
-
- // ** IN_PORTS **
-
- in_port(foo1_in, ResponseMsg, foo1) {
-
- }
-
- // in_port(unblockNetwork_in, ResponseMsg, unblockToDir) {
- // if (unblockNetwork_in.isReady()) {
- in_port(unblockNetwork_in, ResponseMsg, responseToDir) {
- if (unblockNetwork_in.isReady()) {
- peek(unblockNetwork_in, ResponseMsg) {
- if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
- if (directory[in_msg.Address].WaitingUnblocks == 1) {
- trigger(Event:Last_Unblock, in_msg.Address);
- } else {
- trigger(Event:Unblock, in_msg.Address);
- }
- } else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
- trigger(Event:Exclusive_Unblock, in_msg.Address);
- } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
- trigger(Event:Dirty_Writeback, in_msg.Address);
- } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_ACK) {
- trigger(Event:Clean_Writeback, in_msg.Address);
- } else {
- error("Invalid message");
- }
- }
- }
- }
-
- in_port(requestQueue_in, RequestMsg, requestToDir) {
- if (requestQueue_in.isReady()) {
- peek(requestQueue_in, RequestMsg) {
- if (in_msg.Type == CoherenceRequestType:GETS) {
- trigger(Event:GETS, in_msg.Address);
- } else if (in_msg.Type == CoherenceRequestType:GETX) {
- trigger(Event:GETX, in_msg.Address);
- } else if (in_msg.Type == CoherenceRequestType:PUTX) {
- trigger(Event:PUTX, in_msg.Address);
- } else if (in_msg.Type == CoherenceRequestType:PUTO) {
- trigger(Event:PUTO, in_msg.Address);
- } else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
- trigger(Event:PUTO_SHARERS, in_msg.Address);
- } else {
- error("Invalid message");
- }
- }
- }
- }
-
- // off-chip memory request/response is done
- in_port(memQueue_in, MemoryMsg, memBuffer) {
- if (memQueue_in.isReady()) {
- peek(memQueue_in, MemoryMsg) {
- if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
- trigger(Event:Memory_Data, in_msg.Address);
- } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
- trigger(Event:Memory_Ack, in_msg.Address);
- } else {
- DEBUG_EXPR(in_msg.Type);
- error("Invalid message");
- }
- }
- }
- }
-
- // Actions
-
- action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
- peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceRequestType:WB_ACK;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
- peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceRequestType:WB_NACK;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(c_clearOwner, "c", desc="Clear the owner field") {
- directory[address].Owner.clear();
- }
-
- action(c_moveOwnerToSharer, "cc", desc="Move owner to sharers") {
- directory[address].Sharers.addNetDest(directory[address].Owner);
- directory[address].Owner.clear();
- }
-
- action(cc_clearSharers, "\c", desc="Clear the sharers field") {
- directory[address].Sharers.clear();
- }
-
- action(d_sendDataMsg, "d", desc="Send data to requestor") {
- peek(memQueue_in, MemoryMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="1") {
- out_msg.Address := address;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:Directory;
- out_msg.Destination.add(in_msg.OriginalRequestorMachId);
- //out_msg.DataBlk := directory[in_msg.Address].DataBlk;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Dirty := false; // By definition, the block is now clean
- out_msg.Acks := in_msg.Acks;
- if (in_msg.ReadX) {
- out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
- } else {
- out_msg.Type := CoherenceResponseType:DATA;
- }
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- }
-
- action(e_ownerIsUnblocker, "e", desc="The owner is now the unblocker") {
- peek(unblockNetwork_in, ResponseMsg) {
- directory[address].Owner.clear();
- directory[address].Owner.add(in_msg.Sender);
- }
- }
-
- action(f_forwardRequest, "f", desc="Forward request to owner") {
- peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := in_msg.Type;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination.addNetDest(directory[in_msg.Address].Owner);
- out_msg.Acks := directory[address].Sharers.count();
- if (directory[address].Sharers.isElement(in_msg.Requestor)) {
- out_msg.Acks := out_msg.Acks - 1;
- }
- out_msg.MessageSize := MessageSizeType:Forwarded_Control;
- }
- }
- }
-
- action(g_sendInvalidations, "g", desc="Send invalidations to sharers, not including the requester") {
- peek(requestQueue_in, RequestMsg) {
- if ((directory[in_msg.Address].Sharers.count() > 1) ||
- ((directory[in_msg.Address].Sharers.count() > 0) && (directory[in_msg.Address].Sharers.isElement(in_msg.Requestor) == false))) {
- enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceRequestType:INV;
- out_msg.Requestor := in_msg.Requestor;
- // out_msg.Destination := directory[in_msg.Address].Sharers;
- out_msg.Destination.addNetDest(directory[in_msg.Address].Sharers);
- out_msg.Destination.remove(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Invalidate_Control;
- }
- }
- }
- }
-
- action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
- requestQueue_in.dequeue();
- }
-
- action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
- unblockNetwork_in.dequeue();
- }
-
- action(l_writeDataToMemory, "l", desc="Write PUTX/PUTO data to memory") {
- peek(unblockNetwork_in, ResponseMsg) {
- assert(in_msg.Dirty);
- assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
- directory[in_msg.Address].DataBlk := in_msg.DataBlk;
- DEBUG_EXPR(in_msg.Address);
- DEBUG_EXPR(in_msg.DataBlk);
- }
- }
-
- action(ll_checkDataInMemory, "\l", desc="Check PUTX/PUTO data is same as in the memory") {
- peek(unblockNetwork_in, ResponseMsg) {
- assert(in_msg.Dirty == false);
- assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
-
- // NOTE: The following check would not be valid in a real
- // implementation. We include the data in the "dataless"
- // message so we can assert the clean data matches the datablock
- // in memory
- assert(directory[in_msg.Address].DataBlk == in_msg.DataBlk);
- }
- }
-
- action(m_addUnlockerToSharers, "m", desc="Add the unlocker to the sharer list") {
- peek(unblockNetwork_in, ResponseMsg) {
- directory[address].Sharers.add(in_msg.Sender);
- }
- }
-
- action(n_incrementOutstanding, "n", desc="Increment outstanding requests") {
- directory[address].WaitingUnblocks := directory[address].WaitingUnblocks + 1;
- }
-
- action(o_decrementOutstanding, "o", desc="Decrement outstanding requests") {
- directory[address].WaitingUnblocks := directory[address].WaitingUnblocks - 1;
- assert(directory[address].WaitingUnblocks >= 0);
- }
-
- action(q_popMemQueue, "q", desc="Pop off-chip request queue") {
- memQueue_in.dequeue();
- }
-
- action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
- peek(requestQueue_in, RequestMsg) {
- enqueue(memQueue_out, MemoryMsg, latency="1") {
- out_msg.Address := address;
- out_msg.Type := MemoryRequestType:MEMORY_READ;
- out_msg.Sender := machineID;
- out_msg.OriginalRequestorMachId := in_msg.Requestor;
- out_msg.DataBlk := directory[in_msg.Address].DataBlk;
- out_msg.MessageSize := in_msg.MessageSize;
- //out_msg.Prefetch := false;
- // These are not used by memory but are passed back here with the read data:
- out_msg.ReadX := (in_msg.Type == CoherenceRequestType:GETS && directory[address].Sharers.count() == 0);
- out_msg.Acks := directory[address].Sharers.count();
- if (directory[address].Sharers.isElement(in_msg.Requestor)) {
- out_msg.Acks := out_msg.Acks - 1;
- }
- DEBUG_EXPR(out_msg);
- }
- }
- }
-
- action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
- peek(unblockNetwork_in, ResponseMsg) {
- enqueue(memQueue_out, MemoryMsg, latency="1") {
- out_msg.Address := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- out_msg.Sender := machineID;
- //out_msg.OriginalRequestorMachId := in_msg.Requestor;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.MessageSize := in_msg.MessageSize;
- //out_msg.Prefetch := false;
- // Not used:
- out_msg.ReadX := false;
- out_msg.Acks := 0;
- DEBUG_EXPR(out_msg);
- }
- }
- }
-
-
- // action(z_stall, "z", desc="Cannot be handled right now.") {
- // Special name recognized as do nothing case
- // }
-
- action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
- requestQueue_in.recycle();
- }
-
- // TRANSITIONS
-
- transition(I, GETX, MM) {
- qf_queueMemoryFetchRequest;
- i_popIncomingRequestQueue;
- }
-
- transition(S, GETX, MM) {
- qf_queueMemoryFetchRequest;
- g_sendInvalidations;
- i_popIncomingRequestQueue;
- }
-
- transition(I, GETS, IS) {
- qf_queueMemoryFetchRequest;
- i_popIncomingRequestQueue;
- }
-
- transition({S, SS}, GETS, SS) {
- qf_queueMemoryFetchRequest;
- n_incrementOutstanding;
- i_popIncomingRequestQueue;
- }
-
- transition({I, S}, PUTO) {
- b_sendWriteBackNack;
- i_popIncomingRequestQueue;
- }
-
- transition({I, S, O}, PUTX) {
- b_sendWriteBackNack;
- i_popIncomingRequestQueue;
- }
-
- transition(O, GETX, MM) {
- f_forwardRequest;
- g_sendInvalidations;
- i_popIncomingRequestQueue;
- }
-
- transition({O, OO}, GETS, OO) {
- f_forwardRequest;
- n_incrementOutstanding;
- i_popIncomingRequestQueue;
- }
-
- transition(M, GETX, MM) {
- f_forwardRequest;
- i_popIncomingRequestQueue;
- }
-
- transition(M, GETS, MO) {
- f_forwardRequest;
- i_popIncomingRequestQueue;
- }
-
- transition(M, PUTX, MI) {
- a_sendWriteBackAck;
- i_popIncomingRequestQueue;
- }
-
- // happens if M->O transition happens on-chip
- transition(M, PUTO, MI) {
- a_sendWriteBackAck;
- i_popIncomingRequestQueue;
- }
-
- transition(M, PUTO_SHARERS, MIS) {
- a_sendWriteBackAck;
- i_popIncomingRequestQueue;
- }
-
- transition(O, PUTO, OS) {
- a_sendWriteBackAck;
- i_popIncomingRequestQueue;
- }
-
- transition(O, PUTO_SHARERS, OSS) {
- a_sendWriteBackAck;
- i_popIncomingRequestQueue;
- }
-
-
- transition({MM, MO, MI, MIS, OS, OSS}, {GETS, GETX, PUTO, PUTO_SHARERS, PUTX}) {
- zz_recycleRequest;
- }
-
- transition({MM, MO}, Exclusive_Unblock, M) {
- cc_clearSharers;
- e_ownerIsUnblocker;
- j_popIncomingUnblockQueue;
- }
-
- transition(MO, Unblock, O) {
- m_addUnlockerToSharers;
- j_popIncomingUnblockQueue;
- }
-
- transition({IS, SS, OO}, {GETX, PUTO, PUTO_SHARERS, PUTX}) {
- zz_recycleRequest;
- }
-
- transition(IS, GETS) {
- zz_recycleRequest;
- }
-
- transition(IS, Unblock, S) {
- m_addUnlockerToSharers;
- j_popIncomingUnblockQueue;
- }
-
- transition(IS, Exclusive_Unblock, M) {
- cc_clearSharers;
- e_ownerIsUnblocker;
- j_popIncomingUnblockQueue;
- }
-
- transition(SS, Unblock) {
- m_addUnlockerToSharers;
- o_decrementOutstanding;
- j_popIncomingUnblockQueue;
- }
-
- transition(SS, Last_Unblock, S) {
- m_addUnlockerToSharers;
- o_decrementOutstanding;
- j_popIncomingUnblockQueue;
- }
-
- transition(OO, Unblock) {
- m_addUnlockerToSharers;
- o_decrementOutstanding;
- j_popIncomingUnblockQueue;
- }
-
- transition(OO, Last_Unblock, O) {
- m_addUnlockerToSharers;
- o_decrementOutstanding;
- j_popIncomingUnblockQueue;
- }
-
- transition(MI, Dirty_Writeback, I) {
- c_clearOwner;
- cc_clearSharers;
- l_writeDataToMemory;
- qw_queueMemoryWBRequest;
- j_popIncomingUnblockQueue;
- }
-
- transition(MIS, Dirty_Writeback, S) {
- c_moveOwnerToSharer;
- l_writeDataToMemory;
- qw_queueMemoryWBRequest;
- j_popIncomingUnblockQueue;
- }
-
- transition(MIS, Clean_Writeback, S) {
- c_moveOwnerToSharer;
- j_popIncomingUnblockQueue;
- }
-
- transition(OS, Dirty_Writeback, S) {
- c_clearOwner;
- l_writeDataToMemory;
- qw_queueMemoryWBRequest;
- j_popIncomingUnblockQueue;
- }
-
- transition(OSS, Dirty_Writeback, S) {
- c_moveOwnerToSharer;
- l_writeDataToMemory;
- qw_queueMemoryWBRequest;
- j_popIncomingUnblockQueue;
- }
-
- transition(OSS, Clean_Writeback, S) {
- c_moveOwnerToSharer;
- j_popIncomingUnblockQueue;
- }
-
- transition(MI, Clean_Writeback, I) {
- c_clearOwner;
- cc_clearSharers;
- ll_checkDataInMemory;
- j_popIncomingUnblockQueue;
- }
-
- transition(OS, Clean_Writeback, S) {
- c_clearOwner;
- ll_checkDataInMemory;
- j_popIncomingUnblockQueue;
- }
-
- transition({MI, MIS}, Unblock, M) {
- j_popIncomingUnblockQueue;
- }
-
- transition({OS, OSS}, Unblock, O) {
- j_popIncomingUnblockQueue;
- }
-
- transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS}, Memory_Data) {
- d_sendDataMsg;
- q_popMemQueue;
- }
-
- transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS}, Memory_Ack) {
- //a_sendAck;
- q_popMemQueue;
- }
-
-}
+++ /dev/null
-MOESI_CMP_directory-msg.sm
-MOESI_CMP_directory-L2cache.sm
-MOESI_CMP_directory-L1cache.sm
-MOESI_CMP_directory_m-dir.sm
-standard_CMP-protocol.sm
// Mapping functions
// NodeID map_address_to_node(Address addr);
+MachineID mapAddressToRange(Address addr, MachineType type, int low, int high);
MachineID map_Address_to_DMA(Address addr);
MachineID map_Address_to_Directory(Address addr);
NodeID map_Address_to_DirectoryNode(Address addr);
-MachineID map_Address_to_CentralArbiterNode(Address addr);
-NodeID oldmap_L1RubyNode_to_L2Cache(Address addr, NodeID L1RubyNode);
-MachineID map_L1CacheMachId_to_L2Cache(Address addr, MachineID L1CacheMachId);
-MachineID map_L2ChipId_to_L2Cache(Address addr, NodeID L2ChipId);
-// MachineID map_L1RubyNode_to_Arb(NodeID L1RubyNode);
+
MachineID getL1MachineID(NodeID L1RubyNode);
NodeID getChipID(MachineID L2machID);
external_type(uint64, primitive="yes");
external_type(Time, primitive="yes", default="0");
external_type(Address);
-
+external_type(DataBlock, desc="...") {
+ void clear();
+ void copyPartial(DataBlock, int, int);
+}
// Declarations of external types that are common to all protocols
IO, desc="I/O";
REPLACEMENT, desc="Replacement";
COMMIT, desc="Commit version";
- LD_XACT, desc="Transactional Load";
- LDX_XACT, desc="Transactional Load-Intend-To-Modify";
- ST_XACT, desc="Transactional Store";
- BEGIN_XACT, desc="Begin Transaction";
- COMMIT_XACT, desc="Commit Transaction";
- ABORT_XACT, desc="Abort Transaction";
+ NULL, desc="Invalid request type";
+}
+
+enumeration(SequencerRequestType, desc="...", default="SequencerRequestType_NULL") {
+ LD, desc="Load";
+ ST, desc="Store";
NULL, desc="Invalid request type";
}
ST_XACT, desc="Transactional Store";
BEGIN_XACT, desc="Begin Transaction";
COMMIT_XACT, desc="Commit Transaction";
- ABORT_XACT, desc="Abort Transaction";
+ ABORT_XACT, desc="Abort Transaction";
+ DMA_READ, desc="DMA READ";
+ DMA_WRITE, desc="DMA WRITE";
NULL, desc="null request type";
}
PrefetchBit Prefetch, desc="Is this a prefetch request";
}
+// CacheMsg
+structure(SequencerMsg, desc="...", interface="Message") {
+ Address LineAddress, desc="Line address for this request";
+ Address PhysicalAddress, desc="Physical address for this request";
+ SequencerRequestType Type, desc="Type of request (LD, ST, etc)";
+ Address ProgramCounter, desc="Program counter of the instruction that caused the miss";
+ AccessModeType AccessMode, desc="user/supervisor access type";
+ DataBlock DataBlk, desc="Data";
+ int Len, desc="size in bytes of access";
+ PrefetchBit Prefetch, desc="Is this a prefetch request";
+}
+
// MaskPredictorType
enumeration(MaskPredictorType, "MaskPredictorType_Undefined", desc="...") {
Undefined, desc="Undefined";
void profileMemoryCLBsize(int size, int numStaleI);
// used by 2level exclusive cache protocols
-void profile_miss(CacheMsg msg, NodeID id);
+void profile_miss(CacheMsg msg);
// used by non-fast path protocols
void profile_L1Cache_miss(CacheMsg msg, NodeID l1cacheID);
// External Types
-external_type(DataBlock, desc="..."){
- void clear();
- void copyPartial(DataBlock, int, int);
-}
-
external_type(MessageBuffer, buffer="yes", inport="yes", outport="yes");
external_type(OutPort, primitive="yes");
bool distributedPersistentEnabled();
Address setOffset(Address addr, int offset);
Address makeLineAddress(Address addr);
-
+int addressOffset(Address addr);
num_memories = 2
memory_size_mb = 1024
NUM_DMA = 1
+protocol = "MI_example"
# check for overrides
+
for i in 0..$*.size-1 do
- if $*[i] == "-p"
+ if $*[i] == "-c"
+ protocol = $*[i+1]
+ i = i+1
+ elsif $*[i] == "-p"
num_cores = $*[i+1].to_i
i = i+1
elsif $*[i] == "-m"
net_ports = Array.new
iface_ports = Array.new
+assert(protocol == "MI_example", __FILE__ + " cannot be used with protocol " + protocol)
+
+require protocol+".rb"
+
num_cores.times { |n|
cache = SetAssociativeCache.new("l1u_"+n.to_s, L1_CACHE_SIZE_KB, L1_CACHE_LATENCY, L1_CACHE_ASSOC, "PSEUDO_LRU")
sequencer = Sequencer.new("Sequencer_"+n.to_s, cache, cache)
iface_ports << sequencer
net_ports << MI_example_CacheController.new("L1CacheController_"+n.to_s,
"L1Cache",
- [cache],
+ cache,
sequencer)
}
num_memories.times { |n|
NUM_DMA.times { |n|
dma_sequencer = DMASequencer.new("DMASequencer_"+n.to_s)
iface_ports << dma_sequencer
- net_ports << DMAController.new("DMAController_"+n.to_s, "DMA", dma_sequencer)
+ net_ports << MI_example_DMAController.new("DMAController_"+n.to_s, "DMA", dma_sequencer)
}
topology = CrossbarTopology.new("theTopology", net_ports)
--- /dev/null
+
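+# Protocol-specific controller wrappers. Each argv() extends the base
+# controller's argument string with this protocol's machine parameters;
+# the SLICC-generated controllers parse these name/value pairs at
+# construction.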
+class MI_example_CacheController < L1CacheController
+ attr :cache
+ def initialize(obj_name, mach_type, cache, sequencer)
+ super(obj_name, mach_type, [cache], sequencer)
+ @cache = cache
+ end
+ def argv()
+ vec = super()
+ vec += " cache " + @cache.obj_name
+ vec += " issue_latency "+issue_latency.to_s
+ vec += " cache_response_latency "+cache_response_latency.to_s
+ end
+
+end
+
+class MI_example_DirectoryController < DirectoryController
+ def initialize(obj_name, mach_type, directory, memory_control)
+ super(obj_name, mach_type, directory, memory_control)
+ end
+ def argv()
+ vec = super()
+ vec += " directory_latency "+directory_latency.to_s
+ end
+end
+
+class MI_example_DMAController < DMAController
+ def initialize(obj_name, mach_type, dma_sequencer)
+ super(obj_name, mach_type, dma_sequencer)
+ end
+ def argv()
+ vec = super
+ vec += " request_latency "+request_latency.to_s
+ end
+end
--- /dev/null
+
+require "cfg.rb"
+
+def log_int(n)
+ assert(n.is_a?(Fixnum), "log_int takes a number for an argument")
+ counter = 0
+ while n >= 2 do
+ counter += 1
+ n = n >> 1
+ end
+ return counter
+end
+
+
+class MOESI_CMP_directory_L1CacheController < L1CacheController
+ attr :icache, :dcache
+ attr :num_l2_controllers
+ def initialize(obj_name, mach_type, icache, dcache, sequencer, num_l2_controllers)
+ super(obj_name, mach_type, [icache, dcache], sequencer)
+ @icache = icache
+ @dcache = dcache
+ @num_l2_controllers = num_l2_controllers
+ end
+ def argv()
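+ # The L2 bank select bits sit just above the block-offset bits; the
+ # protocol's mapAddressToRange uses this [low, high] bit range to pick
+ # the destination bank for a given address.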
+ num_select_bits = log_int(num_l2_controllers)
+ num_block_bits = log_int(RubySystem.block_size_bytes)
+
+ l2_select_low_bit = num_block_bits
+ l2_select_high_bit = num_block_bits + num_select_bits - 1
+
+ vec = super()
+ vec += " icache " + @icache.obj_name
+ vec += " dcache " + @dcache.obj_name
+ vec += " request_latency "+request_latency().to_s
+ vec += " l2_select_low_bit " + l2_select_low_bit.to_s
+ vec += " l2_select_high_bit " + l2_select_high_bit.to_s
+ return vec
+ end
+end
+
+class MOESI_CMP_directory_L2CacheController < CacheController
+ attr :cache
+ def initialize(obj_name, mach_type, cache)
+ super(obj_name, mach_type, [cache])
+ @cache = cache
+ end
+ def argv()
+ vec = super()
+ vec += " cache " + @cache.obj_name
+ vec += " request_latency "+request_latency().to_s
+ vec += " response_latency "+response_latency().to_s
+ return vec
+ end
+end
+
+
+class MOESI_CMP_directory_DirectoryController < DirectoryController
+ def initialize(obj_name, mach_type, directory, memory_control)
+ super(obj_name, mach_type, directory, memory_control)
+ end
+ def argv()
+ vec = super()
+ vec += " directory_latency "+directory_latency.to_s
+ return vec
+ end
+
+end
+
+class MOESI_CMP_directory_DMAController < DMAController
+ def initialize(obj_name, mach_type, dma_sequencer)
+ super(obj_name, mach_type, dma_sequencer)
+ end
+ def argv()
+ vec = super
+ vec += " request_latency "+request_latency.to_s
+ vec += " response_latency "+response_latency.to_s
+ return vec
+ end
+end
--- /dev/null
+#!/usr/bin/ruby
+#
+# Creates a homogeneous CMP system with split L1 I/D caches per core,
+# a banked shared L2 cache, and a crossbar network. Uses the default parameters listed
+# below, which can be overridden using command line args.
+#
+
+require "cfg.rb"
+
+# default values
+
+num_cores = 2
+L1_ICACHE_SIZE_KB = 32
+L1_ICACHE_ASSOC = 8
+L1_ICACHE_LATENCY = 1
+L1_DCACHE_SIZE_KB = 32
+L1_DCACHE_ASSOC = 8
+L1_DCACHE_LATENCY = 1
+L2_CACHE_SIZE_KB = 2048 # total size (sum of all banks)
+L2_CACHE_ASSOC = 16
+L2_CACHE_LATENCY = 12
+num_l2_banks = num_cores
+num_memories = 1
+memory_size_mb = 1024
+NUM_DMA = 1
+
+protocol = "MOESI_CMP_directory"
+
+# check for overrides
+
+for i in 0..$*.size-1 do
+ if $*[i] == "-c" or $*[i] == "--protocol"
+ i += 1
+ protocol = $*[i]
+ elsif $*[i] == "-m"
+ num_memories = $*[i+1].to_i
+ i = i+1
+ elsif $*[i] == "-p"
+ num_cores = $*[i+1].to_i
+ i = i+1
+ elsif $*[i] == "-s"
+ memory_size_mb = $*[i+1].to_i
+ i = i + 1
+ end
+end
+
+net_ports = Array.new
+iface_ports = Array.new
+
+assert(protocol == "MOESI_CMP_directory", __FILE__+" cannot be used with protocol "+protocol);
+
+require protocol+".rb"
+
+num_cores.times { |n|
+ icache = SetAssociativeCache.new("l1i_"+n.to_s, L1_ICACHE_SIZE_KB, L1_ICACHE_LATENCY, L1_ICACHE_ASSOC, "PSEUDO_LRU")
+ dcache = SetAssociativeCache.new("l1d_"+n.to_s, L1_DCACHE_SIZE_KB, L1_DCACHE_LATENCY, L1_DCACHE_ASSOC, "PSEUDO_LRU")
+ sequencer = Sequencer.new("Sequencer_"+n.to_s, icache, dcache)
+ iface_ports << sequencer
+ if protocol == "MOESI_CMP_directory"
+ net_ports << MOESI_CMP_directory_L1CacheController.new("L1CacheController_"+n.to_s,
+ "L1Cache",
+ icache, dcache,
+ sequencer,
+ num_l2_banks)
+ end
+}
+num_l2_banks.times { |n|
+ cache = SetAssociativeCache.new("l2u_"+n.to_s, L2_CACHE_SIZE_KB/num_l2_banks, L2_CACHE_LATENCY, L2_CACHE_ASSOC, "PSEUDO_LRU")
+ if protocol == "MOESI_CMP_directory"
+ net_ports << MOESI_CMP_directory_L2CacheController.new("L2CacheController_"+n.to_s,
+ "L2Cache",
+ cache)
+ end
+}
+num_memories.times { |n|
+ directory = DirectoryMemory.new("DirectoryMemory_"+n.to_s, memory_size_mb/num_memories)
+ memory_control = MemoryControl.new("MemoryControl_"+n.to_s)
+ if protocol == "MOESI_CMP_directory"
+ net_ports << MOESI_CMP_directory_DirectoryController.new("DirectoryController_"+n.to_s,
+ "Directory",
+ directory,
+ memory_control)
+ end
+}
+NUM_DMA.times { |n|
+ dma_sequencer = DMASequencer.new("DMASequencer_"+n.to_s)
+ iface_ports << dma_sequencer
+ if protocol == "MOESI_CMP_directory"
+ net_ports << MOESI_CMP_directory_DMAController.new("DMAController_"+n.to_s,
+ "DMA",
+ dma_sequencer)
+ end
+}
+
+topology = CrossbarTopology.new("theTopology", net_ports)
+on_chip_net = Network.new("theNetwork", topology)
+
+RubySystem.init(iface_ports, on_chip_net)
end
}
str += LibRubyObject.printConstructors
+ #puts str.gsub('%',' ').gsub('#','\n')
return str
end
class CacheController < NetPort
- @@total_cache_controllers = 0
- attr :caches
- attr :sequencer
- def initialize(obj_name, mach_type, caches, sequencer)
+ @@total_cache_controllers = Hash.new
+
+ def initialize(obj_name, mach_type, caches)
super(obj_name, mach_type)
- @caches = caches
- @caches.each { |cache|
+ caches.each { |cache|
cache.controller = self
}
- @sequencer = sequencer
- @sequencer.controller = self
-
- @version = @@total_cache_controllers
- @@total_cache_controllers += 1
- @sequencer.version = @version
- buffer_size()
+ if !@@total_cache_controllers.has_key?(mach_type)
+ @@total_cache_controllers[mach_type] = 0
+ end
+ @version = @@total_cache_controllers[mach_type]
+ @@total_cache_controllers[mach_type] += 1
+
+ # call inherited parameters
+ transitions_per_cycle
+ buffer_size
+ number_of_TBEs
+ recycle_latency
end
def argv()
vec = "version "+@version.to_s
- @caches.each { |cache|
- vec += " cache " + cache.obj_name
- }
- vec += " sequencer "+@sequencer.obj_name
vec += " transitions_per_cycle "+@params[:transitions_per_cycle].to_s
vec += " buffer_size "+@params[:buffer_size].to_s
vec += " number_of_TBEs "+@params[:number_of_TBEs].to_s
-
+ vec += " recycle_latency "+@params[:recycle_latency].to_s
end
def cppClassName()
end
end
+class L1CacheController < CacheController
+ attr :sequencer
+
+ def initialize(obj_name, mach_type, caches, sequencer)
+ super(obj_name, mach_type, caches)
+
+ @sequencer = sequencer
+ @sequencer.controller = self
+ @sequencer.version = @version
+ end
+
+ def argv()
+ vec = super()
+ vec += " sequencer "+@sequencer.obj_name
+ end
+end
+
class DirectoryController < NetPort
@@total_directory_controllers = 0
attr :directory
end
def argv()
- "version "+@version.to_s+" dma_sequencer "+@dma_sequencer.obj_name+" transitions_per_cycle "+@params[:transitions_per_cycle].to_s + " buffer_size "+@params[:buffer_size].to_s + " number_of_TBEs "+@params[:number_of_TBEs].to_s
+ "version "+@version.to_s+" dma_sequencer "+@dma_sequencer.obj_name+" transitions_per_cycle "+@params[:transitions_per_cycle].to_s + " buffer_size "+@params[:buffer_size].to_s + " number_of_TBEs "+@params[:number_of_TBEs].to_s + " recycle_latency "+@params[:recycle_latency].to_s
end
def cppClassName()
end
def printTopology()
- topology.printFile
+ topology().printFile
end
def cppClassName()
"SimpleNetwork"
end
-class MI_example_CacheController < CacheController
- def initialize(obj_name, mach_type, caches, sequencer)
- super(obj_name, mach_type, caches, sequencer)
- end
- def argv()
- vec = super()
- vec += " issue_latency "+issue_latency.to_s
- vec += " cache_response_latency "+cache_response_latency.to_s
- end
-
-end
-
-class MI_example_DirectoryController < DirectoryController
- def initialize(obj_name, mach_type, directory, memory_control)
- super(obj_name, mach_type, directory, memory_control)
- end
- def argv()
- vec = super()
- vec += " to_mem_ctrl_latency "+to_mem_ctrl_latency.to_s
- vec += " directory_latency "+directory_latency.to_s
- vec += " memory_latency "+memory_latency.to_s
- end
-
-end
-
#added by SS
class GarnetNetwork < Network
def initialize(name, topo)
default_param :all_instructions, Boolean, false
end
-#added by SS
-class MI_example_CacheController < CacheController
- default_param :issue_latency, Integer, 2
- default_param :cache_response_latency, Integer, 12
-end
-
-class MI_example_DirectoryController < DirectoryController
- default_param :to_mem_ctrl_latency, Integer, 1
- default_param :directory_latency, Integer, 6
- default_param :memory_latency, Integer, 158
-end
-
-
#added by SS
class MemoryControl < LibRubyObject
end
+###### Protocols #######
+
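+# Per-protocol defaults for the machine parameters declared in each
+# protocol's SLICC machine(...) header.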
+## MI_example protocol
+
+class MI_example_CacheController < L1CacheController
+ default_param :issue_latency, Integer, 2
+ default_param :cache_response_latency, Integer, 12
+end
+
+class MI_example_DirectoryController < DirectoryController
+ default_param :directory_latency, Integer, 6
+end
+
+class MI_example_DMAController < DMAController
+ default_param :request_latency, Integer, 6
+end
+
+## MOESI_CMP_directory protocol
+
+class MOESI_CMP_directory_L1CacheController < L1CacheController
+ default_param :request_latency, Integer, 2
+end
+
+class MOESI_CMP_directory_L2CacheController < CacheController
+ default_param :request_latency, Integer, 2
+ default_param :response_latency, Integer, 2
+end
+
+class MOESI_CMP_directory_DirectoryController < DirectoryController
+ default_param :directory_latency, Integer, 6
+end
+
+class MOESI_CMP_directory_DMAController < DMAController
+ default_param :request_latency, Integer, 6
+ default_param :response_latency, Integer, 6
+end
+
class RubySystem
# Random seed used by the simulation. If set to "rand", the seed
return dma;
}
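+// Interleave machines of the given type on address bits [low_bit, high_bit],
+// e.g. selecting an L2 bank from the bits just above the block offset.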
+inline
+MachineID mapAddressToRange(const Address & addr, MachineType type, int low_bit, int high_bit)
+{
+ MachineID mach = {type, 0};
+ mach.num = addr.bitSelect(low_bit, high_bit);
+ return mach;
+}
+
extern inline NodeID machineIDToNodeID(MachineID machID)
{
return machID.num;
return result;
}
+extern inline int addressOffset(Address addr)
+{
+ return addr.getOffset();
+}
+
#endif //SLICC_UTIL_H
#include "mem/ruby/slicc_interface/AbstractController.hh"
/* SLICC generated types */
-#include "mem/protocol/DMARequestMsg.hh"
-#include "mem/protocol/DMARequestType.hh"
-#include "mem/protocol/DMAResponseMsg.hh"
+#include "mem/protocol/SequencerMsg.hh"
+#include "mem/protocol/SequencerRequestType.hh"
#include "mem/ruby/system/System.hh"
DMASequencer::DMASequencer(const string & name)
active_request.bytes_issued = 0;
active_request.id = makeUniqueRequestID();
- DMARequestMsg msg;
+ SequencerMsg msg;
msg.getPhysicalAddress() = Address(paddr);
msg.getLineAddress() = line_address(msg.getPhysicalAddress());
- msg.getType() = write ? DMARequestType_WRITE : DMARequestType_READ;
- msg.getOffset() = paddr & m_data_block_mask;
- msg.getLen() = (msg.getOffset() + len) <= RubySystem::getBlockSizeBytes() ?
+ msg.getType() = write ? SequencerRequestType_ST : SequencerRequestType_LD;
+ int offset = paddr & m_data_block_mask;
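+ // Clamp the first chunk at the cache-block boundary; the remainder of
+ // the request is issued block-by-block as responses come back.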
+ msg.getLen() = (offset + len) <= RubySystem::getBlockSizeBytes() ?
len :
- RubySystem::getBlockSizeBytes() - msg.getOffset();
- if (write) {
- msg.getType() = DMARequestType_WRITE;
- msg.getDataBlk().setData(data, msg.getOffset(), msg.getLen());
- } else {
- msg.getType() = DMARequestType_READ;
- }
+ RubySystem::getBlockSizeBytes() - offset;
+ if (write)
+ msg.getDataBlk().setData(data, offset, msg.getLen());
m_mandatory_q_ptr->enqueue(msg);
active_request.bytes_issued += msg.getLen();
return;
}
- DMARequestMsg msg;
+ SequencerMsg msg;
msg.getPhysicalAddress() = Address(active_request.start_paddr +
active_request.bytes_completed);
assert((msg.getPhysicalAddress().getAddress() & m_data_block_mask) == 0);
msg.getLineAddress() = line_address(msg.getPhysicalAddress());
- msg.getOffset() = 0;
- msg.getType() = (active_request.write ? DMARequestType_WRITE :
- DMARequestType_READ);
+ msg.getType() = (active_request.write ? SequencerRequestType_ST :
+ SequencerRequestType_LD);
msg.getLen() = (active_request.len -
active_request.bytes_completed < RubySystem::getBlockSizeBytes() ?
active_request.len - active_request.bytes_completed :
if (active_request.write) {
msg.getDataBlk().setData(&active_request.data[active_request.bytes_completed],
0, msg.getLen());
- msg.getType() = DMARequestType_WRITE;
+ msg.getType() = SequencerRequestType_ST;
} else {
- msg.getType() = DMARequestType_READ;
+ msg.getType() = SequencerRequestType_LD;
}
m_mandatory_q_ptr->enqueue(msg);
active_request.bytes_issued += msg.getLen();
if ( (*it) == "version" )
m_version = atoi( (*(++it)).c_str() );
else if ( (*it) == "size_mb" ) {
- m_size_bytes = atoi((*(++it)).c_str()) * (1<<20);
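+ // compute in 64 bits so memories of 4 GB or more do not overflow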
+ m_size_bytes = atoi((*(++it)).c_str()) * static_cast<uint64>(1<<20);
m_size_bits = log_int(m_size_bytes);
} else if ( (*it) == "controller" ) {
m_controller = RubySystem::getController((*(++it)));
- } else
+ } else {
+ cerr << "DirectoryMemory: Unknown config parameter: " << (*it) << endl;
assert(0);
+ }
}
assert(m_controller != NULL);
int mapAddressToLocalIdx(PhysAddress address);
static int mapAddressToDirectoryVersion(PhysAddress address);
- int getSize() { return m_size_bytes; }
+ uint64 getSize() { return m_size_bytes; }
// Public Methods
void printConfig(ostream& out) const;
// Data Members (m_ prefix)
Directory_Entry **m_entries;
// int m_size; // # of memory module blocks this directory is responsible for
- uint32 m_size_bytes;
- uint32 m_size_bits;
+ uint64 m_size_bytes;
+ uint64 m_size_bits;
int m_num_entries;
int m_version;
#include "mem/gems_common/Map.hh"
#include "mem/protocol/AccessPermission.hh"
#include "mem/ruby/common/Address.hh"
-#include "mem/ruby/slicc_interface/AbstractChip.hh"
template<class ENTRY>
class PerfectCacheLineState {
ENTRY m_entry;
};
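+// Minimal stub so containers holding PerfectCacheLineState (e.g. the Map
+// below) can be streamed; nothing useful is printed.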
+template<class ENTRY>
+extern inline
+ostream& operator<<(ostream& out, const PerfectCacheLineState<ENTRY>& obj)
+{
+ return out;
+}
+
template<class ENTRY>
class PerfectCacheMemory {
public:
// Constructors
- PerfectCacheMemory(AbstractChip* chip_ptr);
+ PerfectCacheMemory();
// Destructor
//~PerfectCacheMemory();
// Data Members (m_prefix)
Map<Address, PerfectCacheLineState<ENTRY> > m_map;
- AbstractChip* m_chip_ptr;
};
// Output operator declaration
template<class ENTRY>
extern inline
-PerfectCacheMemory<ENTRY>::PerfectCacheMemory(AbstractChip* chip_ptr)
+PerfectCacheMemory<ENTRY>::PerfectCacheMemory()
{
- m_chip_ptr = chip_ptr;
}
// STATIC METHODS
static RubyPort* getPortOnly(const string & name) {
assert(m_ports.count(name) == 1); return m_ports[name]; }
static RubyPort* getPort(const string & name, void (*hit_callback)(int64_t)) {
+ if (m_ports.count(name) != 1){
+ cerr << "Port " << name << " has " << m_ports.count(name) << " instances" << endl;
+ }
assert(m_ports.count(name) == 1); m_ports[name]->registerHitCallback(hit_callback); return m_ports[name]; }
static Network* getNetwork() { assert(m_network_ptr != NULL); return m_network_ptr; }
static Topology* getTopology(const string & name) { assert(m_topologies.count(name) == 1); return m_topologies[name]; }
#include "mem/ruby/system/TimerTable.hh"
#include "mem/ruby/eventqueue/RubyEventQueue.hh"
-TimerTable::TimerTable(Chip* chip_ptr)
+TimerTable::TimerTable()
{
- assert(chip_ptr != NULL);
m_consumer_ptr = NULL;
- m_chip_ptr = chip_ptr;
m_next_valid = false;
m_next_address = Address(0);
m_next_time = 0;
#include "mem/gems_common/Map.hh"
#include "mem/ruby/common/Address.hh"
class Consumer;
-class Chip;
class TimerTable {
public:
// Constructors
- TimerTable(Chip* chip_ptr);
+ TimerTable();
// Destructor
//~TimerTable();
// Data Members (m_prefix)
Map<Address, Time> m_map;
- Chip* m_chip_ptr;
mutable bool m_next_valid;
mutable Time m_next_time; // Only valid if m_next_valid is true
mutable Address m_next_address; // Only valid if m_next_valid is true
// Constructors
AST(Map<string, string> pairs) { m_pairs = pairs; };
AST() {};
-
+
// Destructor
virtual ~AST() {};
-
+
// Public Methods
virtual void print(ostream& out) const = 0;
void error(string err_msg) const { m_location.error(err_msg); };
string embedError(string err_msg) const { return m_location.embedError(err_msg); };
void warning(string err_msg) const { m_location.warning(err_msg); };
-
+
const Location& getLocation() const { return m_location; };
-
+
const Map<string, string>& getPairs() const { return m_pairs; };
Map<string, string>& getPairs() { return m_pairs; };
-
+
private:
// Private Methods
-
+
// Private copy constructor and assignment operator
// AST(const AST& obj);
// AST& operator=(const AST& obj);
-
+
// Data Members (m_ prefix)
Location m_location;
Map<string, string> m_pairs;
*
*/
+
#include "mem/slicc/ast/ActionDeclAST.hh"
#include "mem/slicc/symbols/Action.hh"
+#include "mem/slicc/ast/StatementListAST.hh"
ActionDeclAST::ActionDeclAST(string* ident_ptr,
PairListAST* pairs_ptr,
#include "mem/slicc/slicc_global.hh"
#include "mem/slicc/ast/DeclAST.hh"
-#include "mem/slicc/ast/StatementListAST.hh"
+
+class StatementListAST;
class ActionDeclAST : public DeclAST {
public:
code += ".enqueue(out_msg";
if (getPairs().exist("latency")) {
- code += ", m_LATENCY_" + getPairs().lookup("latency");
+ // The latency pair is either a numeric literal (emitted verbatim) or the
+ // name of a machine parameter (emitted as the member variable m_<name>).
+ string val = getPairs().lookup("latency");
+ bool is_number = true;
+ for (size_t i = 0; i < val.size(); i++)
+ if (!isdigit(val[i])) is_number = false;
+ if (is_number)
+ code += ", " + val;
+ else
+ code += ", m_" + val;
}
code += ");\n";
#include "mem/slicc/ast/FormalParamAST.hh"
#include "mem/slicc/ast/StatementAST.hh"
+#include "mem/slicc/ast/TypeAST.hh"
#include "mem/slicc/symbols/SymbolTable.hh"
FormalParamAST::~FormalParamAST()
delete m_type_ast_ptr;
}
+string FormalParamAST::getTypeName() const
+{
+ return m_type_ast_ptr->toString();
+}
+
+Type* FormalParamAST::getType() const
+{
+ return m_type_ast_ptr->lookupType();
+}
+
Type* FormalParamAST::generate(string& code) const
{
string param = "param_" + *m_ident_ptr;
#define FORMALPARAMAST_H
#include "mem/slicc/slicc_global.hh"
-#include "mem/slicc/ast/TypeAST.hh"
+#include "mem/slicc/ast/AST.hh"
+
+class TypeAST;
class FormalParamAST : public AST {
Type* generate(string& code) const;
void print(ostream& out) const { out << "[FormalParamAST: " << *m_ident_ptr << "]"; }
string getName() const { return *m_ident_ptr; }
+ string getTypeName() const;
+ Type* getType() const;
private:
// Private Methods
*/
#include "mem/slicc/ast/FuncDeclAST.hh"
+#include "mem/slicc/ast/FormalParamAST.hh"
#include "mem/slicc/symbols/SymbolTable.hh"
#include "mem/slicc/main.hh"
#include "mem/slicc/ast/DeclAST.hh"
#include "mem/slicc/ast/TypeFieldAST.hh"
#include "mem/slicc/ast/TypeAST.hh"
-#include "mem/slicc/ast/FormalParamAST.hh"
+
+class FormalParamsAST;
class FuncDeclAST : public DeclAST {
public:
*/
#include "mem/slicc/ast/MachineAST.hh"
+#include "mem/slicc/ast/FormalParamAST.hh"
#include "mem/slicc/symbols/SymbolTable.hh"
MachineAST::MachineAST(string* ident_ptr,
PairListAST* pairs_ptr,
- Vector<TypeFieldAST*>* config_params_ptr,
- std::vector<std::string*>* latency_vector,
+ Vector<FormalParamAST*>* config_parameters,
DeclListAST* decl_list_ptr)
: DeclAST(pairs_ptr)
{
m_ident_ptr = ident_ptr;
m_pairs_ptr = pairs_ptr;
- m_config_params_ptr = config_params_ptr;
+ m_config_parameters = config_parameters;
m_decl_list_ptr = decl_list_ptr;
- m_latency_vector = latency_vector;
}
MachineAST::~MachineAST()
g_sym_table.pushFrame();
// Create a new machine
- machine_ptr = new StateMachine(*m_ident_ptr, getLocation(), getPairs(), m_latency_vector);
+ machine_ptr = new StateMachine(*m_ident_ptr, getLocation(), getPairs(), m_config_parameters);
g_sym_table.newCurrentMachine(machine_ptr);
// Generate code for all the internal decls
#include "mem/slicc/ast/TypeFieldAST.hh"
#include "mem/slicc/symbols/StateMachine.hh"
+class FormalParamAST;
+
class MachineAST : public DeclAST {
public:
// Constructors
MachineAST(string* ident_ptr,
PairListAST* pairs_ptr,
- Vector<TypeFieldAST*>* config_params_ptr,
- std::vector<std::string*>* latency_vector,
+ Vector<FormalParamAST*>* config_parameters,
DeclListAST* decl_list_ptr);
// Destructor
MachineAST& operator=(const MachineAST& obj);
// Data Members (m_ prefix)
- std::vector<std::string*>* m_latency_vector;
+ Vector<FormalParamAST*>* m_config_parameters;
string* m_ident_ptr;
DeclListAST* m_decl_list_ptr;
- Vector<TypeFieldAST*>* m_config_params_ptr;
PairListAST* m_pairs_ptr;
};
'NOT', 'AND', 'OR',
'PLUS', 'DASH', 'STAR', 'SLASH',
'DOUBLE_COLON', 'SEMICOLON',
- 'ASSIGN', 'DOT', 'LATENCY',
+ 'ASSIGN', 'DOT',
'IDENT', 'LIT_BOOL', 'FLOATNUMBER', 'NUMBER', 'STRING' ]
tokens += reserved.values()
| d_func_def"""
p[0] = p[1]
-def p_latency(p):
- """latency : LATENCY"""
- pass
-
-def p_latencies(p):
- """latencies : latency latencies
- | empty"""
- return []
-
def p_d_machine(p):
- """d_machine : MACHINE '(' ident pair_l ')' '{' decl_l '}'
- | MACHINE '(' ident pair_l ')' ':' type_members '{' decl_l '}'
- | MACHINE '(' ident pair_l ')' ':' latencies '{' decl_l '}'"""
+ """d_machine : MACHINE '(' ident pair_l ')' ':' param_l '{' decl_l '}'"""
# the single remaining production: p[7] is param_l, p[9] is decl_l
param_l = p[7]
decl_l = p[9]
for filename in filenames:
lex.lexer.lineno = 1
try:
+ print "parsing", filename
results = yacc.parse(file(filename, 'r').read())
- except (TokenError, ParseError), e:
- raise type(e), tuple([filename] + [ i for i in e ])
+ except (ParseError, TokenError), e:
+ print "File", filename, ":", e
+ raise
+
for result in results:
result.add(hh, cc)
%type <expr_ptr> expr literal enumeration
%type <expr_vector_ptr> expr_list
-%type <stdstring_vector_ptr> myrule
-
%type <pair_ptr> pair
%type <pair_list_ptr> pair_list pairs
| { $$ = new Vector<DeclAST*>; }
;
-decl: MACHINE_DECL '(' ident pair_list ')' ':' myrule '{' decl_list '}' { $$ = new MachineAST($3, $4, NULL, $7, $9); }
-// | MACHINE_DECL '(' ident pair_list ')' ':' type_members '{' decl_list '}' { $$ = new MachineAST($3, $4, $7, string_vector, $9); }
- | MACHINE_DECL '(' ident pair_list ')' '{' decl_list '}' { $$ = new MachineAST($3, $4, NULL, new vector<string*>(), $7); }
+decl: MACHINE_DECL '(' ident pair_list ')' ':' formal_param_list '{' decl_list '}' { $$ = new MachineAST($3, $4, $7, $9); }
| ACTION_DECL '(' ident pair_list ')' statement_list { $$ = new ActionDeclAST($3, $4, $6); }
| IN_PORT_DECL '(' ident ',' type ',' var pair_list ')' statement_list { $$ = new InPortDeclAST($3, $5, $7, $8, $10); }
| OUT_PORT_DECL '(' ident ',' type ',' var pair_list ')' SEMICOLON { $$ = new OutPortDeclAST($3, $5, $7, $8); }
field: ident { $$ = $1; }
;
-myrule: myrule IDENT { $1->push_back($2); }
- | IDENT { $$ = new vector<string*>(1, $1); }
- ;
-
%%
extern FILE *yyin;
#include "mem/slicc/symbols/SymbolTable.hh"
#include "mem/gems_common/util.hh"
#include "mem/gems_common/Vector.hh"
+#include "mem/slicc/ast/FormalParamAST.hh"
#include <set>
-StateMachine::StateMachine(string ident, const Location& location, const Map<string, string>& pairs, std::vector<std::string*>* latency_vector)
+StateMachine::StateMachine(string ident, const Location& location, const Map<string, string>& pairs, Vector<FormalParamAST*>* config_parameters)
: Symbol(ident, location, pairs)
{
m_table_built = false;
- m_latency_vector = *latency_vector;
+ m_config_parameters = config_parameters;
+
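+ // register each config parameter as a Var whose generated C++ name is
+ // m_<param>, so SLICC code in this machine can reference it by name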
+ for (int i = 0; i < m_config_parameters->size(); i++) {
+ Var* var = new Var(m_config_parameters->ref(i)->getName(),
+ location,
+ m_config_parameters->ref(i)->getType(),
+ "m_" + m_config_parameters->ref(i)->getName(),
+ Map<string, string>(),
+ this);
+ g_sym_table.registerSym(m_config_parameters->ref(i)->getName(), var);
+ }
}
StateMachine::~StateMachine()
out << "private:" << endl;
//added by SS
// found_to_mem = 0;
- std::vector<std::string*>::const_iterator it;
- for(it=m_latency_vector.begin();it!=m_latency_vector.end();it++){
- out << " int m_" << (*it)->c_str() << ";" << endl;
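+ // emit an "int m_<param>;" member for each config parameter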
+ for (int i = 0; i < m_config_parameters->size(); i++) {
+ out << " int m_" << m_config_parameters->ref(i)->getName() << ";" << endl;
}
if (strncmp(component.c_str(), "L1Cache", 7) == 0) {
out << " bool servicing_atomic;" << endl;
out << " else if (argv[i] == \"number_of_TBEs\") " << endl;
out << " m_number_of_TBEs = atoi(argv[i+1].c_str());" << endl;
- if (m_latency_vector.size()) {
- out << " else { " << endl;
- std::vector<std::string*>::const_iterator it;
- for(it=m_latency_vector.begin();it!=m_latency_vector.end();it++) {
- string str = (*it)->c_str();
- str.erase(0,8);
-//convert to lowercase
- size_t i;
- char* strc = (char*) malloc (str.length()+1);
- strc[str.length()]=0;
- for(i=0; i < str.length(); i++) {
- strc[i] = str.at(i);
- strc[i] = tolower(strc[i]);
- }
- str = strc;
- delete strc;
- out << " if (argv[i] == \"" << str << "\"){" << endl;
- if (str == "to_mem_ctrl_latency")
- out << " m_" << (*it)->c_str() << "=" << "atoi(argv[i+1].c_str())+(random() % 5);" << endl;
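+ // generate an "else if" branch per config parameter, e.g.:
+ // else if (argv[i] == "issue_latency")
+ // m_issue_latency = atoi(argv[i+1].c_str());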
+ if (m_config_parameters->size()) {
+ for (int i = 0; i < m_config_parameters->size(); i++) {
+ out << " else if (argv[i] == \"" << m_config_parameters->ref(i)->getName() << "\")" << endl;
+ if (m_config_parameters->ref(i)->getTypeName() == "int")
+ out << " m_" << m_config_parameters->ref(i)->getName() << "=atoi(argv[i+1].c_str());" << endl;
else
- out << " m_" << (*it)->c_str() << "=" << "atoi(argv[i+1].c_str());" << endl;
-// out << " printf (\"SET m_" << it->c_str() << "= %i \\n \", m_" << it->c_str() << ");" << endl;
- out << " }" << endl;
+ assert(0); // only int parameters are supported right now
}
- out << " }" << endl;
}
out << " }" << endl;
-
out << " m_net_ptr = net_ptr;" << endl;
out << " m_machineID.type = MachineType_" << component << ";" << endl;
out << " m_machineID.num = m_version;" << endl;
-// out << " printf (\"I set m_LATENCY_ISSUE_LATENCY to %i \\n \", m_LATENCY_ISSUE_LATENCY);" << endl;
-// out << " printf (\"I set m_LATENCY_CACHE_RESPONSE_LATENCY to %i \\n \", m_LATENCY_CACHE_RESPONSE_LATENCY);" << endl;
-
// make configuration array
out << " for (size_t i=0; i < argv.size(); i+=2) {" << endl;
out << " if (argv[i] != \"version\") " << endl;
string c_code_string = action.lookupPair("c_code");
-/*
- size_t found = c_code_string.find("RubyConfig::get");
-
- if (found!=string::npos){ //found --> replace it with local access
- //if it is related to latency --> replace it
- std::vector<std::string*>::const_iterator it;
- for(it=m_latency_vector.begin();it!=m_latency_vector.end();it++){
- string str = (*it)->c_str();
- str.erase(0,8);
- size_t fd = c_code_string.find(str, found);
- if (fd!=string::npos && (fd == found+15)){
- string rstr = "m_";
- rstr += (*it)->c_str();
- c_code_string.replace(found,15+str.size()+2,rstr);
- break;
- }
- }
- }
-*/
+
// add here:
if (strncmp(component.c_str(), "L1Cache", 7) == 0) {
if (c_code_string.find("writeCallback") != string::npos) {
class Action;
class Var;
class Func;
+class FormalParamAST;
class StateMachine : public Symbol {
public:
// Constructors
- StateMachine(string ident, const Location& location, const Map<string, string>& pairs, std::vector<std::string*>* latency_vector);
+ StateMachine(string ident, const Location& location, const Map<string, string>& pairs, Vector<FormalParamAST*>* config_parameters);
// Destructor
~StateMachine();
void print(ostream& out) const { out << "[StateMachine: " << toString() << "]" << endl; }
private:
- std::vector<std::string*> m_latency_vector;
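+ // config parameters passed in from MachineAST when the machine is generated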
+ Vector<FormalParamAST*>* m_config_parameters;
// Private Methods
void checkForDuplicate(const Symbol& sym) const;