l1_cntrl = L1Cache_Controller(version = i,
cntrl_id = cntrl_count,
- L1IcacheMemory = l1i_cache,
- L1DcacheMemory = l1d_cache,
+ L1Icache = l1i_cache,
+ L1Dcache = l1d_cache,
l2_select_num_bits = l2_bits,
send_evictions = (
options.cpu_type == "detailed"),
l2_cntrl = L2Cache_Controller(version = i,
cntrl_id = cntrl_count,
- L2cacheMemory = l2_cache,
+ L2cache = l2_cache,
ruby_system = ruby_system)
exec("ruby_system.l2_cntrl%d = l2_cntrl" % i)
-
/*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
machine(L1Cache, "MESI Directory L1 Cache CMP")
: Sequencer * sequencer,
- CacheMemory * L1IcacheMemory,
- CacheMemory * L1DcacheMemory,
+ CacheMemory * L1Icache,
+ CacheMemory * L1Dcache,
Prefetcher * prefetcher = 'NULL',
int l2_select_num_bits,
Cycles l1_request_latency = 2,
// inclusive cache returns L1 entries only
Entry getCacheEntry(Address addr), return_by_pointer="yes" {
- Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory[addr]);
+ Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache[addr]);
if(is_valid(L1Dcache_entry)) {
return L1Dcache_entry;
}
- Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory[addr]);
+ Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache[addr]);
return L1Icache_entry;
}
Entry getL1DCacheEntry(Address addr), return_by_pointer="yes" {
- Entry L1Dcache_entry := static_cast(Entry, "pointer", L1DcacheMemory[addr]);
+ Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache[addr]);
return L1Dcache_entry;
}
Entry getL1ICacheEntry(Address addr), return_by_pointer="yes" {
- Entry L1Icache_entry := static_cast(Entry, "pointer", L1IcacheMemory[addr]);
+ Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache[addr]);
return L1Icache_entry;
}
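These lookup helpers rely on the invariant, asserted again in getState and setState below, that a line is resident in at most one of the two L1 arrays; that is what makes the D-side-first probe order safe. A toy Python model of the contract (container names hypothetical):

def get_cache_entry(addr, l1d, l1i):
    # At most one of the two arrays may hold addr, so consult the
    # D-side first and fall back to the I-side (which may also miss).
    assert not (addr in l1d and addr in l1i)
    return l1d.get(addr) or l1i.get(addr)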
State getState(TBE tbe, Entry cache_entry, Address addr) {
- assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
+ assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);
if(is_valid(tbe)) {
return tbe.TBEState;
}
void setState(TBE tbe, Entry cache_entry, Address addr, State state) {
- assert((L1DcacheMemory.isTagPresent(addr) && L1IcacheMemory.isTagPresent(addr)) == false);
+ assert((L1Dcache.isTagPresent(addr) && L1Icache.isTagPresent(addr)) == false);
// MUST CHANGE
if(is_valid(tbe)) {
L1Dcache_entry, L1_TBEs[in_msg.LineAddress]);
}
- if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
+ if (L1Icache.cacheAvail(in_msg.LineAddress)) {
// L1 doesn't have the line, but we have space for it
// in the L1, so let's see if the L2 has it
trigger(prefetch_request_type_to_event(in_msg.Type),
} else {
// No room in the L1, so we need to make room in the L1
trigger(Event:L1_Replacement,
- L1IcacheMemory.cacheProbe(in_msg.LineAddress),
- getL1ICacheEntry(L1IcacheMemory.cacheProbe(in_msg.LineAddress)),
- L1_TBEs[L1IcacheMemory.cacheProbe(in_msg.LineAddress)]);
+ L1Icache.cacheProbe(in_msg.LineAddress),
+ getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
+ L1_TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
}
} else {
// Data prefetch
L1Icache_entry, L1_TBEs[in_msg.LineAddress]);
}
- if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
+ if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
// L1 doesn't have the line, but we have space for it in
// the L1, so let's see if the L2 has it
trigger(prefetch_request_type_to_event(in_msg.Type),
} else {
// No room in the L1, so we need to make room in the L1
trigger(Event:L1_Replacement,
- L1DcacheMemory.cacheProbe(in_msg.LineAddress),
- getL1DCacheEntry(L1DcacheMemory.cacheProbe(in_msg.LineAddress)),
- L1_TBEs[L1DcacheMemory.cacheProbe(in_msg.LineAddress)]);
+ L1Dcache.cacheProbe(in_msg.LineAddress),
+ getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
+ L1_TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
}
}
}
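The prefetch path above and the demand path below share one shape: if cacheAvail reports a free way in the target set, the request event fires at once; otherwise cacheProbe names the victim and an L1_Replacement event is raised for it first. A compact Python sketch of that pattern, with the callback names invented for illustration:

def service_l1_request(cache, line_addr, fire_request, fire_replacement):
    # Make room in the set before servicing the request, as the
    # SLICC in_port logic does.
    if cache.cache_avail(line_addr):
        fire_request(line_addr)
    else:
        fire_replacement(cache.cache_probe(line_addr))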
if (in_msg.Type == CoherenceRequestType:INV) {
trigger(Event:Inv, in_msg.Address, cache_entry, tbe);
- } else if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:UPGRADE) {
+ } else if (in_msg.Type == CoherenceRequestType:GETX ||
+ in_msg.Type == CoherenceRequestType:UPGRADE) {
// upgrade transforms to GETX due to race
trigger(Event:Fwd_GETX, in_msg.Address, cache_entry, tbe);
} else if (in_msg.Type == CoherenceRequestType:GETS) {
L1Dcache_entry, L1_TBEs[in_msg.LineAddress]);
}
- if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
- // L1 does't have the line, but we have space for it in the L1 so let's see if the L2 has it
+ if (L1Icache.cacheAvail(in_msg.LineAddress)) {
+ // L1 doesn't have the line, but we have space for it
+ // in the L1, so let's see if the L2 has it.
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
L1Icache_entry, L1_TBEs[in_msg.LineAddress]);
} else {
// No room in the L1, so we need to make room in the L1
- trigger(Event:L1_Replacement, L1IcacheMemory.cacheProbe(in_msg.LineAddress),
- getL1ICacheEntry(L1IcacheMemory.cacheProbe(in_msg.LineAddress)),
- L1_TBEs[L1IcacheMemory.cacheProbe(in_msg.LineAddress)]);
+ trigger(Event:L1_Replacement, L1Icache.cacheProbe(in_msg.LineAddress),
+ getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
+ L1_TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
}
}
} else {
L1Icache_entry, L1_TBEs[in_msg.LineAddress]);
}
- if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
- // L1 does't have the line, but we have space for it in the L1 let's see if the L2 has it
+ if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
+ // L1 doesn't have the line, but we have space for it
+ // in the L1, so let's see if the L2 has it.
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
L1Dcache_entry, L1_TBEs[in_msg.LineAddress]);
} else {
// No room in the L1, so we need to make room in the L1
- trigger(Event:L1_Replacement, L1DcacheMemory.cacheProbe(in_msg.LineAddress),
- getL1DCacheEntry(L1DcacheMemory.cacheProbe(in_msg.LineAddress)),
- L1_TBEs[L1DcacheMemory.cacheProbe(in_msg.LineAddress)]);
+ trigger(Event:L1_Replacement, L1Dcache.cacheProbe(in_msg.LineAddress),
+ getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
+ L1_TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
}
}
}
}
action(ff_deallocateL1CacheBlock, "\f", desc="Deallocate L1 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
- if (L1DcacheMemory.isTagPresent(address)) {
- L1DcacheMemory.deallocate(address);
+ if (L1Dcache.isTagPresent(address)) {
+ L1Dcache.deallocate(address);
} else {
- L1IcacheMemory.deallocate(address);
+ L1Icache.deallocate(address);
}
unset_cache_entry();
}
action(oo_allocateL1DCacheBlock, "\o", desc="Set L1 D-cache tag equal to tag of block B.") {
if (is_invalid(cache_entry)) {
- set_cache_entry(L1DcacheMemory.allocate(address, new Entry));
+ set_cache_entry(L1Dcache.allocate(address, new Entry));
}
}
action(pp_allocateL1ICacheBlock, "\p", desc="Set L1 I-cache tag equal to tag of block B.") {
if (is_invalid(cache_entry)) {
- set_cache_entry(L1IcacheMemory.allocate(address, new Entry));
+ set_cache_entry(L1Icache.allocate(address, new Entry));
}
}
}
action(uu_profileInstMiss, "\uim", desc="Profile the demand miss") {
- ++L1IcacheMemory.demand_misses;
+ ++L1Icache.demand_misses;
}
action(uu_profileInstHit, "\uih", desc="Profile the demand hit") {
- ++L1IcacheMemory.demand_hits;
+ ++L1Icache.demand_hits;
}
action(uu_profileDataMiss, "\udm", desc="Profile the demand miss") {
- ++L1DcacheMemory.demand_misses;
+ ++L1Dcache.demand_misses;
}
action(uu_profileDataHit, "\udh", desc="Profile the demand hit") {
- ++L1DcacheMemory.demand_hits;
+ ++L1Dcache.demand_hits;
}
action(po_observeMiss, "\po", desc="Inform the prefetcher about the miss") {
-
/*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
*/
machine(L2Cache, "MESI Directory L2 Cache CMP")
- : CacheMemory * L2cacheMemory,
+ : CacheMemory * L2cache,
Cycles l2_request_latency = 2,
Cycles l2_response_latency = 2,
Cycles to_l1_latency = 1
MT_IIB, AccessPermission:Busy, desc="Blocked for L1_GETS from MT, waiting for unblock and data";
MT_IB, AccessPermission:Busy, desc="Blocked for L1_GETS from MT, got unblock, waiting for data";
MT_SB, AccessPermission:Busy, desc="Blocked for L1_GETS from MT, got data, waiting for unblock";
-
+
}
// EVENTS
// inclusive cache, returns L2 entries only
Entry getCacheEntry(Address addr), return_by_pointer="yes" {
- return static_cast(Entry, "pointer", L2cacheMemory[addr]);
+ return static_cast(Entry, "pointer", L2cache[addr]);
}
std::string getCoherenceRequestTypeStr(CoherenceRequestType type) {
in_msg.Requestor, cache_entry),
in_msg.Address, cache_entry, tbe);
} else {
- if (L2cacheMemory.cacheAvail(in_msg.Address)) {
+ if (L2cache.cacheAvail(in_msg.Address)) {
// L2 doesn't have the line, but we have space for it in the L2
trigger(L1Cache_request_type_to_event(in_msg.Type, in_msg.Address,
in_msg.Requestor, cache_entry),
in_msg.Address, cache_entry, tbe);
} else {
// No room in the L2, so we need to make room before handling the request
- Entry L2cache_entry := getCacheEntry(L2cacheMemory.cacheProbe(in_msg.Address));
+ Entry L2cache_entry := getCacheEntry(L2cache.cacheProbe(in_msg.Address));
if (isDirty(L2cache_entry)) {
- trigger(Event:L2_Replacement, L2cacheMemory.cacheProbe(in_msg.Address),
- L2cache_entry, L2_TBEs[L2cacheMemory.cacheProbe(in_msg.Address)]);
+ trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.Address),
+ L2cache_entry, L2_TBEs[L2cache.cacheProbe(in_msg.Address)]);
} else {
- trigger(Event:L2_Replacement_clean, L2cacheMemory.cacheProbe(in_msg.Address),
- L2cache_entry, L2_TBEs[L2cacheMemory.cacheProbe(in_msg.Address)]);
+ trigger(Event:L2_Replacement_clean, L2cache.cacheProbe(in_msg.Address),
+ L2cache_entry, L2_TBEs[L2cache.cacheProbe(in_msg.Address)]);
}
}
}
}
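The L2 adds one refinement to the same allocate-or-evict shape: the victim's dirty bit selects between two replacement events, since a clean victim can be dropped without a writeback. A sketch, reusing the hypothetical helper style from above:

def service_l2_request(l2, addr, fire_request, fire_repl, fire_repl_clean):
    if l2.cache_avail(addr):
        fire_request(addr)
    else:
        victim = l2.cache_probe(addr)
        # Dirty victims need a writeback; clean victims are dropped.
        if l2.is_dirty(victim):
            fire_repl(victim)          # Event:L2_Replacement
        else:
            fire_repl_clean(victim)    # Event:L2_Replacement_clean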
action(set_setMRU, "\set", desc="set the MRU entry") {
- L2cacheMemory.setMRU(address);
+ L2cache.setMRU(address);
}
action(qq_allocateL2CacheBlock, "\q", desc="Set L2 cache tag equal to tag of block B.") {
if (is_invalid(cache_entry)) {
- set_cache_entry(L2cacheMemory.allocate(address, new Entry));
+ set_cache_entry(L2cache.allocate(address, new Entry));
}
}
action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
- L2cacheMemory.deallocate(address);
+ L2cache.deallocate(address);
unset_cache_entry();
}
}
action(uu_profileMiss, "\um", desc="Profile the demand miss") {
- ++L2cacheMemory.demand_misses;
+ ++L2cache.demand_misses;
}
action(uu_profileHit, "\uh", desc="Profile the demand hit") {
- ++L2cacheMemory.demand_hits;
+ ++L2cache.demand_hits;
}
action(ww_profileMissNoDir, "\w", desc="Profile this transition at the L2 because Dir won't see the request") {
// Transitions from I (Idle)
transition({NP, IS, ISS, IM, SS, M, M_I, I_I, S_I, MT_IB, MT_SB}, L1_PUTX) {
- t_sendWBAck;
+ t_sendWBAck;
jj_popL1RequestQueue;
}
transition({NP, SS, M, MT, M_I, I_I, S_I, IS, ISS, IM, MT_IB, MT_SB}, L1_PUTX_old) {
- t_sendWBAck;
+ t_sendWBAck;
jj_popL1RequestQueue;
}
zz_stallAndWaitL1RequestQueue;
}
- transition({IM, IS, ISS, SS_MB, MT_MB, MT_IIB, MT_IB, MT_SB}, MEM_Inv) {
+ transition({IM, IS, ISS, SS_MB, MT_MB, MT_IIB, MT_IB, MT_SB}, MEM_Inv) {
zn_recycleResponseNetwork;
}
- transition({S_I, M_I, MT_I}, MEM_Inv) {
+ transition({S_I, M_I, MT_I}, MEM_Inv) {
o_popIncomingResponseQueue;
}
k_popUnblockQueue;
kd_wakeUpDependents;
}
-
+
transition(MT_IIB, {L1_PUTX, L1_PUTX_old}){
zz_stallAndWaitL1RequestQueue;
}
}
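The zz_stallAndWaitL1RequestQueue and kd_wakeUpDependents actions used throughout these transitions implement Ruby's stall-and-wait mechanism: a request that cannot be handled in the current transient state is parked in a per-address buffer, and the transition that later resolves that address replays the parked messages. A toy model (all names hypothetical):

from collections import defaultdict

stalled = defaultdict(list)            # per-address parking lot

def stall_and_wait(addr, msg):
    # zz_stallAndWaitL1RequestQueue: hold msg until addr settles.
    stalled[addr].append(msg)

def wake_up_dependents(addr, enqueue):
    # kd_wakeUpDependents: replay everything parked on addr.
    for msg in stalled.pop(addr, []):
        enqueue(msg)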
transition(MCT_I, {WB_Data_clean, Ack_all}, M_I) {
- c_exclusiveCleanReplacement;
+ c_exclusiveCleanReplacement;
o_popIncomingResponseQueue;
}
transition(MCT_I, {L1_PUTX, L1_PUTX_old}){
- zz_stallAndWaitL1RequestQueue;
+ zz_stallAndWaitL1RequestQueue;
}
-
+
// L1 never changed Dirty data
transition(MT_I, Ack_all, M_I) {
ct_exclusiveReplacementFromTBE;
}
transition(MT_I, {L1_PUTX, L1_PUTX_old}){
- zz_stallAndWaitL1RequestQueue;
+ zz_stallAndWaitL1RequestQueue;
}
// possible race between unblock and immediate replacement
-
/*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
// Copied here by aep 12/14/07
-machine(Directory, "MESI_CMP_filter_directory protocol")
+machine(Directory, "MESI_CMP_filter_directory protocol")
: DirectoryMemory * directory,
MemoryControl * memBuffer,
Cycles to_mem_ctrl_latency = 1,
// Base states
I, AccessPermission:Read_Write, desc="dir is the owner and memory is up-to-date, all other copies are Invalid";
ID, AccessPermission:Busy, desc="Intermediate state for DMA_READ when in I";
- ID_W, AccessPermission:Busy, desc="Intermediate state for DMA_WRITE when in I";
+ ID_W, AccessPermission:Busy, desc="Intermediate state for DMA_WRITE when in I";
M, AccessPermission:Maybe_Stale, desc="memory copy may be stale, i.e. other modified copies may exist";
IM, AccessPermission:Busy, desc="Intermediate State I>M";
State DirectoryState, desc="Directory state";
DataBlock DataBlk, desc="data for the block";
NetDest Sharers, desc="Sharers for this block";
- NetDest Owner, desc="Owner of this block";
+ NetDest Owner, desc="Owner of this block";
}
// TBE entries for DMA requests
DataBlock DataBlk, desc="Data to be written (DMA write only)";
int Len, desc="...";
}
-
+
structure(TBETable, external="yes") {
- TBE lookup(Address);
- void allocate(Address);
+ TBE lookup(Address);
+ void allocate(Address);
void deallocate(Address);
bool isPresent(Address);
- }
+ }
// ** OBJECTS **
State getState(TBE tbe, Address addr) {
if (is_valid(tbe)) {
- return tbe.TBEState;
+ return tbe.TBEState;
} else if (directory.isPresent(addr)) {
return getDirectoryEntry(addr).DirectoryState;
} else {
return State:I;
}
- }
-
+ }
+
void setState(TBE tbe, Address addr, State state) {
if (is_valid(tbe)) {
tbe.TBEState := state;
}
-
+
if (directory.isPresent(addr)) {
-
+
if (state == State:I) {
assert(getDirectoryEntry(addr).Owner.count() == 0);
assert(getDirectoryEntry(addr).Sharers.count() == 0);
assert(getDirectoryEntry(addr).Owner.count() == 1);
assert(getDirectoryEntry(addr).Sharers.count() == 0);
}
-
+
getDirectoryEntry(addr).DirectoryState := state;
}
}
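setState re-validates the directory's ownership invariants on every state write: in I no cache holds the line, while in the branch elided from this hunk (presumably M) exactly one owner and no sharers exist. The same checks as a standalone Python predicate:

def check_dir_invariants(state, owner_count, sharer_count):
    # Mirrors the asserts in setState for the two base states shown.
    if state == 'I':
        assert owner_count == 0 and sharer_count == 0
    elif state == 'M':
        assert owner_count == 1 and sharer_count == 0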
action(v_allocateTBE, "v", desc="Allocate TBE") {
peek(requestNetwork_in, RequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs[address]);
+ set_tbe(TBEs[address]);
tbe.DataBlk := in_msg.DataBlk;
tbe.PhysicalAddress := in_msg.Address;
- tbe.Len := in_msg.Len;
+ tbe.Len := in_msg.Len;
}
}
//out_msg.DataBlk := in_msg.DataBlk;
//out_msg.DataBlk.copyPartial(tbe.DataBlk, tbe.Offset, tbe.Len);
out_msg.DataBlk.copyPartial(tbe.DataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
-
+
out_msg.MessageSize := in_msg.MessageSize;
//out_msg.Prefetch := in_msg.Prefetch;
-
+
DPRINTF(RubySlicc, "%s\n", out_msg);
}
}
}
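copyPartial splices only the DMA-written bytes into the outgoing data block: tbe.Len bytes starting at the block offset of the request's physical address, leaving the rest of the block as read from memory. Modeled over plain byte arrays (a sketch, not the DataBlock API):

def copy_partial(dst, src, offset, length):
    # Overwrite dst[offset : offset + length] with the same span of
    # src; every other byte of dst is left untouched.
    dst[offset:offset + length] = src[offset:offset + length]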
action(w_deallocateTBE, "w", desc="Deallocate TBE") {
- TBEs.deallocate(address);
+ TBEs.deallocate(address);
unset_tbe();
}
transition(M_DWR, Data, M_DWRI) {
m_writeDataToMemory;
qw_queueMemoryWBRequest_partialTBE;
- k_popIncomingResponseQueue;
+ k_popIncomingResponseQueue;
}
transition(M_DWRI, Memory_Ack, I) {
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-machine(DMA, "DMA Controller")
+machine(DMA, "DMA Controller")
: DMASequencer * dma_sequencer,
Cycles request_latency = 6
{