// inclusive cache returns L0 entries only
Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
- Entry Dcache_entry := static_cast(Entry, "pointer", Dcache[addr]);
+ Entry Dcache_entry := static_cast(Entry, "pointer", Dcache.lookup(addr));
if(is_valid(Dcache_entry)) {
return Dcache_entry;
}
- Entry Icache_entry := static_cast(Entry, "pointer", Icache[addr]);
+ Entry Icache_entry := static_cast(Entry, "pointer", Icache.lookup(addr));
return Icache_entry;
}
Entry getDCacheEntry(Addr addr), return_by_pointer="yes" {
- Entry Dcache_entry := static_cast(Entry, "pointer", Dcache[addr]);
+ Entry Dcache_entry := static_cast(Entry, "pointer", Dcache.lookup(addr));
return Dcache_entry;
}
Entry getICacheEntry(Addr addr), return_by_pointer="yes" {
- Entry Icache_entry := static_cast(Entry, "pointer", Icache[addr]);
+ Entry Icache_entry := static_cast(Entry, "pointer", Icache.lookup(addr));
return Icache_entry;
}
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
DPRINTF(RubySlicc, "%s\n", L0Cache_State_to_permission(tbe.TBEState));
return L0Cache_State_to_permission(tbe.TBEState);
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
assert(in_msg.Dest == machineID);
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
+ TBE tbe := TBEs.lookup(in_msg.addr);
if(in_msg.Class == CoherenceClass:DATA_EXCLUSIVE) {
trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
if (is_valid(Icache_entry)) {
// The tag matches for the L0, so the L0 asks the L1 for it.
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- Icache_entry, TBEs[in_msg.LineAddress]);
+ Icache_entry, TBEs.lookup(in_msg.LineAddress));
} else {
// Check to see if it is in the OTHER L0
if (is_valid(Dcache_entry)) {
// The block is in the wrong L0, put the request on the queue to the private L1
trigger(Event:L0_Replacement, in_msg.LineAddress,
- Dcache_entry, TBEs[in_msg.LineAddress]);
+ Dcache_entry, TBEs.lookup(in_msg.LineAddress));
}
if (Icache.cacheAvail(in_msg.LineAddress)) {
// L0 doesn't have the line, but we have space for it
// in the L0, so let's see if the L1 has it
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- Icache_entry, TBEs[in_msg.LineAddress]);
+ Icache_entry, TBEs.lookup(in_msg.LineAddress));
} else {
// No room in the L0, so we need to make room in the L0
trigger(Event:L0_Replacement, Icache.cacheProbe(in_msg.LineAddress),
getICacheEntry(Icache.cacheProbe(in_msg.LineAddress)),
- TBEs[Icache.cacheProbe(in_msg.LineAddress)]);
+ TBEs.lookup(Icache.cacheProbe(in_msg.LineAddress)));
}
}
} else {
if (is_valid(Dcache_entry)) {
// The tag matches for the L0, so the L0 asks the L1 for it
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- Dcache_entry, TBEs[in_msg.LineAddress]);
+ Dcache_entry, TBEs.lookup(in_msg.LineAddress));
} else {
// Check to see if it is in the OTHER L0
if (is_valid(Icache_entry)) {
// The block is in the wrong L0, put the request on the queue to the private L1
trigger(Event:L0_Replacement, in_msg.LineAddress,
- Icache_entry, TBEs[in_msg.LineAddress]);
+ Icache_entry, TBEs.lookup(in_msg.LineAddress));
}
if (Dcache.cacheAvail(in_msg.LineAddress)) {
// L0 doesn't have the line, but we have space for it
// in the L0, so let's see if the L1 has it
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- Dcache_entry, TBEs[in_msg.LineAddress]);
+ Dcache_entry, TBEs.lookup(in_msg.LineAddress));
} else {
// No room in the L0, so we need to make room in the L0
trigger(Event:L0_Replacement, Dcache.cacheProbe(in_msg.LineAddress),
getDCacheEntry(Dcache.cacheProbe(in_msg.LineAddress)),
- TBEs[Dcache.cacheProbe(in_msg.LineAddress)]);
+ TBEs.lookup(Dcache.cacheProbe(in_msg.LineAddress)));
}
}
}
check_allocate(TBEs);
assert(is_valid(cache_entry));
TBEs.allocate(address);
- set_tbe(TBEs[address]);
+ set_tbe(TBEs.lookup(address));
tbe.Dirty := cache_entry.Dirty;
tbe.DataBlk := cache_entry.DataBlk;
}
// inclusive cache returns L1 entries only
Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
- Entry cache_entry := static_cast(Entry, "pointer", cache[addr]);
+ Entry cache_entry := static_cast(Entry, "pointer", cache.lookup(addr));
return cache_entry;
}
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
return L1Cache_State_to_permission(tbe.TBEState);
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
+ TBE tbe := TBEs.lookup(in_msg.addr);
if(in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
+ TBE tbe := TBEs.lookup(in_msg.addr);
if (in_msg.Type == CoherenceRequestType:INV) {
if (is_valid(cache_entry) && inL0Cache(cache_entry.CacheState)) {
if (messageBufferFromL0_in.isReady()) {
peek(messageBufferFromL0_in, CoherenceMsg) {
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
+ TBE tbe := TBEs.lookup(in_msg.addr);
if(in_msg.Class == CoherenceClass:INV_DATA) {
trigger(Event:L0_DataAck, in_msg.addr, cache_entry, tbe);
// No room in the L1, so we need to make room in the L1
Entry victim_entry :=
getCacheEntry(cache.cacheProbe(in_msg.addr));
- TBE victim_tbe := TBEs[cache.cacheProbe(in_msg.addr)];
+ TBE victim_tbe := TBEs.lookup(cache.cacheProbe(in_msg.addr));
if (is_valid(victim_entry) && inL0Cache(victim_entry.CacheState)) {
trigger(Event:L0_Invalidate_Own,
check_allocate(TBEs);
assert(is_valid(cache_entry));
TBEs.allocate(address);
- set_tbe(TBEs[address]);
+ set_tbe(TBEs.lookup(address));
tbe.Dirty := cache_entry.Dirty;
tbe.DataBlk := cache_entry.DataBlk;
}
// inclusive cache returns L1 entries only
Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
- Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache[addr]);
+ Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(addr));
if(is_valid(L1Dcache_entry)) {
return L1Dcache_entry;
}
- Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache[addr]);
+ Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(addr));
return L1Icache_entry;
}
Entry getL1DCacheEntry(Addr addr), return_by_pointer="yes" {
- Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache[addr]);
+ Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(addr));
return L1Dcache_entry;
}
Entry getL1ICacheEntry(Addr addr), return_by_pointer="yes" {
- Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache[addr]);
+ Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(addr));
return L1Icache_entry;
}
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
return L1Cache_State_to_permission(tbe.TBEState);
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
// cache. We should drop this request.
trigger(prefetch_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
- L1Icache_entry, TBEs[in_msg.LineAddress]);
+ L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
}
// Check to see if it is in the OTHER L1
// this request.
trigger(prefetch_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
- L1Dcache_entry, TBEs[in_msg.LineAddress]);
+ L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
}
if (L1Icache.cacheAvail(in_msg.LineAddress)) {
// in the L1 so let's see if the L2 has it
trigger(prefetch_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
- L1Icache_entry, TBEs[in_msg.LineAddress]);
+ L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
} else {
// No room in the L1, so we need to make room in the L1
trigger(Event:L1_Replacement,
L1Icache.cacheProbe(in_msg.LineAddress),
getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
- TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
+ TBEs.lookup(L1Icache.cacheProbe(in_msg.LineAddress)));
}
} else {
// Data prefetch
// cache. We should drop this request.
trigger(prefetch_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
- L1Dcache_entry, TBEs[in_msg.LineAddress]);
+ L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
}
// Check to see if it is in the OTHER L1
// request.
trigger(prefetch_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
- L1Icache_entry, TBEs[in_msg.LineAddress]);
+ L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
}
if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
// the L1 let's see if the L2 has it
trigger(prefetch_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
- L1Dcache_entry, TBEs[in_msg.LineAddress]);
+ L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
} else {
// No room in the L1, so we need to make room in the L1
trigger(Event:L1_Replacement,
L1Dcache.cacheProbe(in_msg.LineAddress),
getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
- TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
+ TBEs.lookup(L1Dcache.cacheProbe(in_msg.LineAddress)));
}
}
}
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
+ TBE tbe := TBEs.lookup(in_msg.addr);
if(in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
+ TBE tbe := TBEs.lookup(in_msg.addr);
if (in_msg.Type == CoherenceRequestType:INV) {
trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
if (is_valid(L1Icache_entry)) {
// The tag matches for the L1, so the L1 asks the L2 for it.
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- L1Icache_entry, TBEs[in_msg.LineAddress]);
+ L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
} else {
// Check to see if it is in the OTHER L1
if (is_valid(L1Dcache_entry)) {
// The block is in the wrong L1, put the request on the queue to the shared L2
trigger(Event:L1_Replacement, in_msg.LineAddress,
- L1Dcache_entry, TBEs[in_msg.LineAddress]);
+ L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
}
if (L1Icache.cacheAvail(in_msg.LineAddress)) {
// L1 doesn't have the line, but we have space for it
// in the L1, so let's see if the L2 has it.
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- L1Icache_entry, TBEs[in_msg.LineAddress]);
+ L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
} else {
// No room in the L1, so we need to make room in the L1
trigger(Event:L1_Replacement, L1Icache.cacheProbe(in_msg.LineAddress),
getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
- TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
+ TBEs.lookup(L1Icache.cacheProbe(in_msg.LineAddress)));
}
}
} else {
if (is_valid(L1Dcache_entry)) {
// The tag matches for the L1, so the L1 asks the L2 for it
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- L1Dcache_entry, TBEs[in_msg.LineAddress]);
+ L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
} else {
// Check to see if it is in the OTHER L1
if (is_valid(L1Icache_entry)) {
// The block is in the wrong L1, put the request on the queue to the shared L2
trigger(Event:L1_Replacement, in_msg.LineAddress,
- L1Icache_entry, TBEs[in_msg.LineAddress]);
+ L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
}
if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
// L1 doesn't have the line, but we have space for it
// in the L1, so let's see if the L2 has it.
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- L1Dcache_entry, TBEs[in_msg.LineAddress]);
+ L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
} else {
// No room in the L1, so we need to make room in the L1
trigger(Event:L1_Replacement, L1Dcache.cacheProbe(in_msg.LineAddress),
getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
- TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
+ TBEs.lookup(L1Dcache.cacheProbe(in_msg.LineAddress)));
}
}
}
check_allocate(TBEs);
assert(is_valid(cache_entry));
TBEs.allocate(address);
- set_tbe(TBEs[address]);
+ set_tbe(TBEs.lookup(address));
tbe.isPrefetch := false;
tbe.Dirty := cache_entry.Dirty;
tbe.DataBlk := cache_entry.DataBlk;
// inclusive cache, returns L2 entries only
Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
- return static_cast(Entry, "pointer", L2cache[addr]);
+ return static_cast(Entry, "pointer", L2cache.lookup(addr));
}
bool isSharer(Addr addr, MachineID requestor, Entry cache_entry) {
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
DPRINTF(RubySlicc, "%s\n", L2Cache_State_to_permission(tbe.TBEState));
return L2Cache_State_to_permission(tbe.TBEState);
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
if(L1unblockNetwork_in.isReady()) {
peek(L1unblockNetwork_in, ResponseMsg) {
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
+ TBE tbe := TBEs.lookup(in_msg.addr);
DPRINTF(RubySlicc, "Addr: %s State: %s Sender: %s Type: %s Dest: %s\n",
in_msg.addr, getState(tbe, cache_entry, in_msg.addr),
in_msg.Sender, in_msg.Type, in_msg.Destination);
// test whether it's from a local L1 or an off-chip source
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
+ TBE tbe := TBEs.lookup(in_msg.addr);
if(machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
if(in_msg.Type == CoherenceResponseType:DATA) {
if(L1RequestL2Network_in.isReady()) {
peek(L1RequestL2Network_in, RequestMsg) {
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
+ TBE tbe := TBEs.lookup(in_msg.addr);
DPRINTF(RubySlicc, "Addr: %s State: %s Req: %s Type: %s Dest: %s\n",
in_msg.addr, getState(tbe, cache_entry, in_msg.addr),
Entry L2cache_entry := getCacheEntry(L2cache.cacheProbe(in_msg.addr));
if (isDirty(L2cache_entry)) {
trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.addr),
- L2cache_entry, TBEs[L2cache.cacheProbe(in_msg.addr)]);
+ L2cache_entry, TBEs.lookup(L2cache.cacheProbe(in_msg.addr)));
} else {
trigger(Event:L2_Replacement_clean, L2cache.cacheProbe(in_msg.addr),
- L2cache_entry, TBEs[L2cache.cacheProbe(in_msg.addr)]);
+ L2cache_entry, TBEs.lookup(L2cache.cacheProbe(in_msg.addr)));
}
}
}
check_allocate(TBEs);
assert(is_valid(cache_entry));
TBEs.allocate(address);
- set_tbe(TBEs[address]);
+ set_tbe(TBEs.lookup(address));
tbe.L1_GetS_IDs.clear();
tbe.DataBlk := cache_entry.DataBlk;
tbe.Dirty := cache_entry.Dirty;
void wakeUpBuffers(Addr a);
Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
- Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
+ Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
if (is_valid(dir_entry)) {
return dir_entry;
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(tbe.TBEState));
return Directory_State_to_permission(tbe.TBEState);
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
if (isGETRequest(in_msg.Type)) {
- trigger(Event:Fetch, in_msg.addr, TBEs[in_msg.addr]);
+ trigger(Event:Fetch, in_msg.addr, TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
trigger(Event:DMA_READ, makeLineAddress(in_msg.addr),
- TBEs[makeLineAddress(in_msg.addr)]);
+ TBEs.lookup(makeLineAddress(in_msg.addr)));
} else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
trigger(Event:DMA_WRITE, makeLineAddress(in_msg.addr),
- TBEs[makeLineAddress(in_msg.addr)]);
+ TBEs.lookup(makeLineAddress(in_msg.addr)));
} else {
DPRINTF(RubySlicc, "%s\n", in_msg);
error("Invalid message");
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
- trigger(Event:Data, in_msg.addr, TBEs[in_msg.addr]);
+ trigger(Event:Data, in_msg.addr, TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceResponseType:ACK) {
- trigger(Event:CleanReplacement, in_msg.addr, TBEs[in_msg.addr]);
+ trigger(Event:CleanReplacement, in_msg.addr, TBEs.lookup(in_msg.addr));
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
- trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
+ trigger(Event:Memory_Data, in_msg.addr, TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
- trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
+ trigger(Event:Memory_Ack, in_msg.addr, TBEs.lookup(in_msg.addr));
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
action(v_allocateTBE, "v", desc="Allocate TBE") {
peek(requestNetwork_in, RequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs[address]);
+ set_tbe(TBEs.lookup(address));
tbe.DataBlk := in_msg.DataBlk;
tbe.PhysicalAddress := in_msg.addr;
tbe.Len := in_msg.Len;
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
return L1Cache_State_to_permission(tbe.TBEState);
}
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
peek(forwardRequestNetwork_in, RequestMsg, block_on="addr") {
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
+ TBE tbe := TBEs.lookup(in_msg.addr);
if (in_msg.Type == CoherenceRequestType:GETX) {
trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
peek(responseNetwork_in, ResponseMsg, block_on="addr") {
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
+ TBE tbe := TBEs.lookup(in_msg.addr);
if (in_msg.Type == CoherenceResponseType:DATA) {
trigger(Event:Data, in_msg.addr, cache_entry, tbe);
// make room for the block
trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.LineAddress),
getCacheEntry(cacheMemory.cacheProbe(in_msg.LineAddress)),
- TBEs[cacheMemory.cacheProbe(in_msg.LineAddress)]);
+ TBEs.lookup(cacheMemory.cacheProbe(in_msg.LineAddress)));
}
else {
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- cache_entry, TBEs[in_msg.LineAddress]);
+ cache_entry, TBEs.lookup(in_msg.LineAddress));
}
}
}
action(v_allocateTBE, "v", desc="Allocate TBE") {
TBEs.allocate(address);
- set_tbe(TBEs[address]);
+ set_tbe(TBEs.lookup(address));
}
action(w_deallocateTBE, "w", desc="Deallocate TBE") {
void unset_tbe();
Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
- Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
+ Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
if (is_valid(dir_entry)) {
return dir_entry;
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
return Directory_State_to_permission(tbe.TBEState);
}
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
if (dmaRequestQueue_in.isReady()) {
peek(dmaRequestQueue_in, DMARequestMsg) {
- TBE tbe := TBEs[in_msg.LineAddress];
+ TBE tbe := TBEs.lookup(in_msg.LineAddress);
if (in_msg.Type == DMARequestType:READ) {
trigger(Event:DMA_READ, in_msg.LineAddress, tbe);
} else if (in_msg.Type == DMARequestType:WRITE) {
in_port(requestQueue_in, RequestMsg, requestToDir) {
if (requestQueue_in.isReady()) {
peek(requestQueue_in, RequestMsg) {
- TBE tbe := TBEs[in_msg.addr];
+ TBE tbe := TBEs.lookup(in_msg.addr);
if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:GETS, in_msg.addr, tbe);
} else if (in_msg.Type == CoherenceRequestType:GETX) {
in_port(memQueue_in, MemoryMsg, responseFromMemory) {
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
- TBE tbe := TBEs[in_msg.addr];
+ TBE tbe := TBEs.lookup(in_msg.addr);
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
trigger(Event:Memory_Data, in_msg.addr, tbe);
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
action(v_allocateTBE, "v", desc="Allocate TBE") {
peek(dmaRequestQueue_in, DMARequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs[address]);
+ set_tbe(TBEs.lookup(address));
tbe.DataBlk := in_msg.DataBlk;
tbe.PhysicalAddress := in_msg.PhysicalAddress;
tbe.Len := in_msg.Len;
action(r_allocateTbeForDmaRead, "\r", desc="Allocate TBE for DMA Read") {
peek(dmaRequestQueue_in, DMARequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs[address]);
+ set_tbe(TBEs.lookup(address));
tbe.DmaRequestor := in_msg.Requestor;
}
}
action(v_allocateTBEFromRequestNet, "\v", desc="Allocate TBE") {
peek(requestQueue_in, RequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs[address]);
+ set_tbe(TBEs.lookup(address));
tbe.DataBlk := in_msg.DataBlk;
}
}
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
return L1Cache_State_to_permission(tbe.TBEState);
if(is_valid(cache_entry)) {
testAndRead(addr, cache_entry.DataBlk, pkt);
} else {
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
return num_functional_writes;
}
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
return num_functional_writes;
if (useTimerTable_in.isReady()) {
trigger(Event:Use_Timeout, useTimerTable.readyAddress(),
getCacheEntry(useTimerTable.readyAddress()),
- TBEs[useTimerTable.readyAddress()]);
+ TBEs.lookup(useTimerTable.readyAddress()));
}
}
peek(triggerQueue_in, TriggerMsg) {
if (in_msg.Type == TriggerType:ALL_ACKS) {
trigger(Event:All_acks, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
} else {
error("Unexpected message");
}
if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
if (in_msg.Requestor == machineID && in_msg.RequestorMachine == MachineType:L1Cache) {
trigger(Event:Own_GETX, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
} else {
trigger(Event:Fwd_GETX, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
}
} else if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:Fwd_GETS, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
trigger(Event:Fwd_DMA, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
trigger(Event:Writeback_Ack, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceRequestType:WB_ACK_DATA) {
trigger(Event:Writeback_Ack_Data, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
trigger(Event:Writeback_Nack, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceRequestType:INV) {
trigger(Event:Inv, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
} else {
error("Unexpected message");
}
peek(responseToL1Cache_in, ResponseMsg, block_on="addr") {
if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Ack, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceResponseType:DATA) {
trigger(Event:Data, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
trigger(Event:Exclusive_Data, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
} else {
error("Unexpected message");
}
// The tag matches for the L1, so the L1 asks the L2 for it.
trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.LineAddress, L1Icache_entry,
- TBEs[in_msg.LineAddress]);
+ TBEs.lookup(in_msg.LineAddress));
} else {
Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
if (is_valid(L1Dcache_entry)) {
// The block is in the wrong L1, put the request on the queue to the shared L2
trigger(Event:L1_Replacement, in_msg.LineAddress, L1Dcache_entry,
- TBEs[in_msg.LineAddress]);
+ TBEs.lookup(in_msg.LineAddress));
}
if (L1Icache.cacheAvail(in_msg.LineAddress)) {
// L1 doesn't have the line, but we have space for it in the L1, so let's see if the L2 has it
trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.LineAddress, L1Icache_entry,
- TBEs[in_msg.LineAddress]);
+ TBEs.lookup(in_msg.LineAddress));
} else {
// No room in the L1, so we need to make room in the L1
trigger(Event:L1_Replacement,
L1Icache.cacheProbe(in_msg.LineAddress),
getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
- TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
+ TBEs.lookup(L1Icache.cacheProbe(in_msg.LineAddress)));
}
}
} else {
// The tag matches for the L1, so the L1 asks the L2 for it
trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.LineAddress, L1Dcache_entry,
- TBEs[in_msg.LineAddress]);
+ TBEs.lookup(in_msg.LineAddress));
} else {
Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
if (is_valid(L1Icache_entry)) {
// The block is in the wrong L1, put the request on the queue to the shared L2
trigger(Event:L1_Replacement, in_msg.LineAddress,
- L1Icache_entry, TBEs[in_msg.LineAddress]);
+ L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
}
if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
// L1 doesn't have the line, but we have space for it in the L1, so let's see if the L2 has it
trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.LineAddress, L1Dcache_entry,
- TBEs[in_msg.LineAddress]);
+ TBEs.lookup(in_msg.LineAddress));
} else {
// No room in the L1, so we need to make room in the L1
trigger(Event:L1_Replacement,
L1Dcache.cacheProbe(in_msg.LineAddress),
getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
- TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
+ TBEs.lookup(L1Dcache.cacheProbe(in_msg.LineAddress)));
}
}
}
action(i_allocateTBE, "i", desc="Allocate TBE") {
check_allocate(TBEs);
TBEs.allocate(address);
- set_tbe(TBEs[address]);
+ set_tbe(TBEs.lookup(address));
assert(is_valid(cache_entry));
tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
tbe.Dirty := cache_entry.Dirty;
void unset_tbe();
Entry getCacheEntry(Addr address), return_by_pointer="yes" {
- return static_cast(Entry, "pointer", L2cache[address]);
+ return static_cast(Entry, "pointer", L2cache.lookup(address));
}
bool isDirTagPresent(Addr addr) {
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
DPRINTF(RubySlicc, "%s\n", L2Cache_State_to_permission(tbe.TBEState));
return L2Cache_State_to_permission(tbe.TBEState);
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
peek(triggerQueue_in, TriggerMsg) {
if (in_msg.Type == TriggerType:ALL_ACKS) {
trigger(Event:All_Acks, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
} else {
error("Unexpected message");
}
if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
if (in_msg.Requestor == machineID) {
trigger(Event:Own_GETX, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
} else {
trigger(Event:Fwd_GETX, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
}
} else if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:Fwd_GETS, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
} else if(in_msg.Type == CoherenceRequestType:DMA_READ) {
trigger(Event:Fwd_DMA, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceRequestType:INV) {
trigger(Event:Inv, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
trigger(Event:Writeback_Ack, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
trigger(Event:Writeback_Nack, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
} else {
error("Unexpected message");
}
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceRequestType:GETX) {
trigger(Event:L1_GETX, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:L1_GETS, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceRequestType:PUTO) {
trigger(Event:L1_PUTO, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceRequestType:PUTX) {
trigger(Event:L1_PUTX, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceRequestType:PUTS) {
Entry cache_entry := getCacheEntry(in_msg.addr);
if (isOnlySharer(cache_entry, in_msg.addr, in_msg.Requestor)) {
trigger(Event:L1_PUTS_only, in_msg.addr,
- cache_entry, TBEs[in_msg.addr]);
+ cache_entry, TBEs.lookup(in_msg.addr));
}
else {
trigger(Event:L1_PUTS, in_msg.addr,
- cache_entry, TBEs[in_msg.addr]);
+ cache_entry, TBEs.lookup(in_msg.addr));
}
} else {
error("Unexpected message");
if (in_msg.Type == CoherenceResponseType:ACK) {
if (in_msg.SenderMachine == MachineType:L2Cache) {
trigger(Event:ExtAck, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
}
else {
trigger(Event:IntAck, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
}
} else if (in_msg.Type == CoherenceResponseType:DATA) {
trigger(Event:Data, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
trigger(Event:Data_Exclusive, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
trigger(Event:Unblock, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
trigger(Event:Exclusive_Unblock, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
Entry cache_entry := getCacheEntry(in_msg.addr);
if (is_invalid(cache_entry) &&
L2cache.cacheAvail(in_msg.addr) == false) {
trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.addr),
getCacheEntry(L2cache.cacheProbe(in_msg.addr)),
- TBEs[L2cache.cacheProbe(in_msg.addr)]);
+ TBEs.lookup(L2cache.cacheProbe(in_msg.addr)));
}
else {
trigger(Event:L1_WBDIRTYDATA, in_msg.addr,
- cache_entry, TBEs[in_msg.addr]);
+ cache_entry, TBEs.lookup(in_msg.addr));
}
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_DATA) {
Entry cache_entry := getCacheEntry(in_msg.addr);
L2cache.cacheAvail(in_msg.addr) == false) {
trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.addr),
getCacheEntry(L2cache.cacheProbe(in_msg.addr)),
- TBEs[L2cache.cacheProbe(in_msg.addr)]);
+ TBEs.lookup(L2cache.cacheProbe(in_msg.addr)));
}
else {
trigger(Event:L1_WBCLEANDATA, in_msg.addr,
- cache_entry, TBEs[in_msg.addr]);
+ cache_entry, TBEs.lookup(in_msg.addr));
}
} else if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
trigger(Event:DmaAck, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
+ getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
} else {
error("Unexpected message");
}
action(i_allocateTBE, "i", desc="Allocate TBE for internal/external request(isPrefetch=0, number of invalidates=0)") {
check_allocate(TBEs);
TBEs.allocate(address);
- set_tbe(TBEs[address]);
+ set_tbe(TBEs.lookup(address));
if(is_valid(cache_entry)) {
tbe.DataBlk := cache_entry.DataBlk;
tbe.Dirty := cache_entry.Dirty;
void unset_tbe();
Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
- Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
+ Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
if (is_valid(dir_entry)) {
return dir_entry;
if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
if (getDirectoryEntry(in_msg.addr).WaitingUnblocks == 1) {
trigger(Event:Last_Unblock, in_msg.addr,
- TBEs[in_msg.addr]);
+ TBEs.lookup(in_msg.addr));
} else {
trigger(Event:Unblock, in_msg.addr,
- TBEs[in_msg.addr]);
+ TBEs.lookup(in_msg.addr));
}
} else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
trigger(Event:Exclusive_Unblock, in_msg.addr,
- TBEs[in_msg.addr]);
+ TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
trigger(Event:Dirty_Writeback, in_msg.addr,
- TBEs[in_msg.addr]);
+ TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_ACK) {
trigger(Event:Clean_Writeback, in_msg.addr,
- TBEs[in_msg.addr]);
+ TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
trigger(Event:Data, in_msg.addr,
- TBEs[in_msg.addr]);
+ TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
trigger(Event:DMA_ACK, in_msg.addr,
- TBEs[in_msg.addr]);
+ TBEs.lookup(in_msg.addr));
} else {
error("Invalid message");
}
if (requestQueue_in.isReady()) {
peek(requestQueue_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:GETS) {
- trigger(Event:GETS, in_msg.addr, TBEs[in_msg.addr]);
+ trigger(Event:GETS, in_msg.addr, TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceRequestType:GETX) {
- trigger(Event:GETX, in_msg.addr, TBEs[in_msg.addr]);
+ trigger(Event:GETX, in_msg.addr, TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceRequestType:PUTX) {
- trigger(Event:PUTX, in_msg.addr, TBEs[in_msg.addr]);
+ trigger(Event:PUTX, in_msg.addr, TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceRequestType:PUTO) {
- trigger(Event:PUTO, in_msg.addr, TBEs[in_msg.addr]);
+ trigger(Event:PUTO, in_msg.addr, TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
- trigger(Event:PUTO_SHARERS, in_msg.addr, TBEs[in_msg.addr]);
+ trigger(Event:PUTO_SHARERS, in_msg.addr, TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
trigger(Event:DMA_READ, makeLineAddress(in_msg.addr),
- TBEs[makeLineAddress(in_msg.addr)]);
+ TBEs.lookup(makeLineAddress(in_msg.addr)));
} else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
trigger(Event:DMA_WRITE, makeLineAddress(in_msg.addr),
- TBEs[makeLineAddress(in_msg.addr)]);
+ TBEs.lookup(makeLineAddress(in_msg.addr)));
} else {
error("Invalid message");
}
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
- trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
+ trigger(Event:Memory_Data, in_msg.addr, TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
- trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
+ trigger(Event:Memory_Ack, in_msg.addr, TBEs.lookup(in_msg.addr));
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
action(v_allocateTBE, "v", desc="Allocate TBE entry") {
peek (requestQueue_in, RequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs[address]);
+ set_tbe(TBEs.lookup(address));
tbe.PhysicalAddress := in_msg.addr;
tbe.Len := in_msg.Len;
tbe.DataBlk := in_msg.DataBlk;
peek(dmaRequestQueue_in, SequencerMsg) {
if (in_msg.Type == SequencerRequestType:LD ) {
trigger(Event:ReadRequest, in_msg.LineAddress,
- TBEs[in_msg.LineAddress]);
+ TBEs.lookup(in_msg.LineAddress));
} else if (in_msg.Type == SequencerRequestType:ST) {
trigger(Event:WriteRequest, in_msg.LineAddress,
- TBEs[in_msg.LineAddress]);
+ TBEs.lookup(in_msg.LineAddress));
} else {
error("Invalid request type");
}
peek( dmaResponseQueue_in, ResponseMsg) {
if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
trigger(Event:DMA_Ack, makeLineAddress(in_msg.addr),
- TBEs[makeLineAddress(in_msg.addr)]);
+ TBEs.lookup(makeLineAddress(in_msg.addr)));
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE ||
in_msg.Type == CoherenceResponseType:DATA) {
trigger(Event:Data, makeLineAddress(in_msg.addr),
- TBEs[makeLineAddress(in_msg.addr)]);
+ TBEs.lookup(makeLineAddress(in_msg.addr)));
} else if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Inv_Ack, makeLineAddress(in_msg.addr),
- TBEs[makeLineAddress(in_msg.addr)]);
+ TBEs.lookup(makeLineAddress(in_msg.addr)));
} else {
error("Invalid response type");
}
if (triggerQueue_in.isReady()) {
peek(triggerQueue_in, TriggerMsg) {
if (in_msg.Type == TriggerType:ALL_ACKS) {
- trigger(Event:All_Acks, in_msg.addr, TBEs[in_msg.addr]);
+ trigger(Event:All_Acks, in_msg.addr, TBEs.lookup(in_msg.addr));
} else {
error("Unexpected message");
}
action(v_allocateTBE, "v", desc="Allocate TBE entry") {
TBEs.allocate(address);
- set_tbe(TBEs[address]);
+ set_tbe(TBEs.lookup(address));
}
action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := L1_TBEs[addr];
+ TBE tbe := L1_TBEs.lookup(addr);
if(is_valid(tbe)) {
return L1Cache_State_to_permission(tbe.TBEState);
}
// Use Timer
in_port(useTimerTable_in, Addr, useTimerTable, rank=5) {
if (useTimerTable_in.isReady()) {
- TBE tbe := L1_TBEs[useTimerTable.readyAddress()];
+ TBE tbe := L1_TBEs.lookup(useTimerTable.readyAddress());
if (persistentTable.isLocked(useTimerTable.readyAddress()) &&
(persistentTable.findSmallest(useTimerTable.readyAddress()) != machineID)) {
if (reissueTimerTable_in.isReady()) {
trigger(Event:Request_Timeout, reissueTimerTable.readyAddress(),
getCacheEntry(reissueTimerTable.readyAddress()),
- L1_TBEs[reissueTimerTable.readyAddress()]);
+ L1_TBEs.lookup(reissueTimerTable.readyAddress()));
}
}
// React to the message based on the current state of the table
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := L1_TBEs[in_msg.addr];
+ TBE tbe := L1_TBEs.lookup(in_msg.addr);
if (persistentTable.isLocked(in_msg.addr)) {
if (persistentTable.findSmallest(in_msg.addr) == machineID) {
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := L1_TBEs[in_msg.addr];
+ TBE tbe := L1_TBEs.lookup(in_msg.addr);
// Mark TBE flag if response received off-chip. Use this to update average latency estimate
if ( machineIDToMachineType(in_msg.Sender) == MachineType:L2Cache ) {
// came from an off-chip L2 cache
if (is_valid(tbe)) {
- // L1_TBEs[in_msg.addr].ExternalResponse := true;
+ // L1_TBEs.lookup(in_msg.addr).ExternalResponse := true;
// profile_offchipL2_response(in_msg.addr);
}
}
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := L1_TBEs[in_msg.addr];
+ TBE tbe := L1_TBEs.lookup(in_msg.addr);
if (in_msg.Type == CoherenceRequestType:GETX) {
if (in_msg.isLocal) {
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetches to blocks in D-cache
- TBE tbe := L1_TBEs[in_msg.LineAddress];
+ TBE tbe := L1_TBEs.lookup(in_msg.LineAddress);
if (in_msg.Type == RubyRequestType:IFETCH) {
// *** INSTRUCTION ACCESS ***
trigger(Event:L1_Replacement,
L1Icache.cacheProbe(in_msg.LineAddress),
getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
- L1_TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
+ L1_TBEs.lookup(L1Icache.cacheProbe(in_msg.LineAddress)));
}
}
} else {
trigger(Event:L1_Replacement,
L1Dcache.cacheProbe(in_msg.LineAddress),
getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
- L1_TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
+ L1_TBEs.lookup(L1Dcache.cacheProbe(in_msg.LineAddress)));
}
}
}
action(i_allocateTBE, "i", desc="Allocate TBE") {
check_allocate(L1_TBEs);
L1_TBEs.allocate(address);
- set_tbe(L1_TBEs[address]);
+ set_tbe(L1_TBEs.lookup(address));
tbe.IssueCount := 0;
peek(mandatoryQueue_in, RubyRequest) {
tbe.PC := in_msg.ProgramCounter;
void unset_tbe();
Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
- Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
+ Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
if (is_valid(dir_entry)) {
return dir_entry;
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
return Directory_State_to_permission(tbe.TBEState);
}
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
- trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
+ trigger(Event:Memory_Data, in_msg.addr, TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
- trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
+ trigger(Event:Memory_Ack, in_msg.addr, TBEs.lookup(in_msg.addr));
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
in_port(reissueTimerTable_in, Addr, reissueTimerTable) {
if (reissueTimerTable_in.isReady()) {
trigger(Event:Request_Timeout, reissueTimerTable.readyAddress(),
- TBEs[reissueTimerTable.readyAddress()]);
+ TBEs.lookup(reissueTimerTable.readyAddress()));
}
}
if ((in_msg.Type == CoherenceResponseType:DATA_OWNER) ||
(in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
trigger(Event:Data_All_Tokens, in_msg.addr,
- TBEs[in_msg.addr]);
+ TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
trigger(Event:Ack_Owner_All_Tokens, in_msg.addr,
- TBEs[in_msg.addr]);
+ TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Ack_All_Tokens, in_msg.addr,
- TBEs[in_msg.addr]);
+ TBEs.lookup(in_msg.addr));
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
} else {
if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
trigger(Event:Data_Owner, in_msg.addr,
- TBEs[in_msg.addr]);
+ TBEs.lookup(in_msg.addr));
} else if ((in_msg.Type == CoherenceResponseType:ACK) ||
(in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
trigger(Event:Tokens, in_msg.addr,
- TBEs[in_msg.addr]);
+ TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
trigger(Event:Ack_Owner, in_msg.addr,
- TBEs[in_msg.addr]);
+ TBEs.lookup(in_msg.addr));
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
if (persistentTable.findSmallest(in_msg.addr) == machineID) {
if (getDirectoryEntry(in_msg.addr).Tokens > 0) {
trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.addr,
- TBEs[in_msg.addr]);
+ TBEs.lookup(in_msg.addr));
} else {
trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
- TBEs[in_msg.addr]);
+ TBEs.lookup(in_msg.addr));
}
} else {
// locked
- trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
+ trigger(Event:Lockdown, in_msg.addr, TBEs.lookup(in_msg.addr));
}
} else {
// unlocked
- trigger(Event:Unlockdown, in_msg.addr, TBEs[in_msg.addr]);
+ trigger(Event:Unlockdown, in_msg.addr, TBEs.lookup(in_msg.addr));
}
}
else {
if (persistentTable.findSmallest(in_msg.addr) == machineID) {
if (getDirectoryEntry(in_msg.addr).Tokens > 0) {
trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.addr,
- TBEs[in_msg.addr]);
+ TBEs.lookup(in_msg.addr));
} else {
trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
- TBEs[in_msg.addr]);
+ TBEs.lookup(in_msg.addr));
}
} else if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
// locked
- trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
+ trigger(Event:Lockdown, in_msg.addr, TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
// locked
- trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
+ trigger(Event:Lockdown, in_msg.addr, TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
// unlocked
- trigger(Event:Unlockdown, in_msg.addr, TBEs[in_msg.addr]);
+ trigger(Event:Unlockdown, in_msg.addr, TBEs.lookup(in_msg.addr));
} else {
error("Invalid message");
}
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceRequestType:GETS) {
- trigger(Event:GETS, in_msg.addr, TBEs[in_msg.addr]);
+ trigger(Event:GETS, in_msg.addr, TBEs.lookup(in_msg.addr));
} else if (in_msg.Type == CoherenceRequestType:GETX) {
- trigger(Event:GETX, in_msg.addr, TBEs[in_msg.addr]);
+ trigger(Event:GETX, in_msg.addr, TBEs.lookup(in_msg.addr));
} else {
error("Invalid message");
}
if (dmaRequestQueue_in.isReady()) {
peek(dmaRequestQueue_in, DMARequestMsg) {
if (in_msg.Type == DMARequestType:READ) {
- trigger(Event:DMA_READ, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
+ trigger(Event:DMA_READ, in_msg.LineAddress, TBEs.lookup(in_msg.LineAddress));
} else if (in_msg.Type == DMARequestType:WRITE) {
if (getDirectoryEntry(in_msg.LineAddress).Tokens == max_tokens()) {
trigger(Event:DMA_WRITE_All_Tokens, in_msg.LineAddress,
- TBEs[in_msg.LineAddress]);
+ TBEs.lookup(in_msg.LineAddress));
} else {
trigger(Event:DMA_WRITE, in_msg.LineAddress,
- TBEs[in_msg.LineAddress]);
+ TBEs.lookup(in_msg.LineAddress));
}
} else {
error("Invalid message");
action(vd_allocateDmaRequestInTBE, "vd", desc="Record Data in TBE") {
peek(dmaRequestQueue_in, DMARequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs[address]);
+ set_tbe(TBEs.lookup(address));
tbe.DataBlk := in_msg.DataBlk;
tbe.PhysicalAddress := in_msg.PhysicalAddress;
tbe.Len := in_msg.Len;
if(is_valid(cache_entry)) {
testAndRead(addr, cache_entry.DataBlk, pkt);
} else {
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
return num_functional_writes;
}
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
return num_functional_writes;
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
return L1Cache_State_to_permission(tbe.TBEState);
}
peek(triggerQueue_in, TriggerMsg) {
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
+ TBE tbe := TBEs.lookup(in_msg.addr);
if (in_msg.Type == TriggerType:L2_to_L1) {
trigger(Event:Complete_L2_to_L1, in_msg.addr, cache_entry, tbe);
peek(responseToCache_in, ResponseMsg, block_on="addr") {
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
+ TBE tbe := TBEs.lookup(in_msg.addr);
if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
peek(forwardToCache_in, RequestMsg, block_on="addr") {
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
+ TBE tbe := TBEs.lookup(in_msg.addr);
if ((in_msg.Type == CoherenceRequestType:GETX) ||
(in_msg.Type == CoherenceRequestType:GETF)) {
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetches to blocks in D-cache
- TBE tbe := TBEs[in_msg.LineAddress];
+ TBE tbe := TBEs.lookup(in_msg.LineAddress);
if (in_msg.Type == RubyRequestType:IFETCH) {
// *** INSTRUCTION ACCESS ***
trigger(Event:L2_Replacement,
l2_victim_addr,
getL2CacheEntry(l2_victim_addr),
- TBEs[l2_victim_addr]);
+ TBEs.lookup(l2_victim_addr));
}
}
trigger(Event:L1_to_L2,
l1i_victim_addr,
getL1ICacheEntry(l1i_victim_addr),
- TBEs[l1i_victim_addr]);
+ TBEs.lookup(l1i_victim_addr));
} else {
Addr l2_victim_addr := L2cache.cacheProbe(l1i_victim_addr);
// The L2 does not have room, so we replace a line from the L2
trigger(Event:L2_Replacement,
l2_victim_addr,
getL2CacheEntry(l2_victim_addr),
- TBEs[l2_victim_addr]);
+ TBEs.lookup(l2_victim_addr));
}
}
}
trigger(Event:L2_Replacement,
l2_victim_addr,
getL2CacheEntry(l2_victim_addr),
- TBEs[l2_victim_addr]);
+ TBEs.lookup(l2_victim_addr));
}
}
trigger(Event:L1_to_L2,
l1d_victim_addr,
getL1DCacheEntry(l1d_victim_addr),
- TBEs[l1d_victim_addr]);
+ TBEs.lookup(l1d_victim_addr));
} else {
Addr l2_victim_addr := L2cache.cacheProbe(l1d_victim_addr);
// The L2 does not have room, so we replace a line from the L2
trigger(Event:L2_Replacement,
l2_victim_addr,
getL2CacheEntry(l2_victim_addr),
- TBEs[l2_victim_addr]);
+ TBEs.lookup(l2_victim_addr));
}
}
}
check_allocate(TBEs);
assert(is_valid(cache_entry));
TBEs.allocate(address);
- set_tbe(TBEs[address]);
+ set_tbe(TBEs.lookup(address));
tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
tbe.Dirty := cache_entry.Dirty;
tbe.Sharers := false;
action(it_allocateTBE, "it", desc="Allocate TBE") {
check_allocate(TBEs);
TBEs.allocate(address);
- set_tbe(TBEs[address]);
+ set_tbe(TBEs.lookup(address));
tbe.Dirty := false;
tbe.Sharers := false;
}
TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
- Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
+ Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
if (is_valid(dir_entry)) {
return dir_entry;
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
return Directory_State_to_permission(tbe.TBEState);
}
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs[addr];
+ TBE tbe := TBEs.lookup(addr);
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
if (triggerQueue_in.isReady()) {
peek(triggerQueue_in, TriggerMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
+ TBE tbe := TBEs.lookup(in_msg.addr);
if (in_msg.Type == TriggerType:ALL_ACKS) {
trigger(Event:All_acks_and_owner_data, in_msg.addr,
pf_entry, tbe);
if (unblockNetwork_in.isReady()) {
peek(unblockNetwork_in, ResponseMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
+ TBE tbe := TBEs.lookup(in_msg.addr);
if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
trigger(Event:Unblock, in_msg.addr, pf_entry, tbe);
} else if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
if (responseToDir_in.isReady()) {
peek(responseToDir_in, ResponseMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
+ TBE tbe := TBEs.lookup(in_msg.addr);
if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Ack, in_msg.addr, pf_entry, tbe);
} else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
+ TBE tbe := TBEs.lookup(in_msg.addr);
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
trigger(Event:Memory_Data, in_msg.addr, pf_entry, tbe);
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
if (requestQueue_in.isReady()) {
peek(requestQueue_in, RequestMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
- TBE tbe := TBEs[in_msg.addr];
+ TBE tbe := TBEs.lookup(in_msg.addr);
if (in_msg.Type == CoherenceRequestType:PUT) {
trigger(Event:PUT, in_msg.addr, pf_entry, tbe);
} else if (in_msg.Type == CoherenceRequestType:PUTF) {
trigger(Event:Pf_Replacement,
probeFilter.cacheProbe(in_msg.addr),
getProbeFilterEntry(probeFilter.cacheProbe(in_msg.addr)),
- TBEs[probeFilter.cacheProbe(in_msg.addr)]);
+ TBEs.lookup(probeFilter.cacheProbe(in_msg.addr)));
}
}
} else {
if (dmaRequestQueue_in.isReady()) {
peek(dmaRequestQueue_in, DMARequestMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.LineAddress);
- TBE tbe := TBEs[in_msg.LineAddress];
+ TBE tbe := TBEs.lookup(in_msg.LineAddress);
if (in_msg.Type == DMARequestType:READ) {
trigger(Event:DMA_READ, in_msg.LineAddress, pf_entry, tbe);
} else if (in_msg.Type == DMARequestType:WRITE) {
check_allocate(TBEs);
peek(requestQueue_in, RequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs[address]);
+ set_tbe(TBEs.lookup(address));
tbe.PhysicalAddress := address;
tbe.ResponseType := CoherenceResponseType:NULL;
}
check_allocate(TBEs);
peek(dmaRequestQueue_in, DMARequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs[address]);
+ set_tbe(TBEs.lookup(address));
tbe.DmaDataBlk := in_msg.DataBlk;
tbe.PhysicalAddress := in_msg.PhysicalAddress;
tbe.Len := in_msg.Len;
"aexpr : aexpr DOT ident '(' exprs ')'"
p[0] = ast.MemberMethodCallExprAST(self, p[1], p[3], p[5])
- def p_expr__member_method_call_lookup(self, p):
- "aexpr : aexpr '[' exprs ']'"
- p[0] = ast.MemberMethodCallExprAST(self, p[1], "lookup", p[3])
-
def p_expr__class_method_call(self, p):
"aexpr : type DOUBLE_COLON ident '(' exprs ')'"
p[0] = ast.ClassMethodCallExprAST(self, p[1], p[3], p[5])
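# Note: with the bracket-lookup production removed above, SLICC sources can no
# longer index a table as "TBEs[addr]"; the access must be spelled out as
# "TBEs.lookup(addr)", which is parsed by the remaining
# p_expr__member_method_call rule ("aexpr : aexpr DOT ident '(' exprs ')'").
# Illustrative before/after from a .sm file, mirroring the hunks above
# (a minimal sketch, not an exhaustive list of affected call sites):
#   old: TBE tbe := TBEs[in_msg.addr];
#   new: TBE tbe := TBEs.lookup(in_msg.addr);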