parser.add_option("--recycle-latency", type="int", default=10,
help="Recycle latency for ruby controller input buffers")
+ parser.add_option("--random_seed", type="int", default=1234,
+ help="Used for seeding the random number generator")
+
protocol = buildEnv['PROTOCOL']
exec "import %s" % protocol
eval("%s.define_options(parser)" % protocol)
if buildEnv['TARGET_ISA'] == "x86":
cpu_seq.pio_slave_port = piobus.master
- ruby.number_of_virtual_networks = ruby.network.number_of_virtual_networks
ruby._cpu_ports = cpu_sequencers
ruby.num_of_sequencers = len(cpu_sequencers)
+ ruby.random_seed = options.random_seed
# Create a backing copy of physical memory in case required
if options.access_backing_store:
RubyDirectedTester(const RubyDirectedTester& obj);
RubyDirectedTester& operator=(const RubyDirectedTester& obj);
- uint64_t m_requests_completed;
+ uint64 m_requests_completed;
std::vector<MasterPort*> ports;
- uint64_t m_requests_to_complete;
+ uint64 m_requests_to_complete;
DirectedGenerator* generator;
};
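
A note on the type change above (and in the checker hunks below): swapping uint64_t members for a plain uint64 spelling only compiles if the build already provides such an alias. A minimal sketch of that assumption; the typedef placement and surrounding struct are illustrative, not taken from this patch.

    // Minimal sketch, assuming the build exposes a fixed-width alias spelled
    // "uint64"; everything here is illustrative.
    #include <cstdint>

    typedef std::uint64_t uint64;   // hypothetical project-wide alias the patch relies on

    struct TesterCountersSketch
    {
        uint64 m_requests_completed = 0;    // same width as the uint64_t it replaces
        uint64 m_requests_to_complete = 0;
    };
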
std::vector<Cycles> m_last_progress_vector;
int m_num_cpus;
- uint64_t m_checks_completed;
+ uint64 m_checks_completed;
std::vector<MasterPort*> writePorts;
std::vector<MasterPort*> readPorts;
- uint64_t m_checks_to_complete;
+ uint64 m_checks_to_complete;
int m_deadlock_threshold;
int m_num_writers;
int m_num_readers;
// inclusive cache returns L0 entries only
Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
- Entry Dcache_entry := static_cast(Entry, "pointer", Dcache.lookup(addr));
+ Entry Dcache_entry := static_cast(Entry, "pointer", Dcache[addr]);
if(is_valid(Dcache_entry)) {
return Dcache_entry;
}
- Entry Icache_entry := static_cast(Entry, "pointer", Icache.lookup(addr));
+ Entry Icache_entry := static_cast(Entry, "pointer", Icache[addr]);
return Icache_entry;
}
Entry getDCacheEntry(Addr addr), return_by_pointer="yes" {
- Entry Dcache_entry := static_cast(Entry, "pointer", Dcache.lookup(addr));
+ Entry Dcache_entry := static_cast(Entry, "pointer", Dcache[addr]);
return Dcache_entry;
}
Entry getICacheEntry(Addr addr), return_by_pointer="yes" {
- Entry Icache_entry := static_cast(Entry, "pointer", Icache.lookup(addr));
+ Entry Icache_entry := static_cast(Entry, "pointer", Icache[addr]);
return Icache_entry;
}
}
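
Throughout these hunks, accesses written as X.lookup(addr) are rewritten as X[addr]. The two forms are interchangeable whenever the bracket operator simply forwards to the pointer-returning lookup; a minimal C++ sketch of that contract, with purely illustrative names rather than the simulator's real classes.

    // Hypothetical table sketch: operator[] is sugar for lookup(), and both
    // return nullptr when no entry exists, so is_valid()-style checks still work.
    #include <cstdint>
    #include <unordered_map>

    using Addr = std::uint64_t;
    struct TBE { bool Dirty = false; };

    class TbeTableSketch
    {
      public:
        TBE *lookup(Addr a)
        {
            auto it = m_map.find(a);
            return it == m_map.end() ? nullptr : &it->second;
        }
        TBE *operator[](Addr a) { return lookup(a); }   // same result either way

        void allocate(Addr a) { m_map.emplace(a, TBE{}); }
        void deallocate(Addr a) { m_map.erase(a); }

      private:
        std::unordered_map<Addr, TBE> m_map;
    };
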
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
DPRINTF(RubySlicc, "%s\n", L0Cache_State_to_permission(tbe.TBEState));
return L0Cache_State_to_permission(tbe.TBEState);
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
assert(in_msg.Dest == machineID);
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if(in_msg.Class == CoherenceClass:DATA_EXCLUSIVE) {
trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
if (is_valid(Icache_entry)) {
// The tag matches for the L0, so the L0 asks the L2 for it.
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- Icache_entry, TBEs.lookup(in_msg.LineAddress));
+ Icache_entry, TBEs[in_msg.LineAddress]);
} else {
// Check to see if it is in the OTHER L0
if (is_valid(Dcache_entry)) {
// The block is in the wrong L0, put the request on the queue to the shared L2
trigger(Event:L0_Replacement, in_msg.LineAddress,
- Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+ Dcache_entry, TBEs[in_msg.LineAddress]);
}
if (Icache.cacheAvail(in_msg.LineAddress)) {
// L0 doesn't have the line, but we have space for it
// L0 doesn't have the line, but we have space for it
// in the L0 so let's see if the L2 has it
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- Icache_entry, TBEs.lookup(in_msg.LineAddress));
+ Icache_entry, TBEs[in_msg.LineAddress]);
} else {
// No room in the L0, so we need to make room in the L0
trigger(Event:L0_Replacement, Icache.cacheProbe(in_msg.LineAddress),
getICacheEntry(Icache.cacheProbe(in_msg.LineAddress)),
- TBEs.lookup(Icache.cacheProbe(in_msg.LineAddress)));
+ TBEs[Icache.cacheProbe(in_msg.LineAddress)]);
}
}
} else {
if (is_valid(Dcache_entry)) {
// The tag matches for the L0, so the L0 asks the L1 for it
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+ Dcache_entry, TBEs[in_msg.LineAddress]);
} else {
// Check to see if it is in the OTHER L0
if (is_valid(Icache_entry)) {
// The block is in the wrong L0, put the request on the queue to the private L1
trigger(Event:L0_Replacement, in_msg.LineAddress,
- Icache_entry, TBEs.lookup(in_msg.LineAddress));
+ Icache_entry, TBEs[in_msg.LineAddress]);
}
if (Dcache.cacheAvail(in_msg.LineAddress)) {
// L1 doesn't have the line, but we have space for it
// in the L0 let's see if the L1 has it
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+ Dcache_entry, TBEs[in_msg.LineAddress]);
} else {
// No room in the L1, so we need to make room in the L0
trigger(Event:L0_Replacement, Dcache.cacheProbe(in_msg.LineAddress),
getDCacheEntry(Dcache.cacheProbe(in_msg.LineAddress)),
- TBEs.lookup(Dcache.cacheProbe(in_msg.LineAddress)));
+ TBEs[Dcache.cacheProbe(in_msg.LineAddress)]);
}
}
}
}
}
- action(h_load_hit, "hd", desc="If not prefetch, notify sequencer the load completed.") {
+ action(h_load_hit, "h", desc="If not prefetch, notify sequencer the load completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- Dcache.setMRU(cache_entry);
sequencer.readCallback(address, cache_entry.DataBlk);
}
- action(h_ifetch_hit, "hi", desc="If not prefetch, notify sequencer the ifetch completed.") {
+ action(hx_load_hit, "hx", desc="If not prefetch, notify sequencer the load completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- Icache.setMRU(cache_entry);
- sequencer.readCallback(address, cache_entry.DataBlk);
- }
-
- action(hx_load_hit, "hxd", desc="notify sequencer the load completed.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- Dcache.setMRU(cache_entry);
- sequencer.readCallback(address, cache_entry.DataBlk, true);
- }
-
- action(hx_ifetch_hit, "hxi", desc="notify sequencer the ifetch completed.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- Icache.setMRU(cache_entry);
sequencer.readCallback(address, cache_entry.DataBlk, true);
}
action(hh_store_hit, "\h", desc="If not prefetch, notify sequencer that store completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- Dcache.setMRU(cache_entry);
sequencer.writeCallback(address, cache_entry.DataBlk);
cache_entry.Dirty := true;
}
action(hhx_store_hit, "\hx", desc="If not prefetch, notify sequencer that store completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- Dcache.setMRU(cache_entry);
sequencer.writeCallback(address, cache_entry.DataBlk, true);
cache_entry.Dirty := true;
}
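
The hit actions above invoke the sequencer callbacks with and without an extra boolean; elsewhere in this patch the same slot is filled by isExternalHit(...), so it plausibly flags an access completed through the protocol rather than a plain in-cache hit. A purely illustrative callback with that shape; the signature is an assumption, not the simulator's actual API.

    // Illustrative only: the third argument marks an access satisfied
    // externally (fill/forward) instead of as a plain in-cache hit.
    #include <cstdint>
    #include <cstdio>

    using Addr = std::uint64_t;
    struct DataBlock { unsigned char bytes[64] = {}; };

    struct SequencerSketch
    {
        void readCallback(Addr addr, const DataBlock &, bool externalHit = false)
        {
            std::printf("load done at %#llx (%s)\n",
                        static_cast<unsigned long long>(addr),
                        externalHit ? "external" : "hit");
        }
    };
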
check_allocate(TBEs);
assert(is_valid(cache_entry));
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.Dirty := cache_entry.Dirty;
tbe.DataBlk := cache_entry.DataBlk;
}
}
transition({S,E,M}, Ifetch) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileInstHit;
k_popMandatoryQueue;
}
transition(Inst_IS, Data, S) {
u_writeInstToCache;
- hx_ifetch_hit;
+ hx_load_hit;
s_deallocateTBE;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
transition(Inst_IS, Data_Exclusive, E) {
u_writeInstToCache;
- hx_ifetch_hit;
+ hx_load_hit;
s_deallocateTBE;
o_popIncomingResponseQueue;
kd_wakeUpDependents;
// inclusive cache returns L1 entries only
Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
- Entry cache_entry := static_cast(Entry, "pointer", cache.lookup(addr));
+ Entry cache_entry := static_cast(Entry, "pointer", cache[addr]);
return cache_entry;
}
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
return L1Cache_State_to_permission(tbe.TBEState);
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if(in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceRequestType:INV) {
if (is_valid(cache_entry) && inL0Cache(cache_entry.CacheState)) {
if (messageBufferFromL0_in.isReady()) {
peek(messageBufferFromL0_in, CoherenceMsg) {
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if(in_msg.Class == CoherenceClass:INV_DATA) {
trigger(Event:L0_DataAck, in_msg.addr, cache_entry, tbe);
// No room in the L1, so we need to make room in the L1
Entry victim_entry :=
getCacheEntry(cache.cacheProbe(in_msg.addr));
- TBE victim_tbe := TBEs.lookup(cache.cacheProbe(in_msg.addr));
+ TBE victim_tbe := TBEs[cache.cacheProbe(in_msg.addr)];
if (is_valid(victim_entry) && inL0Cache(victim_entry.CacheState)) {
trigger(Event:L0_Invalidate_Own,
check_allocate(TBEs);
assert(is_valid(cache_entry));
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.Dirty := cache_entry.Dirty;
tbe.DataBlk := cache_entry.DataBlk;
}
// inclusive cache returns L1 entries only
Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
- Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(addr));
+ Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache[addr]);
if(is_valid(L1Dcache_entry)) {
return L1Dcache_entry;
}
- Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(addr));
+ Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache[addr]);
return L1Icache_entry;
}
Entry getL1DCacheEntry(Addr addr), return_by_pointer="yes" {
- Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache.lookup(addr));
+ Entry L1Dcache_entry := static_cast(Entry, "pointer", L1Dcache[addr]);
return L1Dcache_entry;
}
Entry getL1ICacheEntry(Addr addr), return_by_pointer="yes" {
- Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache.lookup(addr));
+ Entry L1Icache_entry := static_cast(Entry, "pointer", L1Icache[addr]);
return L1Icache_entry;
}
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
return L1Cache_State_to_permission(tbe.TBEState);
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
// cache. We should drop this request.
trigger(prefetch_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
- L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Icache_entry, TBEs[in_msg.LineAddress]);
}
// Check to see if it is in the OTHER L1
// this request.
trigger(prefetch_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
- L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Dcache_entry, TBEs[in_msg.LineAddress]);
}
if (L1Icache.cacheAvail(in_msg.LineAddress)) {
// in the L1 so let's see if the L2 has it
trigger(prefetch_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
- L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Icache_entry, TBEs[in_msg.LineAddress]);
} else {
// No room in the L1, so we need to make room in the L1
trigger(Event:L1_Replacement,
L1Icache.cacheProbe(in_msg.LineAddress),
getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
- TBEs.lookup(L1Icache.cacheProbe(in_msg.LineAddress)));
+ TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
}
} else {
// Data prefetch
// cache. We should drop this request.
trigger(prefetch_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
- L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Dcache_entry, TBEs[in_msg.LineAddress]);
}
// Check to see if it is in the OTHER L1
// request.
trigger(prefetch_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
- L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Icache_entry, TBEs[in_msg.LineAddress]);
}
if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
// the L1 let's see if the L2 has it
trigger(prefetch_request_type_to_event(in_msg.Type),
in_msg.LineAddress,
- L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Dcache_entry, TBEs[in_msg.LineAddress]);
} else {
// No room in the L1, so we need to make room in the L1
trigger(Event:L1_Replacement,
L1Dcache.cacheProbe(in_msg.LineAddress),
getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
- TBEs.lookup(L1Dcache.cacheProbe(in_msg.LineAddress)));
+ TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
}
}
}
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if(in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
trigger(Event:Data_Exclusive, in_msg.addr, cache_entry, tbe);
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceRequestType:INV) {
trigger(Event:Inv, in_msg.addr, cache_entry, tbe);
if (is_valid(L1Icache_entry)) {
// The tag matches for the L1, so the L1 asks the L2 for it.
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Icache_entry, TBEs[in_msg.LineAddress]);
} else {
// Check to see if it is in the OTHER L1
if (is_valid(L1Dcache_entry)) {
// The block is in the wrong L1, put the request on the queue to the shared L2
trigger(Event:L1_Replacement, in_msg.LineAddress,
- L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Dcache_entry, TBEs[in_msg.LineAddress]);
}
if (L1Icache.cacheAvail(in_msg.LineAddress)) {
// L1 doesn't have the line, but we have space for it
// in the L1 so let's see if the L2 has it.
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Icache_entry, TBEs[in_msg.LineAddress]);
} else {
// No room in the L1, so we need to make room in the L1
trigger(Event:L1_Replacement, L1Icache.cacheProbe(in_msg.LineAddress),
getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
- TBEs.lookup(L1Icache.cacheProbe(in_msg.LineAddress)));
+ TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
}
}
} else {
if (is_valid(L1Dcache_entry)) {
// The tag matches for the L1, so the L1 asks the L2 for it
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Dcache_entry, TBEs[in_msg.LineAddress]);
} else {
// Check to see if it is in the OTHER L1
if (is_valid(L1Icache_entry)) {
// The block is in the wrong L1, put the request on the queue to the shared L2
trigger(Event:L1_Replacement, in_msg.LineAddress,
- L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Icache_entry, TBEs[in_msg.LineAddress]);
}
if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
// L1 doesn't have the line, but we have space for it
// in the L1 let's see if the L2 has it.
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- L1Dcache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Dcache_entry, TBEs[in_msg.LineAddress]);
} else {
// No room in the L1, so we need to make room in the L1
trigger(Event:L1_Replacement, L1Dcache.cacheProbe(in_msg.LineAddress),
getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
- TBEs.lookup(L1Dcache.cacheProbe(in_msg.LineAddress)));
+ TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
}
}
}
sequencer.invalidateSC(address);
}
- action(h_load_hit, "hd",
- desc="Notify sequencer the load completed.")
+ action(h_load_hit, "h",
+ desc="If not prefetch, notify sequencer the load completed.")
{
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Dcache.setMRU(cache_entry);
sequencer.readCallback(address, cache_entry.DataBlk);
}
- action(h_ifetch_hit, "hi", desc="Notify sequencer the instruction fetch completed.")
+ action(hx_load_hit, "hx",
+ desc="If not prefetch, notify sequencer the load completed.")
{
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Icache.setMRU(cache_entry);
- sequencer.readCallback(address, cache_entry.DataBlk);
- }
-
- action(hx_load_hit, "hx", desc="Notify sequencer the load completed.")
- {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
sequencer.readCallback(address, cache_entry.DataBlk, true);
}
- action(hh_store_hit, "\h", desc="Notify sequencer that store completed.")
+ action(hh_store_hit, "\h",
+ desc="If not prefetch, notify sequencer that store completed.")
{
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Dcache.setMRU(cache_entry);
sequencer.writeCallback(address, cache_entry.DataBlk);
cache_entry.Dirty := true;
}
- action(hhx_store_hit, "\hx", desc="Notify sequencer that store completed.")
+ action(hhx_store_hit, "\hx",
+ desc="If not prefetch, notify sequencer that store completed.")
{
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
sequencer.writeCallback(address, cache_entry.DataBlk, true);
cache_entry.Dirty := true;
}
check_allocate(TBEs);
assert(is_valid(cache_entry));
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.isPrefetch := false;
tbe.Dirty := cache_entry.Dirty;
tbe.DataBlk := cache_entry.DataBlk;
}
transition({S,E,M}, Ifetch) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileInstHit;
k_popMandatoryQueue;
}
// inclusive cache, returns L2 entries only
Entry getCacheEntry(Addr addr), return_by_pointer="yes" {
- return static_cast(Entry, "pointer", L2cache.lookup(addr));
+ return static_cast(Entry, "pointer", L2cache[addr]);
}
bool isSharer(Addr addr, MachineID requestor, Entry cache_entry) {
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
DPRINTF(RubySlicc, "%s\n", L2Cache_State_to_permission(tbe.TBEState));
return L2Cache_State_to_permission(tbe.TBEState);
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
if(L1unblockNetwork_in.isReady()) {
peek(L1unblockNetwork_in, ResponseMsg) {
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
DPRINTF(RubySlicc, "Addr: %s State: %s Sender: %s Type: %s Dest: %s\n",
in_msg.addr, getState(tbe, cache_entry, in_msg.addr),
in_msg.Sender, in_msg.Type, in_msg.Destination);
// test whether it's from a local L1 or an off-chip source
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if(machineIDToMachineType(in_msg.Sender) == MachineType:L1Cache) {
if(in_msg.Type == CoherenceResponseType:DATA) {
if(L1RequestL2Network_in.isReady()) {
peek(L1RequestL2Network_in, RequestMsg) {
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
DPRINTF(RubySlicc, "Addr: %s State: %s Req: %s Type: %s Dest: %s\n",
in_msg.addr, getState(tbe, cache_entry, in_msg.addr),
Entry L2cache_entry := getCacheEntry(L2cache.cacheProbe(in_msg.addr));
if (isDirty(L2cache_entry)) {
trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.addr),
- L2cache_entry, TBEs.lookup(L2cache.cacheProbe(in_msg.addr)));
+ L2cache_entry, TBEs[L2cache.cacheProbe(in_msg.addr)]);
} else {
trigger(Event:L2_Replacement_clean, L2cache.cacheProbe(in_msg.addr),
- L2cache_entry, TBEs.lookup(L2cache.cacheProbe(in_msg.addr)));
+ L2cache_entry, TBEs[L2cache.cacheProbe(in_msg.addr)]);
}
}
}
check_allocate(TBEs);
assert(is_valid(cache_entry));
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.L1_GetS_IDs.clear();
tbe.DataBlk := cache_entry.DataBlk;
tbe.Dirty := cache_entry.Dirty;
void wakeUpBuffers(Addr a);
Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
- Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
+ Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
if (is_valid(dir_entry)) {
return dir_entry;
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
DPRINTF(RubySlicc, "%s\n", Directory_State_to_permission(tbe.TBEState));
return Directory_State_to_permission(tbe.TBEState);
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
if (isGETRequest(in_msg.Type)) {
- trigger(Event:Fetch, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Fetch, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
trigger(Event:DMA_READ, makeLineAddress(in_msg.addr),
- TBEs.lookup(makeLineAddress(in_msg.addr)));
+ TBEs[makeLineAddress(in_msg.addr)]);
} else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
trigger(Event:DMA_WRITE, makeLineAddress(in_msg.addr),
- TBEs.lookup(makeLineAddress(in_msg.addr)));
+ TBEs[makeLineAddress(in_msg.addr)]);
} else {
DPRINTF(RubySlicc, "%s\n", in_msg);
error("Invalid message");
peek(responseNetwork_in, ResponseMsg) {
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceResponseType:MEMORY_DATA) {
- trigger(Event:Data, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Data, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:ACK) {
- trigger(Event:CleanReplacement, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:CleanReplacement, in_msg.addr, TBEs[in_msg.addr]);
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
- trigger(Event:Memory_Data, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
- trigger(Event:Memory_Ack, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
action(v_allocateTBE, "v", desc="Allocate TBE") {
peek(requestNetwork_in, RequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.DataBlk := in_msg.DataBlk;
tbe.PhysicalAddress := in_msg.addr;
tbe.Len := in_msg.Len;
}
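
The DMA allocation action above copies the request's data block, physical address, and length into the freshly allocated TBE. A hypothetical struct with just those fields, for orientation only.

    // Hypothetical TBE payload for a DMA transfer, mirroring the fields the
    // action copies out of the incoming request message.
    #include <array>
    #include <cstdint>

    using Addr = std::uint64_t;

    struct DmaTbeSketch
    {
        std::array<unsigned char, 64> DataBlk{};   // one cache line of payload
        Addr PhysicalAddress = 0;                  // byte address of the access
        int Len = 0;                               // transfer length in bytes
    };
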
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
return L1Cache_State_to_permission(tbe.TBEState);
}
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
peek(forwardRequestNetwork_in, RequestMsg, block_on="addr") {
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceRequestType:GETX) {
trigger(Event:Fwd_GETX, in_msg.addr, cache_entry, tbe);
peek(responseNetwork_in, ResponseMsg, block_on="addr") {
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceResponseType:DATA) {
trigger(Event:Data, in_msg.addr, cache_entry, tbe);
// make room for the block
trigger(Event:Replacement, cacheMemory.cacheProbe(in_msg.LineAddress),
getCacheEntry(cacheMemory.cacheProbe(in_msg.LineAddress)),
- TBEs.lookup(cacheMemory.cacheProbe(in_msg.LineAddress)));
+ TBEs[cacheMemory.cacheProbe(in_msg.LineAddress)]);
}
else {
trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress,
- cache_entry, TBEs.lookup(in_msg.LineAddress));
+ cache_entry, TBEs[in_msg.LineAddress]);
}
}
}
action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
- cacheMemory.setMRU(cache_entry);
sequencer.readCallback(address, cache_entry.DataBlk, false);
}
peek(responseNetwork_in, ResponseMsg) {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
- cacheMemory.setMRU(cache_entry);
sequencer.readCallback(address, cache_entry.DataBlk, true,
machineIDToMachineType(in_msg.Sender));
}
action(s_store_hit, "s", desc="Notify sequencer that store completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
- cacheMemory.setMRU(cache_entry);
sequencer.writeCallback(address, cache_entry.DataBlk, false);
}
peek(responseNetwork_in, ResponseMsg) {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc,"%s\n", cache_entry.DataBlk);
- cacheMemory.setMRU(cache_entry);
sequencer.writeCallback(address, cache_entry.DataBlk, true,
machineIDToMachineType(in_msg.Sender));
}
action(v_allocateTBE, "v", desc="Allocate TBE") {
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
}
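
The bare allocate action above shows the pattern the patch repeats everywhere: allocate a slot keyed by the address, then bind the handler's tbe to whatever the table now returns for that address. A compressed lifecycle sketch with a hypothetical map-backed table; names are illustrative only.

    // Lifecycle sketch: allocate, bind, populate, and deallocate once the
    // transaction completes.
    #include <cstdint>
    #include <unordered_map>

    using Addr = std::uint64_t;
    struct TBE { bool Dirty = false; };

    int main()
    {
        std::unordered_map<Addr, TBE> TBEs;   // stands in for the protocol's TBE table
        Addr address = 0x1000;

        TBEs.emplace(address, TBE{});         // TBEs.allocate(address)
        TBE *tbe = &TBEs[address];            // set_tbe(TBEs[address])
        tbe->Dirty = true;                    // tbe.Dirty := ...
        TBEs.erase(address);                  // w_deallocateTBE
        return 0;
    }
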
action(w_deallocateTBE, "w", desc="Deallocate TBE") {
void unset_tbe();
Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
- Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
+ Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
if (is_valid(dir_entry)) {
return dir_entry;
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
return Directory_State_to_permission(tbe.TBEState);
}
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
if (dmaRequestQueue_in.isReady()) {
peek(dmaRequestQueue_in, DMARequestMsg) {
- TBE tbe := TBEs.lookup(in_msg.LineAddress);
+ TBE tbe := TBEs[in_msg.LineAddress];
if (in_msg.Type == DMARequestType:READ) {
trigger(Event:DMA_READ, in_msg.LineAddress, tbe);
} else if (in_msg.Type == DMARequestType:WRITE) {
in_port(requestQueue_in, RequestMsg, requestToDir) {
if (requestQueue_in.isReady()) {
peek(requestQueue_in, RequestMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:GETS, in_msg.addr, tbe);
} else if (in_msg.Type == CoherenceRequestType:GETX) {
in_port(memQueue_in, MemoryMsg, responseFromMemory) {
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
trigger(Event:Memory_Data, in_msg.addr, tbe);
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
action(v_allocateTBE, "v", desc="Allocate TBE") {
peek(dmaRequestQueue_in, DMARequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.DataBlk := in_msg.DataBlk;
tbe.PhysicalAddress := in_msg.PhysicalAddress;
tbe.Len := in_msg.Len;
action(r_allocateTbeForDmaRead, "\r", desc="Allocate TBE for DMA Read") {
peek(dmaRequestQueue_in, DMARequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.DmaRequestor := in_msg.Requestor;
}
}
action(v_allocateTBEFromRequestNet, "\v", desc="Allocate TBE") {
peek(requestQueue_in, RequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.DataBlk := in_msg.DataBlk;
}
}
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
DPRINTF(RubySlicc, "%s\n", L1Cache_State_to_permission(tbe.TBEState));
return L1Cache_State_to_permission(tbe.TBEState);
if(is_valid(cache_entry)) {
testAndRead(addr, cache_entry.DataBlk, pkt);
} else {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
return num_functional_writes;
}
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
return num_functional_writes;
if (useTimerTable_in.isReady()) {
trigger(Event:Use_Timeout, useTimerTable.readyAddress(),
getCacheEntry(useTimerTable.readyAddress()),
- TBEs.lookup(useTimerTable.readyAddress()));
+ TBEs[useTimerTable.readyAddress()]);
}
}
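
The use-timer port above fires a Use_Timeout for whichever address the timer table reports as ready. A hypothetical deadline-ordered table with that isReady()/readyAddress() shape; the semantics are assumed for illustration.

    // Hypothetical timer table: entries become ready once their deadline has
    // passed; readyAddress() names the line whose timer expired first.
    #include <cstdint>
    #include <map>

    using Addr = std::uint64_t;
    using Tick = std::uint64_t;

    class TimerTableSketch
    {
      public:
        void set(Addr a, Tick deadline) { m_byDeadline.emplace(deadline, a); }

        bool isReady(Tick now) const
        {
            return !m_byDeadline.empty() && m_byDeadline.begin()->first <= now;
        }

        Addr readyAddress() const { return m_byDeadline.begin()->second; }

        void popReady() { m_byDeadline.erase(m_byDeadline.begin()); }

      private:
        std::multimap<Tick, Addr> m_byDeadline;   // earliest deadline first
    };
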
peek(triggerQueue_in, TriggerMsg) {
if (in_msg.Type == TriggerType:ALL_ACKS) {
trigger(Event:All_acks, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else {
error("Unexpected message");
}
if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
if (in_msg.Requestor == machineID && in_msg.RequestorMachine == MachineType:L1Cache) {
trigger(Event:Own_GETX, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else {
trigger(Event:Fwd_GETX, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
}
} else if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:Fwd_GETS, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
trigger(Event:Fwd_DMA, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
trigger(Event:Writeback_Ack, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:WB_ACK_DATA) {
trigger(Event:Writeback_Ack_Data, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
trigger(Event:Writeback_Nack, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:INV) {
trigger(Event:Inv, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else {
error("Unexpected message");
}
peek(responseToL1Cache_in, ResponseMsg, block_on="addr") {
if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Ack, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:DATA) {
trigger(Event:Data, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
trigger(Event:Exclusive_Data, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else {
error("Unexpected message");
}
// The tag matches for the L1, so the L1 asks the L2 for it.
trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.LineAddress, L1Icache_entry,
- TBEs.lookup(in_msg.LineAddress));
+ TBEs[in_msg.LineAddress]);
} else {
Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
if (is_valid(L1Dcache_entry)) {
// The block is in the wrong L1, put the request on the queue to the shared L2
trigger(Event:L1_Replacement, in_msg.LineAddress, L1Dcache_entry,
- TBEs.lookup(in_msg.LineAddress));
+ TBEs[in_msg.LineAddress]);
}
if (L1Icache.cacheAvail(in_msg.LineAddress)) {
// L1 doesn't have the line, but we have space for it in the L1 so let's see if the L2 has it
trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.LineAddress, L1Icache_entry,
- TBEs.lookup(in_msg.LineAddress));
+ TBEs[in_msg.LineAddress]);
} else {
// No room in the L1, so we need to make room in the L1
trigger(Event:L1_Replacement,
L1Icache.cacheProbe(in_msg.LineAddress),
getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
- TBEs.lookup(L1Icache.cacheProbe(in_msg.LineAddress)));
+ TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
}
}
} else {
// The tag matches for the L1, so the L1 asks the L2 for it
trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.LineAddress, L1Dcache_entry,
- TBEs.lookup(in_msg.LineAddress));
+ TBEs[in_msg.LineAddress]);
} else {
Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
if (is_valid(L1Icache_entry)) {
// The block is in the wrong L1, put the request on the queue to the shared L2
trigger(Event:L1_Replacement, in_msg.LineAddress,
- L1Icache_entry, TBEs.lookup(in_msg.LineAddress));
+ L1Icache_entry, TBEs[in_msg.LineAddress]);
}
if (L1Dcache.cacheAvail(in_msg.LineAddress)) {
// L1 doesn't have the line, but we have space for it in the L1 let's see if the L2 has it
trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.LineAddress, L1Dcache_entry,
- TBEs.lookup(in_msg.LineAddress));
+ TBEs[in_msg.LineAddress]);
} else {
// No room in the L1, so we need to make room in the L1
trigger(Event:L1_Replacement,
L1Dcache.cacheProbe(in_msg.LineAddress),
getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
- TBEs.lookup(L1Dcache.cacheProbe(in_msg.LineAddress)));
+ TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
}
}
}
}
}
- action(h_load_hit, "hd", desc="Notify sequencer the load completed.") {
+ action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Dcache.setMRU(cache_entry);
- sequencer.readCallback(address, cache_entry.DataBlk);
- }
-
- action(h_ifetch_hit, "hi", desc="Notify the sequencer about ifetch completion.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Icache.setMRU(cache_entry);
sequencer.readCallback(address, cache_entry.DataBlk);
}
action(hx_load_hit, "hx", desc="Notify sequencer the load completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
sequencer.readCallback(address, cache_entry.DataBlk, true);
}
action(hh_store_hit, "\h", desc="Notify sequencer that store completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Dcache.setMRU(cache_entry);
sequencer.writeCallback(address, cache_entry.DataBlk);
cache_entry.Dirty := true;
}
action(xx_store_hit, "\xx", desc="Notify sequencer that store completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
sequencer.writeCallback(address, cache_entry.DataBlk, true);
cache_entry.Dirty := true;
}
action(i_allocateTBE, "i", desc="Allocate TBE") {
check_allocate(TBEs);
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
assert(is_valid(cache_entry));
tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
tbe.Dirty := cache_entry.Dirty;
}
transition({S, SM, O, OM, MM, MM_W, M, M_W}, Ifetch) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileInstHit;
k_popMandatoryQueue;
}
void unset_tbe();
Entry getCacheEntry(Addr address), return_by_pointer="yes" {
- return static_cast(Entry, "pointer", L2cache.lookup(address));
+ return static_cast(Entry, "pointer", L2cache[address]);
}
bool isDirTagPresent(Addr addr) {
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
DPRINTF(RubySlicc, "%s\n", L2Cache_State_to_permission(tbe.TBEState));
return L2Cache_State_to_permission(tbe.TBEState);
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
peek(triggerQueue_in, TriggerMsg) {
if (in_msg.Type == TriggerType:ALL_ACKS) {
trigger(Event:All_Acks, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else {
error("Unexpected message");
}
if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
if (in_msg.Requestor == machineID) {
trigger(Event:Own_GETX, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else {
trigger(Event:Fwd_GETX, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
}
} else if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:Fwd_GETS, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if(in_msg.Type == CoherenceRequestType:DMA_READ) {
trigger(Event:Fwd_DMA, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:INV) {
trigger(Event:Inv, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:WB_ACK) {
trigger(Event:Writeback_Ack, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:WB_NACK) {
trigger(Event:Writeback_Nack, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else {
error("Unexpected message");
}
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceRequestType:GETX) {
trigger(Event:L1_GETX, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:GETS) {
trigger(Event:L1_GETS, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:PUTO) {
trigger(Event:L1_PUTO, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:PUTX) {
trigger(Event:L1_PUTX, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:PUTS) {
Entry cache_entry := getCacheEntry(in_msg.addr);
if (isOnlySharer(cache_entry, in_msg.addr, in_msg.Requestor)) {
trigger(Event:L1_PUTS_only, in_msg.addr,
- cache_entry, TBEs.lookup(in_msg.addr));
+ cache_entry, TBEs[in_msg.addr]);
}
else {
trigger(Event:L1_PUTS, in_msg.addr,
- cache_entry, TBEs.lookup(in_msg.addr));
+ cache_entry, TBEs[in_msg.addr]);
}
} else {
error("Unexpected message");
if (in_msg.Type == CoherenceResponseType:ACK) {
if (in_msg.SenderMachine == MachineType:L2Cache) {
trigger(Event:ExtAck, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
}
else {
trigger(Event:IntAck, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
}
} else if (in_msg.Type == CoherenceResponseType:DATA) {
trigger(Event:Data, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
trigger(Event:Data_Exclusive, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
trigger(Event:Unblock, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
trigger(Event:Exclusive_Unblock, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
Entry cache_entry := getCacheEntry(in_msg.addr);
if (is_invalid(cache_entry) &&
L2cache.cacheAvail(in_msg.addr) == false) {
trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.addr),
getCacheEntry(L2cache.cacheProbe(in_msg.addr)),
- TBEs.lookup(L2cache.cacheProbe(in_msg.addr)));
+ TBEs[L2cache.cacheProbe(in_msg.addr)]);
}
else {
trigger(Event:L1_WBDIRTYDATA, in_msg.addr,
- cache_entry, TBEs.lookup(in_msg.addr));
+ cache_entry, TBEs[in_msg.addr]);
}
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_DATA) {
Entry cache_entry := getCacheEntry(in_msg.addr);
L2cache.cacheAvail(in_msg.addr) == false) {
trigger(Event:L2_Replacement, L2cache.cacheProbe(in_msg.addr),
getCacheEntry(L2cache.cacheProbe(in_msg.addr)),
- TBEs.lookup(L2cache.cacheProbe(in_msg.addr)));
+ TBEs[L2cache.cacheProbe(in_msg.addr)]);
}
else {
trigger(Event:L1_WBCLEANDATA, in_msg.addr,
- cache_entry, TBEs.lookup(in_msg.addr));
+ cache_entry, TBEs[in_msg.addr]);
}
} else if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
trigger(Event:DmaAck, in_msg.addr,
- getCacheEntry(in_msg.addr), TBEs.lookup(in_msg.addr));
+ getCacheEntry(in_msg.addr), TBEs[in_msg.addr]);
} else {
error("Unexpected message");
}
action(i_allocateTBE, "i", desc="Allocate TBE for internal/external request(isPrefetch=0, number of invalidates=0)") {
check_allocate(TBEs);
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
if(is_valid(cache_entry)) {
tbe.DataBlk := cache_entry.DataBlk;
tbe.Dirty := cache_entry.Dirty;
void unset_tbe();
Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
- Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
+ Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
if (is_valid(dir_entry)) {
return dir_entry;
if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
if (getDirectoryEntry(in_msg.addr).WaitingUnblocks == 1) {
trigger(Event:Last_Unblock, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else {
trigger(Event:Unblock, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
}
} else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
trigger(Event:Exclusive_Unblock, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
trigger(Event:Dirty_Writeback, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_ACK) {
trigger(Event:Clean_Writeback, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
trigger(Event:Data, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
trigger(Event:DMA_ACK, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else {
error("Invalid message");
}
if (requestQueue_in.isReady()) {
peek(requestQueue_in, RequestMsg) {
if (in_msg.Type == CoherenceRequestType:GETS) {
- trigger(Event:GETS, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:GETS, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:GETX) {
- trigger(Event:GETX, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:GETX, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:PUTX) {
- trigger(Event:PUTX, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:PUTX, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:PUTO) {
- trigger(Event:PUTO, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:PUTO, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
- trigger(Event:PUTO_SHARERS, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:PUTO_SHARERS, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
trigger(Event:DMA_READ, makeLineAddress(in_msg.addr),
- TBEs.lookup(makeLineAddress(in_msg.addr)));
+ TBEs[makeLineAddress(in_msg.addr)]);
} else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
trigger(Event:DMA_WRITE, makeLineAddress(in_msg.addr),
- TBEs.lookup(makeLineAddress(in_msg.addr)));
+ TBEs[makeLineAddress(in_msg.addr)]);
} else {
error("Invalid message");
}
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
- trigger(Event:Memory_Data, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
- trigger(Event:Memory_Ack, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
action(v_allocateTBE, "v", desc="Allocate TBE entry") {
peek (requestQueue_in, RequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.PhysicalAddress := in_msg.addr;
tbe.Len := in_msg.Len;
tbe.DataBlk := in_msg.DataBlk;
peek(dmaRequestQueue_in, SequencerMsg) {
if (in_msg.Type == SequencerRequestType:LD ) {
trigger(Event:ReadRequest, in_msg.LineAddress,
- TBEs.lookup(in_msg.LineAddress));
+ TBEs[in_msg.LineAddress]);
} else if (in_msg.Type == SequencerRequestType:ST) {
trigger(Event:WriteRequest, in_msg.LineAddress,
- TBEs.lookup(in_msg.LineAddress));
+ TBEs[in_msg.LineAddress]);
} else {
error("Invalid request type");
}
peek( dmaResponseQueue_in, ResponseMsg) {
if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
trigger(Event:DMA_Ack, makeLineAddress(in_msg.addr),
- TBEs.lookup(makeLineAddress(in_msg.addr)));
+ TBEs[makeLineAddress(in_msg.addr)]);
} else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE ||
in_msg.Type == CoherenceResponseType:DATA) {
trigger(Event:Data, makeLineAddress(in_msg.addr),
- TBEs.lookup(makeLineAddress(in_msg.addr)));
+ TBEs[makeLineAddress(in_msg.addr)]);
} else if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Inv_Ack, makeLineAddress(in_msg.addr),
- TBEs.lookup(makeLineAddress(in_msg.addr)));
+ TBEs[makeLineAddress(in_msg.addr)]);
} else {
error("Invalid response type");
}
if (triggerQueue_in.isReady()) {
peek(triggerQueue_in, TriggerMsg) {
if (in_msg.Type == TriggerType:ALL_ACKS) {
- trigger(Event:All_Acks, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:All_Acks, in_msg.addr, TBEs[in_msg.addr]);
} else {
error("Unexpected message");
}
action(v_allocateTBE, "v", desc="Allocate TBE entry") {
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
}
action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := L1_TBEs.lookup(addr);
+ TBE tbe := L1_TBEs[addr];
if(is_valid(tbe)) {
return L1Cache_State_to_permission(tbe.TBEState);
}
// Use Timer
in_port(useTimerTable_in, Addr, useTimerTable, rank=5) {
if (useTimerTable_in.isReady()) {
- TBE tbe := L1_TBEs.lookup(useTimerTable.readyAddress());
+ TBE tbe := L1_TBEs[useTimerTable.readyAddress()];
if (persistentTable.isLocked(useTimerTable.readyAddress()) &&
(persistentTable.findSmallest(useTimerTable.readyAddress()) != machineID)) {
if (reissueTimerTable_in.isReady()) {
trigger(Event:Request_Timeout, reissueTimerTable.readyAddress(),
getCacheEntry(reissueTimerTable.readyAddress()),
- L1_TBEs.lookup(reissueTimerTable.readyAddress()));
+ L1_TBEs[reissueTimerTable.readyAddress()]);
}
}
// React to the message based on the current state of the table
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := L1_TBEs.lookup(in_msg.addr);
+ TBE tbe := L1_TBEs[in_msg.addr];
if (persistentTable.isLocked(in_msg.addr)) {
if (persistentTable.findSmallest(in_msg.addr) == machineID) {
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := L1_TBEs.lookup(in_msg.addr);
+ TBE tbe := L1_TBEs[in_msg.addr];
// Mark TBE flag if response received off-chip. Use this to update average latency estimate
if ( machineIDToMachineType(in_msg.Sender) == MachineType:L2Cache ) {
// came from an off-chip L2 cache
if (is_valid(tbe)) {
- // L1_TBEs.lookup(in_msg.addr).ExternalResponse := true;
+ // L1_TBEs[in_msg.addr].ExternalResponse := true;
// profile_offchipL2_response(in_msg.addr);
}
}
assert(in_msg.Destination.isElement(machineID));
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := L1_TBEs.lookup(in_msg.addr);
+ TBE tbe := L1_TBEs[in_msg.addr];
if (in_msg.Type == CoherenceRequestType:GETX) {
if (in_msg.isLocal) {
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetches to blocks in D-cache
- TBE tbe := L1_TBEs.lookup(in_msg.LineAddress);
+ TBE tbe := L1_TBEs[in_msg.LineAddress];
if (in_msg.Type == RubyRequestType:IFETCH) {
// ** INSTRUCTION ACCESS ***
trigger(Event:L1_Replacement,
L1Icache.cacheProbe(in_msg.LineAddress),
getL1ICacheEntry(L1Icache.cacheProbe(in_msg.LineAddress)),
- L1_TBEs.lookup(L1Icache.cacheProbe(in_msg.LineAddress)));
+ L1_TBEs[L1Icache.cacheProbe(in_msg.LineAddress)]);
}
}
} else {
trigger(Event:L1_Replacement,
L1Dcache.cacheProbe(in_msg.LineAddress),
getL1DCacheEntry(L1Dcache.cacheProbe(in_msg.LineAddress)),
- L1_TBEs.lookup(L1Dcache.cacheProbe(in_msg.LineAddress)));
+ L1_TBEs[L1Dcache.cacheProbe(in_msg.LineAddress)]);
}
}
}
}
}
- action(h_load_hit, "hd", desc="Notify sequencer the load completed.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
- address, cache_entry.DataBlk);
- L1Dcache.setMRU(cache_entry);
- sequencer.readCallback(address, cache_entry.DataBlk, false,
- MachineType:L1Cache);
- }
-
- action(h_ifetch_hit, "hi", desc="Notify sequencer the load completed.") {
+ action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
address, cache_entry.DataBlk);
- L1Icache.setMRU(cache_entry);
sequencer.readCallback(address, cache_entry.DataBlk, false,
MachineType:L1Cache);
}
DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
address, cache_entry.DataBlk);
peek(responseNetwork_in, ResponseMsg) {
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
sequencer.readCallback(address, cache_entry.DataBlk,
isExternalHit(address, in_msg.Sender),
machineIDToMachineType(in_msg.Sender));
DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
address, cache_entry.DataBlk);
- L1Dcache.setMRU(cache_entry);
sequencer.writeCallback(address, cache_entry.DataBlk, false,
MachineType:L1Cache);
cache_entry.Dirty := true;
DPRINTF(RubySlicc, "Address: %s, Data Block: %s\n",
address, cache_entry.DataBlk);
peek(responseNetwork_in, ResponseMsg) {
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
sequencer.writeCallback(address, cache_entry.DataBlk,
isExternalHit(address, in_msg.Sender),
machineIDToMachineType(in_msg.Sender));
action(i_allocateTBE, "i", desc="Allocate TBE") {
check_allocate(L1_TBEs);
L1_TBEs.allocate(address);
- set_tbe(L1_TBEs.lookup(address));
+ set_tbe(L1_TBEs[address]);
tbe.IssueCount := 0;
peek(mandatoryQueue_in, RubyRequest) {
tbe.PC := in_msg.ProgramCounter;
}
transition({S, SM, S_L, SM_L}, Ifetch) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileInstHit;
k_popMandatoryQueue;
}
// Transitions from Owned
transition({O, OM}, Ifetch) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileInstHit;
k_popMandatoryQueue;
}
// Transitions from Modified
transition({MM, MM_W}, Ifetch) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileInstHit;
k_popMandatoryQueue;
}
// Transitions from Dirty Exclusive
transition({M, M_W}, Ifetch) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileInstHit;
k_popMandatoryQueue;
}
void unset_tbe();
Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
- Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
+ Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
if (is_valid(dir_entry)) {
return dir_entry;
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
return Directory_State_to_permission(tbe.TBEState);
}
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
- trigger(Event:Memory_Data, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Memory_Data, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
- trigger(Event:Memory_Ack, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Memory_Ack, in_msg.addr, TBEs[in_msg.addr]);
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
in_port(reissueTimerTable_in, Addr, reissueTimerTable) {
if (reissueTimerTable_in.isReady()) {
trigger(Event:Request_Timeout, reissueTimerTable.readyAddress(),
- TBEs.lookup(reissueTimerTable.readyAddress()));
+ TBEs[reissueTimerTable.readyAddress()]);
}
}
if ((in_msg.Type == CoherenceResponseType:DATA_OWNER) ||
(in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
trigger(Event:Data_All_Tokens, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
trigger(Event:Ack_Owner_All_Tokens, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Ack_All_Tokens, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
} else {
if (in_msg.Type == CoherenceResponseType:DATA_OWNER) {
trigger(Event:Data_Owner, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else if ((in_msg.Type == CoherenceResponseType:ACK) ||
(in_msg.Type == CoherenceResponseType:DATA_SHARED)) {
trigger(Event:Tokens, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceResponseType:ACK_OWNER) {
trigger(Event:Ack_Owner, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else {
DPRINTF(RubySlicc, "%s\n", in_msg.Type);
error("Invalid message");
if (persistentTable.findSmallest(in_msg.addr) == machineID) {
if (getDirectoryEntry(in_msg.addr).Tokens > 0) {
trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else {
trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
}
} else {
// locked
- trigger(Event:Lockdown, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
}
} else {
// unlocked
- trigger(Event:Unlockdown, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Unlockdown, in_msg.addr, TBEs[in_msg.addr]);
}
}
else {
if (persistentTable.findSmallest(in_msg.addr) == machineID) {
if (getDirectoryEntry(in_msg.addr).Tokens > 0) {
trigger(Event:Own_Lock_or_Unlock_Tokens, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
} else {
trigger(Event:Own_Lock_or_Unlock, in_msg.addr,
- TBEs.lookup(in_msg.addr));
+ TBEs[in_msg.addr]);
}
} else if (in_msg.Type == PersistentRequestType:GETX_PERSISTENT) {
// locked
- trigger(Event:Lockdown, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == PersistentRequestType:GETS_PERSISTENT) {
// locked
- trigger(Event:Lockdown, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Lockdown, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == PersistentRequestType:DEACTIVATE_PERSISTENT) {
// unlocked
- trigger(Event:Unlockdown, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:Unlockdown, in_msg.addr, TBEs[in_msg.addr]);
} else {
error("Invalid message");
}
peek(requestNetwork_in, RequestMsg) {
assert(in_msg.Destination.isElement(machineID));
if (in_msg.Type == CoherenceRequestType:GETS) {
- trigger(Event:GETS, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:GETS, in_msg.addr, TBEs[in_msg.addr]);
} else if (in_msg.Type == CoherenceRequestType:GETX) {
- trigger(Event:GETX, in_msg.addr, TBEs.lookup(in_msg.addr));
+ trigger(Event:GETX, in_msg.addr, TBEs[in_msg.addr]);
} else {
error("Invalid message");
}
if (dmaRequestQueue_in.isReady()) {
peek(dmaRequestQueue_in, DMARequestMsg) {
if (in_msg.Type == DMARequestType:READ) {
- trigger(Event:DMA_READ, in_msg.LineAddress, TBEs.lookup(in_msg.LineAddress));
+ trigger(Event:DMA_READ, in_msg.LineAddress, TBEs[in_msg.LineAddress]);
} else if (in_msg.Type == DMARequestType:WRITE) {
if (getDirectoryEntry(in_msg.LineAddress).Tokens == max_tokens()) {
trigger(Event:DMA_WRITE_All_Tokens, in_msg.LineAddress,
- TBEs.lookup(in_msg.LineAddress));
+ TBEs[in_msg.LineAddress]);
} else {
trigger(Event:DMA_WRITE, in_msg.LineAddress,
- TBEs.lookup(in_msg.LineAddress));
+ TBEs[in_msg.LineAddress]);
}
} else {
error("Invalid message");
action(vd_allocateDmaRequestInTBE, "vd", desc="Record Data in TBE") {
peek(dmaRequestQueue_in, DMARequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.DataBlk := in_msg.DataBlk;
tbe.PhysicalAddress := in_msg.PhysicalAddress;
tbe.Len := in_msg.Len;
if(is_valid(cache_entry)) {
testAndRead(addr, cache_entry.DataBlk, pkt);
} else {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
return num_functional_writes;
}
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
return num_functional_writes;
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
return L1Cache_State_to_permission(tbe.TBEState);
}
peek(triggerQueue_in, TriggerMsg) {
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == TriggerType:L2_to_L1) {
trigger(Event:Complete_L2_to_L1, in_msg.addr, cache_entry, tbe);
peek(responseToCache_in, ResponseMsg, block_on="addr") {
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Ack, in_msg.addr, cache_entry, tbe);
peek(forwardToCache_in, RequestMsg, block_on="addr") {
Entry cache_entry := getCacheEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if ((in_msg.Type == CoherenceRequestType:GETX) ||
(in_msg.Type == CoherenceRequestType:GETF)) {
peek(mandatoryQueue_in, RubyRequest, block_on="LineAddress") {
// Check for data access to blocks in I-cache and ifetchs to blocks in D-cache
- TBE tbe := TBEs.lookup(in_msg.LineAddress);
+ TBE tbe := TBEs[in_msg.LineAddress];
if (in_msg.Type == RubyRequestType:IFETCH) {
// ** INSTRUCTION ACCESS ***
trigger(Event:L2_Replacement,
l2_victim_addr,
getL2CacheEntry(l2_victim_addr),
- TBEs.lookup(l2_victim_addr));
+ TBEs[l2_victim_addr]);
}
}
trigger(Event:L1_to_L2,
l1i_victim_addr,
getL1ICacheEntry(l1i_victim_addr),
- TBEs.lookup(l1i_victim_addr));
+ TBEs[l1i_victim_addr]);
} else {
Addr l2_victim_addr := L2cache.cacheProbe(l1i_victim_addr);
// The L2 does not have room, so we replace a line from the L2
trigger(Event:L2_Replacement,
l2_victim_addr,
getL2CacheEntry(l2_victim_addr),
- TBEs.lookup(l2_victim_addr));
+ TBEs[l2_victim_addr]);
}
}
}
trigger(Event:L2_Replacement,
l2_victim_addr,
getL2CacheEntry(l2_victim_addr),
- TBEs.lookup(l2_victim_addr));
+ TBEs[l2_victim_addr]);
}
}
trigger(Event:L1_to_L2,
l1d_victim_addr,
getL1DCacheEntry(l1d_victim_addr),
- TBEs.lookup(l1d_victim_addr));
+ TBEs[l1d_victim_addr]);
} else {
Addr l2_victim_addr := L2cache.cacheProbe(l1d_victim_addr);
// The L2 does not have room, so we replace a line from the L2
trigger(Event:L2_Replacement,
l2_victim_addr,
getL2CacheEntry(l2_victim_addr),
- TBEs.lookup(l2_victim_addr));
+ TBEs[l2_victim_addr]);
}
}
}
}
}
- action(h_load_hit, "hd", desc="Notify sequencer the load completed.") {
+ action(h_load_hit, "h", desc="Notify sequencer the load completed.") {
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Dcache.setMRU(cache_entry);
- sequencer.readCallback(address, cache_entry.DataBlk, false,
- testAndClearLocalHit(cache_entry));
- }
-
- action(h_ifetch_hit, "hi", desc="Notify sequencer the ifetch completed.") {
- assert(is_valid(cache_entry));
- DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Icache.setMRU(cache_entry);
sequencer.readCallback(address, cache_entry.DataBlk, false,
testAndClearLocalHit(cache_entry));
}
assert(is_valid(tbe));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
peek(responseToCache_in, ResponseMsg) {
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
+
sequencer.readCallback(address, cache_entry.DataBlk, true,
machineIDToMachineType(in_msg.Sender), tbe.InitialRequestTime,
tbe.ForwardRequestTime, tbe.FirstResponseTime);
assert(is_valid(cache_entry));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
peek(mandatoryQueue_in, RubyRequest) {
- L1Dcache.setMRU(cache_entry);
sequencer.writeCallback(address, cache_entry.DataBlk, false,
testAndClearLocalHit(cache_entry));
assert(is_valid(tbe));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
peek(responseToCache_in, ResponseMsg) {
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
+
sequencer.writeCallback(address, cache_entry.DataBlk, true,
machineIDToMachineType(in_msg.Sender), tbe.InitialRequestTime,
tbe.ForwardRequestTime, tbe.FirstResponseTime);
assert(is_valid(cache_entry));
assert(is_valid(tbe));
DPRINTF(RubySlicc, "%s\n", cache_entry.DataBlk);
- L1Icache.setMRU(address);
- L1Dcache.setMRU(address);
+
sequencer.writeCallback(address, cache_entry.DataBlk, true,
machineIDToMachineType(tbe.LastResponder), tbe.InitialRequestTime,
tbe.ForwardRequestTime, tbe.FirstResponseTime);
check_allocate(TBEs);
assert(is_valid(cache_entry));
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.DataBlk := cache_entry.DataBlk; // Data only used for writebacks
tbe.Dirty := cache_entry.Dirty;
tbe.Sharers := false;
action(it_allocateTBE, "it", desc="Allocate TBE") {
check_allocate(TBEs);
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.Dirty := false;
tbe.Sharers := false;
}
}
transition({S, SM, ISM}, Ifetch) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileL1InstHit;
k_popMandatoryQueue;
}
}
transition(SR, Ifetch, S) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileL1InstMiss;
uu_profileL2Hit;
k_popMandatoryQueue;
}
transition({O, OM, SS, MM_W, M_W}, {Ifetch}) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileL1InstHit;
k_popMandatoryQueue;
}
}
transition(OR, Ifetch, O) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileL1InstMiss;
uu_profileL2Hit;
k_popMandatoryQueue;
// Transitions from Modified
transition({MM, M}, {Ifetch}) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileL1InstHit;
k_popMandatoryQueue;
}
}
transition(MMR, Ifetch, MM) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileL1InstMiss;
uu_profileL2Hit;
k_popMandatoryQueue;
}
transition(MR, Ifetch, M) {
- h_ifetch_hit;
+ h_load_hit;
uu_profileL1InstMiss;
uu_profileL2Hit;
k_popMandatoryQueue;
TBETable TBEs, template="<Directory_TBE>", constructor="m_number_of_TBEs";
Entry getDirectoryEntry(Addr addr), return_by_pointer="yes" {
- Entry dir_entry := static_cast(Entry, "pointer", directory.lookup(addr));
+ Entry dir_entry := static_cast(Entry, "pointer", directory[addr]);
if (is_valid(dir_entry)) {
return dir_entry;
}
AccessPermission getAccessPermission(Addr addr) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
return Directory_State_to_permission(tbe.TBEState);
}
}
void functionalRead(Addr addr, Packet *pkt) {
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
int functionalWrite(Addr addr, Packet *pkt) {
int num_functional_writes := 0;
- TBE tbe := TBEs.lookup(addr);
+ TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
num_functional_writes := num_functional_writes +
testAndWrite(addr, tbe.DataBlk, pkt);
if (triggerQueue_in.isReady()) {
peek(triggerQueue_in, TriggerMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == TriggerType:ALL_ACKS) {
trigger(Event:All_acks_and_owner_data, in_msg.addr,
pf_entry, tbe);
if (unblockNetwork_in.isReady()) {
peek(unblockNetwork_in, ResponseMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
trigger(Event:Unblock, in_msg.addr, pf_entry, tbe);
} else if (in_msg.Type == CoherenceResponseType:UNBLOCKS) {
if (responseToDir_in.isReady()) {
peek(responseToDir_in, ResponseMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceResponseType:ACK) {
trigger(Event:Ack, in_msg.addr, pf_entry, tbe);
} else if (in_msg.Type == CoherenceResponseType:ACK_SHARED) {
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
trigger(Event:Memory_Data, in_msg.addr, pf_entry, tbe);
} else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
if (requestQueue_in.isReady()) {
peek(requestQueue_in, RequestMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.addr);
- TBE tbe := TBEs.lookup(in_msg.addr);
+ TBE tbe := TBEs[in_msg.addr];
if (in_msg.Type == CoherenceRequestType:PUT) {
trigger(Event:PUT, in_msg.addr, pf_entry, tbe);
} else if (in_msg.Type == CoherenceRequestType:PUTF) {
trigger(Event:Pf_Replacement,
probeFilter.cacheProbe(in_msg.addr),
getProbeFilterEntry(probeFilter.cacheProbe(in_msg.addr)),
- TBEs.lookup(probeFilter.cacheProbe(in_msg.addr)));
+ TBEs[probeFilter.cacheProbe(in_msg.addr)]);
}
}
} else {
if (dmaRequestQueue_in.isReady()) {
peek(dmaRequestQueue_in, DMARequestMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.LineAddress);
- TBE tbe := TBEs.lookup(in_msg.LineAddress);
+ TBE tbe := TBEs[in_msg.LineAddress];
if (in_msg.Type == DMARequestType:READ) {
trigger(Event:DMA_READ, in_msg.LineAddress, pf_entry, tbe);
} else if (in_msg.Type == DMARequestType:WRITE) {
check_allocate(TBEs);
peek(requestQueue_in, RequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.PhysicalAddress := address;
tbe.ResponseType := CoherenceResponseType:NULL;
}
check_allocate(TBEs);
peek(dmaRequestQueue_in, DMARequestMsg) {
TBEs.allocate(address);
- set_tbe(TBEs.lookup(address));
+ set_tbe(TBEs[address]);
tbe.DmaDataBlk := in_msg.DataBlk;
tbe.PhysicalAddress := in_msg.PhysicalAddress;
tbe.Len := in_msg.Len;
}
external_type(NodeID, default="0", primitive="yes");
-structure (MachineID, external = "yes", non_obj="yes") {
- MachineType getType();
- NodeID getNum();
-}
+external_type(MachineID);
structure (Set, external = "yes", non_obj="yes") {
void setSize(int);
Cycles getTagLatency();
Cycles getDataLatency();
void setMRU(Addr);
- void setMRU(AbstractCacheEntry);
void recordRequestType(CacheRequestType, Addr);
bool checkResourceAvailable(CacheResourceType, Addr);
private:
void alloc();
uint8_t *m_data;
- //! true if this DataBlock is responsible for deleting m_data,
- //! false otherwise.
bool m_alloc;
};
}
void
-Histogram::add(int64_t value)
+Histogram::add(int64 value)
{
assert(value >= 0);
m_max = max(m_max, value);
Histogram(int binsize = 1, uint32_t bins = 50);
~Histogram();
- void add(int64_t value);
+ void add(int64 value);
void add(Histogram& hist);
void doubleBinSize();
uint64_t size() const { return m_count; }
uint32_t getBins() const { return m_data.size(); }
int getBinSize() const { return m_binsize; }
- int64_t getTotal() const { return m_sumSamples; }
+ int64 getTotal() const { return m_sumSamples; }
uint64_t getSquaredTotal() const { return m_sumSquaredSamples; }
uint64_t getData(int index) const { return m_data[index]; }
- int64_t getMax() const { return m_max; }
+ int64 getMax() const { return m_max; }
void printWithMultiplier(std::ostream& out, double multiplier) const;
void printPercent(std::ostream& out) const;
private:
std::vector<uint64_t> m_data;
- int64_t m_max; // the maximum value seen so far
+ int64 m_max; // the maximum value seen so far
uint64_t m_count; // the number of elements added
int m_binsize; // the size of each bucket
uint32_t m_largest_bin; // the largest bin used
- int64_t m_sumSamples; // the sum of all samples
+ int64 m_sumSamples; // the sum of all samples
uint64_t m_sumSquaredSamples; // the sum of the square of all samples
double getStandardDeviation() const;
}
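A quick usage sketch of the Histogram interface shown above (the include path is assumed from the gem5 tree; the sample values are arbitrary):

#include <iostream>
#include "mem/ruby/common/Histogram.hh"   // path assumed

// Record a few samples and dump the summary statistics exposed by the
// interface above (size, max, total).
void
histogramSketch()
{
    Histogram latencies(1, 50);            // binsize 1, 50 bins
    for (int i = 0; i < 200; ++i)
        latencies.add(i);                  // add() takes an int64 sample

    std::cout << "samples=" << latencies.size()
              << " max="    << latencies.getMax()
              << " total="  << latencies.getTotal() << std::endl;
}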
void
-SubBlock::mergeFrom(const DataBlock& data)
+SubBlock::internalMergeFrom(const DataBlock& data)
{
int size = getSize();
assert(size > 0);
}
void
-SubBlock::mergeTo(DataBlock& data) const
+SubBlock::internalMergeTo(DataBlock& data) const
{
int size = getSize();
assert(size > 0);
{
out << "[" << m_address << ", " << getSize() << ", " << m_data << "]";
}
+
+
+
// Merging to and from DataBlocks - We only need to worry about
// updates when we are using DataBlocks
- void mergeTo(DataBlock& data) const;
- void mergeFrom(const DataBlock& data);
+ void mergeTo(DataBlock& data) const { internalMergeTo(data); }
+ void mergeFrom(const DataBlock& data) { internalMergeFrom(data); }
void print(std::ostream& out) const;
private:
+ void internalMergeTo(DataBlock& data) const;
+ void internalMergeFrom(const DataBlock& data);
+
// Data Members (m_ prefix)
Addr m_address;
std::vector<uint8_t> m_data;
#ifndef TYPEDEFINES_H
#define TYPEDEFINES_H
+typedef unsigned long long uint64;
+typedef long long int64;
+
typedef unsigned int LinkID;
typedef unsigned int NodeID;
typedef unsigned int SwitchID;
int
H3BloomFilter::get_index(Addr addr, int i)
{
- uint64_t x = makeLineAddress(addr);
- // uint64_t y = (x*mults_list[i] + adds_list[i]) % primes_list[i];
+ uint64 x = makeLineAddress(addr);
+ // uint64 y = (x*mults_list[i] + adds_list[i]) % primes_list[i];
int y = hash_H3(x,i);
if (isParallel) {
}
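For context, an H3-style hash XORs together one fixed random word for each set bit of the key and then reduces into the filter's index range. A minimal standalone sketch follows; the q[] table is illustrative only (it is not gem5's H3 matrix, and a real H3 uses one distinct word per input bit):

#include <cstdint>

// Illustrative H3-family hash: XOR one precomputed word per set bit of
// the input, then reduce into the filter's index range.
static const uint64_t q[4] = {
    0x9e3779b97f4a7c15ULL, 0xc2b2ae3d27d4eb4fULL,
    0x165667b19e3779f9ULL, 0x27d4eb2f165667c5ULL
};

int
h3HashSketch(uint64_t value, int filterSize)
{
    uint64_t result = 0;
    for (int bit = 0; bit < 64; ++bit) {
        if ((value >> bit) & 1)
            result ^= q[bit % 4];          // real H3: one distinct word per bit
    }
    return static_cast<int>(result % filterSize);
}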
int
-H3BloomFilter::hash_H3(uint64_t value, int index)
+H3BloomFilter::hash_H3(uint64 value, int index)
{
- uint64_t mask = 1;
- uint64_t val = value;
+ uint64 mask = 1;
+ uint64 val = value;
int result = 0;
for (int i = 0; i < 64; i++) {
private:
int get_index(Addr addr, int hashNumber);
- int hash_H3(uint64_t value, int index);
+ int hash_H3(uint64 value, int index);
std::vector<int> m_filter;
int m_filter_size;
// m_skip_bits is used to perform BitSelect after skipping some
// bits. Used to simulate BitSel hashing on larger than cache-line
// granularities
- uint64_t x = (makeLineAddress(addr) >> m_skip_bits);
+ uint64 x = (makeLineAddress(addr) >> m_skip_bits);
int y = hash_bitsel(x, i, m_num_hashes, 30, m_filter_size_bits);
//36-bit addresses, 6-bit cache lines
}
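The comment above describes bit-select hashing; here is a hedged standalone sketch of that idea. The exact bit-selection rule used by gem5's hash_bitsel is an assumption, but the parameters mirror its signature:

#include <cstdint>

// Bit-select sketch: form the filter index by sampling one address bit
// per output bit, starting at `index` and stepping by `jump`, wrapping
// inside the low `maxBits` bits of the (already skip-shifted) address.
int
bitselSketch(uint64_t value, int index, int jump, int maxBits, int numBits)
{
    int result = 0;
    for (int i = 0; i < numBits; ++i) {
        int bit = (index + jump * i) % maxBits;
        result |= static_cast<int>((value >> bit) & 1) << i;
    }
    return result;
}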
int
-MultiBitSelBloomFilter::hash_bitsel(uint64_t value, int index, int jump,
+MultiBitSelBloomFilter::hash_bitsel(uint64 value, int index, int jump,
int maxBits, int numBits)
{
- uint64_t mask = 1;
+ uint64 mask = 1;
int result = 0;
int bit, i;
private:
int get_index(Addr addr, int hashNumber);
- int hash_bitsel(uint64_t value, int index, int jump, int maxBits,
+ int hash_bitsel(uint64 value, int index, int jump, int maxBits,
int numBits);
std::vector<int> m_filter;
(m_prio_heap.front()->getLastEnqueueTime() <= m_receiver->clockEdge()));
}
+bool
+MessageBuffer::functionalRead(Packet *pkt)
+{
+ // Check the priority heap and read any messages that may
+ // correspond to the address in the packet.
+ for (unsigned int i = 0; i < m_prio_heap.size(); ++i) {
+ Message *msg = m_prio_heap[i].get();
+ if (msg->functionalRead(pkt)) return true;
+ }
+
+ // Read the messages in the stall queue that correspond
+ // to the address in the packet.
+ for (StallMsgMapType::iterator map_iter = m_stall_msg_map.begin();
+ map_iter != m_stall_msg_map.end();
+ ++map_iter) {
+
+ for (std::list<MsgPtr>::iterator it = (map_iter->second).begin();
+ it != (map_iter->second).end(); ++it) {
+
+ Message *msg = (*it).get();
+ if (msg->functionalRead(pkt)) return true;
+ }
+ }
+ return false;
+}
+
uint32_t
MessageBuffer::functionalWrite(Packet *pkt)
{
void setIncomingLink(int link_id) { m_input_link_id = link_id; }
void setVnet(int net) { m_vnet_id = net; }
+ // Function for figuring out if any of the messages in the buffer can
+ // satisfy the read request for the address in the packet.
+ // Return value, if true, indicates that the request was fulfilled.
+ bool functionalRead(Packet *pkt);
+
// Function for figuring out if any of the messages in the buffer need
// to be updated with the data from the packet.
// Return value indicates the number of messages that were updated.
int m_not_avail_count; // count the # of times I didn't have N
// slots available
- uint64_t m_msg_counter;
+ uint64 m_msg_counter;
int m_priority_rank;
const bool m_strict_fifo;
const bool m_randomization;
int vnet = t_flit->get_vnet();
m_net_ptr->increment_received_flits(vnet);
- Cycles network_delay = curCycle() - t_flit->get_creation_time();
+ Cycles network_delay = curCycle() - t_flit->get_enqueue_time();
Cycles queueing_delay = t_flit->get_delay();
m_net_ptr->increment_network_latency(network_delay, vnet);
#include "mem/ruby/network/garnet/flexible-pipeline/flit.hh"
flit::flit(int id, int vc, int vnet, int size, MsgPtr msg_ptr, Cycles curTime)
- : m_id(id), m_vnet(vnet), m_vc(vc), m_size(size), m_creation_time(curTime)
{
+ m_size = size;
m_msg_ptr = msg_ptr;
+ m_enqueue_time = curTime;
m_time = curTime;
+ m_id = id;
+ m_vnet = vnet;
+ m_vc = vc;
if (size == 1) {
m_type = HEAD_TAIL_;
m_type = BODY_;
}
+int
+flit::get_size()
+{
+ return m_size;
+}
+
+int
+flit::get_id()
+{
+ return m_id;
+}
+
+Cycles
+flit::get_time()
+{
+ return m_time;
+}
+
+Cycles
+flit::get_enqueue_time()
+{
+ return m_enqueue_time;
+}
+
+void
+flit::set_time(Cycles time)
+{
+ m_time = time;
+}
+
+int
+flit::get_vnet()
+{
+ return m_vnet;
+}
+
+int
+flit::get_vc()
+{
+ return m_vc;
+}
+
+void
+flit::set_vc(int vc)
+{
+ m_vc = vc;
+}
+
+MsgPtr&
+flit::get_msg_ptr()
+{
+ return m_msg_ptr;
+}
+
+flit_type
+flit::get_type()
+{
+ return m_type;
+}
+
+void
+flit::set_delay(Cycles delay)
+{
+ src_delay = delay;
+}
+
+Cycles
+flit::get_delay()
+{
+ return src_delay;
+}
+
void
flit::print(std::ostream& out) const
{
out << "Type=" << m_type << " ";
out << "Vnet=" << m_vnet << " ";
out << "VC=" << m_vc << " ";
- out << "Creation Time=" << m_creation_time << " ";
+ out << "Enqueue Time=" << m_enqueue_time << " ";
out << "]";
}
public:
flit(int id, int vc, int vnet, int size, MsgPtr msg_ptr, Cycles curTime);
- int get_size() const { return m_size; }
- int get_id() const { return m_id; }
- Cycles get_time() const { return m_time; }
- Cycles get_creation_time() const { return m_creation_time; }
- void set_time(Cycles time) { m_time = time; }
- int get_vnet() const { return m_vnet; }
- int get_vc() const { return m_vc; }
- void set_vc(int vc) { m_vc = vc; }
- MsgPtr& get_msg_ptr() { return m_msg_ptr; }
- flit_type get_type() const { return m_type; }
- void set_delay(Cycles delay) { src_delay = delay; }
- Cycles get_delay() const { return src_delay; }
+ int get_size();
+ int get_id();
+ Cycles get_time();
+ Cycles get_enqueue_time();
+ void set_time(Cycles time);
+ int get_vnet();
+ int get_vc();
+ void set_vc(int vc);
+ MsgPtr& get_msg_ptr();
+ flit_type get_type();
+ void set_delay(Cycles delay);
+ Cycles get_delay();
void print(std::ostream& out) const;
static bool
bool functionalWrite(Packet *pkt);
private:
- const int m_id;
- const int m_vnet;
+ int m_id;
+ int m_vnet;
int m_vc;
- const int m_size;
- const Cycles m_creation_time;
- Cycles m_time;
+ int m_size;
+ Cycles m_enqueue_time, m_time;
flit_type m_type;
MsgPtr m_msg_ptr;
Cycles src_delay;
}
PerfectSwitch::PerfectSwitch(SwitchID sid, Switch *sw, uint32_t virt_nets)
- : Consumer(sw), m_switch_id(sid), m_switch(sw)
+ : Consumer(sw)
{
+ m_switch_id = sid;
m_round_robin_start = 0;
m_wakeups_wo_switch = 0;
m_virtual_networks = virt_nets;
void
PerfectSwitch::operateVnet(int vnet)
{
+ MsgPtr msg_ptr;
+ Message *net_msg_ptr = NULL;
+
// This is for round-robin scheduling
int incoming = m_round_robin_start;
m_round_robin_start++;
incoming = 0;
}
+ // temporary vectors to store the routing results
+ vector<LinkID> output_links;
+ vector<NetDest> output_link_destinations;
+
// Is there a message waiting?
if (m_in[incoming].size() <= vnet) {
continue;
continue;
}
- operateMessageBuffer(buffer, incoming, vnet);
- }
- }
-}
-
-void
-PerfectSwitch::operateMessageBuffer(MessageBuffer *buffer, int incoming,
- int vnet)
-{
- MsgPtr msg_ptr;
- Message *net_msg_ptr = NULL;
-
- // temporary vectors to store the routing results
- vector<LinkID> output_links;
- vector<NetDest> output_link_destinations;
-
- while (buffer->isReady()) {
- DPRINTF(RubyNetwork, "incoming: %d\n", incoming);
-
- // Peek at message
- msg_ptr = buffer->peekMsgPtr();
- net_msg_ptr = msg_ptr.get();
- DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
-
- output_links.clear();
- output_link_destinations.clear();
- NetDest msg_dsts = net_msg_ptr->getDestination();
-
- // Unfortunately, the token-protocol sends some
- // zero-destination messages, so this assert isn't valid
- // assert(msg_dsts.count() > 0);
-
- assert(m_link_order.size() == m_routing_table.size());
- assert(m_link_order.size() == m_out.size());
-
- if (m_network_ptr->getAdaptiveRouting()) {
- if (m_network_ptr->isVNetOrdered(vnet)) {
- // Don't adaptively route
- for (int out = 0; out < m_out.size(); out++) {
- m_link_order[out].m_link = out;
- m_link_order[out].m_value = 0;
- }
- } else {
- // Find how clogged each link is
- for (int out = 0; out < m_out.size(); out++) {
- int out_queue_length = 0;
- for (int v = 0; v < m_virtual_networks; v++) {
- out_queue_length += m_out[out][v]->getSize();
+ while (buffer->isReady()) {
+ DPRINTF(RubyNetwork, "incoming: %d\n", incoming);
+
+ // Peek at message
+ msg_ptr = buffer->peekMsgPtr();
+ net_msg_ptr = msg_ptr.get();
+ DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
+
+ output_links.clear();
+ output_link_destinations.clear();
+ NetDest msg_dsts = net_msg_ptr->getDestination();
+
+ // Unfortunately, the token-protocol sends some
+ // zero-destination messages, so this assert isn't valid
+ // assert(msg_dsts.count() > 0);
+
+ assert(m_link_order.size() == m_routing_table.size());
+ assert(m_link_order.size() == m_out.size());
+
+ if (m_network_ptr->getAdaptiveRouting()) {
+ if (m_network_ptr->isVNetOrdered(vnet)) {
+ // Don't adaptively route
+ for (int out = 0; out < m_out.size(); out++) {
+ m_link_order[out].m_link = out;
+ m_link_order[out].m_value = 0;
+ }
+ } else {
+ // Find how clogged each link is
+ for (int out = 0; out < m_out.size(); out++) {
+ int out_queue_length = 0;
+ for (int v = 0; v < m_virtual_networks; v++) {
+ out_queue_length += m_out[out][v]->getSize();
+ }
+ int value =
+ (out_queue_length << 8) |
+ random_mt.random(0, 0xff);
+ m_link_order[out].m_link = out;
+ m_link_order[out].m_value = value;
+ }
+
+ // Look at the most empty link first
+ sort(m_link_order.begin(), m_link_order.end());
}
- int value =
- (out_queue_length << 8) |
- random_mt.random(0, 0xff);
- m_link_order[out].m_link = out;
- m_link_order[out].m_value = value;
}
- // Look at the most empty link first
- sort(m_link_order.begin(), m_link_order.end());
- }
- }
+ for (int i = 0; i < m_routing_table.size(); i++) {
+ // pick the next link to look at
+ int link = m_link_order[i].m_link;
+ NetDest dst = m_routing_table[link];
+ DPRINTF(RubyNetwork, "dst: %s\n", dst);
- for (int i = 0; i < m_routing_table.size(); i++) {
- // pick the next link to look at
- int link = m_link_order[i].m_link;
- NetDest dst = m_routing_table[link];
- DPRINTF(RubyNetwork, "dst: %s\n", dst);
+ if (!msg_dsts.intersectionIsNotEmpty(dst))
+ continue;
- if (!msg_dsts.intersectionIsNotEmpty(dst))
- continue;
+ // Remember what link we're using
+ output_links.push_back(link);
- // Remember what link we're using
- output_links.push_back(link);
+ // Need to remember which destinations need this message in
+ // another vector. This Set is the intersection of the
+ // routing_table entry and the current destination set. The
+ // intersection must not be empty, since we are inside "if"
+ output_link_destinations.push_back(msg_dsts.AND(dst));
- // Need to remember which destinations need this message in
- // another vector. This Set is the intersection of the
- // routing_table entry and the current destination set. The
- // intersection must not be empty, since we are inside "if"
- output_link_destinations.push_back(msg_dsts.AND(dst));
-
- // Next, we update the msg_destination not to include
- // those nodes that were already handled by this link
- msg_dsts.removeNetDest(dst);
- }
+ // Next, we update the msg_destination not to include
+ // those nodes that were already handled by this link
+ msg_dsts.removeNetDest(dst);
+ }
- assert(msg_dsts.count() == 0);
+ assert(msg_dsts.count() == 0);
- // Check for resources - for all outgoing queues
- bool enough = true;
- for (int i = 0; i < output_links.size(); i++) {
- int outgoing = output_links[i];
+ // Check for resources - for all outgoing queues
+ bool enough = true;
+ for (int i = 0; i < output_links.size(); i++) {
+ int outgoing = output_links[i];
- if (!m_out[outgoing][vnet]->areNSlotsAvailable(1))
- enough = false;
+ if (!m_out[outgoing][vnet]->areNSlotsAvailable(1))
+ enough = false;
- DPRINTF(RubyNetwork, "Checking if node is blocked ..."
- "outgoing: %d, vnet: %d, enough: %d\n",
- outgoing, vnet, enough);
- }
+ DPRINTF(RubyNetwork, "Checking if node is blocked ..."
+ "outgoing: %d, vnet: %d, enough: %d\n",
+ outgoing, vnet, enough);
+ }
- // There were not enough resources
- if (!enough) {
- scheduleEvent(Cycles(1));
- DPRINTF(RubyNetwork, "Can't deliver message since a node "
- "is blocked\n");
- DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
- break; // go to next incoming port
- }
+ // There were not enough resources
+ if (!enough) {
+ scheduleEvent(Cycles(1));
+ DPRINTF(RubyNetwork, "Can't deliver message since a node "
+ "is blocked\n");
+ DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
+ break; // go to next incoming port
+ }
- MsgPtr unmodified_msg_ptr;
+ MsgPtr unmodified_msg_ptr;
- if (output_links.size() > 1) {
- // If we are sending this message down more than one link
- // (size>1), we need to make a copy of the message so each
- // branch can have a different internal destination we need
- // to create an unmodified MsgPtr because the MessageBuffer
- // enqueue func will modify the message
+ if (output_links.size() > 1) {
+ // If we are sending this message down more than one link
+ // (size>1), we need to make a copy of the message so each
+                // branch can have a different internal destination. We need
+ // to create an unmodified MsgPtr because the MessageBuffer
+ // enqueue func will modify the message
- // This magic line creates a private copy of the message
- unmodified_msg_ptr = msg_ptr->clone();
- }
+ // This magic line creates a private copy of the message
+ unmodified_msg_ptr = msg_ptr->clone();
+ }
- // Dequeue msg
- buffer->dequeue();
- m_pending_message_count[vnet]--;
+ // Dequeue msg
+ buffer->dequeue();
+ m_pending_message_count[vnet]--;
- // Enqueue it - for all outgoing queues
- for (int i=0; i<output_links.size(); i++) {
- int outgoing = output_links[i];
+ // Enqueue it - for all outgoing queues
+ for (int i=0; i<output_links.size(); i++) {
+ int outgoing = output_links[i];
- if (i > 0) {
- // create a private copy of the unmodified message
- msg_ptr = unmodified_msg_ptr->clone();
- }
+ if (i > 0) {
+ // create a private copy of the unmodified message
+ msg_ptr = unmodified_msg_ptr->clone();
+ }
- // Change the internal destination set of the message so it
- // knows which destinations this link is responsible for.
- net_msg_ptr = msg_ptr.get();
- net_msg_ptr->getDestination() = output_link_destinations[i];
+ // Change the internal destination set of the message so it
+ // knows which destinations this link is responsible for.
+ net_msg_ptr = msg_ptr.get();
+ net_msg_ptr->getDestination() =
+ output_link_destinations[i];
- // Enqeue msg
- DPRINTF(RubyNetwork, "Enqueuing net msg from "
- "inport[%d][%d] to outport [%d][%d].\n",
- incoming, vnet, outgoing, vnet);
+                // Enqueue msg
+ DPRINTF(RubyNetwork, "Enqueuing net msg from "
+ "inport[%d][%d] to outport [%d][%d].\n",
+ incoming, vnet, outgoing, vnet);
- m_out[outgoing][vnet]->enqueue(msg_ptr);
+ m_out[outgoing][vnet]->enqueue(msg_ptr);
+ }
+ }
}
}
}
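A small standalone sketch of the adaptive-routing ordering key computed above: the high bits carry the output queue occupancy and the low byte is random, so an ascending sort tries the least-congested link first and breaks ties randomly (std::rand stands in for gem5's random_mt):

#include <algorithm>
#include <cstdlib>
#include <vector>

struct LinkOrderSketch { int link; int value; };

// Build and sort the per-link keys the same way the switch does above.
std::vector<LinkOrderSketch>
orderLinks(const std::vector<int> &outQueueLengths)
{
    std::vector<LinkOrderSketch> order;
    for (size_t out = 0; out < outQueueLengths.size(); ++out) {
        int value = (outQueueLengths[out] << 8) | (std::rand() & 0xff);
        order.push_back({static_cast<int>(out), value});
    }
    std::sort(order.begin(), order.end(),
              [](const LinkOrderSketch &a, const LinkOrderSketch &b) {
                  return a.value < b.value;
              });
    return order;
}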
PerfectSwitch& operator=(const PerfectSwitch& obj);
void operateVnet(int vnet);
- void operateMessageBuffer(MessageBuffer *b, int incoming, int vnet);
- const SwitchID m_switch_id;
- Switch * const m_switch;
+ SwitchID m_switch_id;
// vector of queues from the components
std::vector<std::vector<MessageBuffer*> > m_in;
#include "mem/ruby/network/simple/Switch.hh"
#include "mem/ruby/network/simple/Throttle.hh"
#include "mem/ruby/profiler/Profiler.hh"
+#include "mem/ruby/system/System.hh"
using namespace std;
using m5::stl_helpers::deletePointers;
SimpleNetwork::SimpleNetwork(const Params *p)
- : Network(p), m_buffer_size(p->buffer_size),
- m_endpoint_bandwidth(p->endpoint_bandwidth),
- m_adaptive_routing(p->adaptive_routing)
+ : Network(p)
{
+ m_buffer_size = p->buffer_size;
+ m_endpoint_bandwidth = p->endpoint_bandwidth;
+ m_adaptive_routing = p->adaptive_routing;
+
+ // Note: the parent Network Object constructor is called before the
+ // SimpleNetwork child constructor. Therefore, the member variables
+ // used below should already be initialized.
+ m_endpoint_switches.resize(m_nodes);
+
// record the routers
for (vector<BasicRouter*>::const_iterator i = p->routers.begin();
i != p->routers.end(); ++i) {
m_switches[src]->addOutPort(m_fromNetQueues[dest], routing_table_entry,
simple_link->m_latency,
simple_link->m_bw_multiplier);
+
+ m_endpoint_switches[dest] = m_switches[src];
}
// From an endpoint node to a switch
}
}
+ for (unsigned int i = 0; i < m_int_link_buffers.size(); ++i) {
+ if (m_int_link_buffers[i]->functionalRead(pkt)) {
+ return true;
+ }
+ }
+
return false;
}
std::vector<Switch*> m_switches;
std::vector<MessageBuffer*> m_int_link_buffers;
int m_num_connected_buffers;
- const int m_buffer_size;
- const int m_endpoint_bandwidth;
- const bool m_adaptive_routing;
+ std::vector<Switch*> m_endpoint_switches;
+
+ int m_buffer_size;
+ int m_endpoint_bandwidth;
+ bool m_adaptive_routing;
//Statistical variables
Stats::Formula m_msg_counts[MessageSizeType_NUM];
bool
Switch::functionalRead(Packet *pkt)
{
+ // Access the buffers in the switch for performing a functional read
+ for (unsigned int i = 0; i < m_port_buffers.size(); ++i) {
+ if (m_port_buffers[i]->functionalRead(pkt)) {
+ return true;
+ }
+ }
return false;
}
#include "base/cast.hh"
#include "base/cprintf.hh"
#include "debug/RubyNetwork.hh"
-#include "mem/ruby/network/simple/Switch.hh"
#include "mem/ruby/network/simple/Throttle.hh"
#include "mem/ruby/network/MessageBuffer.hh"
#include "mem/ruby/network/Network.hh"
Throttle::Throttle(int sID, RubySystem *rs, NodeID node, Cycles link_latency,
int link_bandwidth_multiplier, int endpoint_bandwidth,
- Switch *em)
- : Consumer(em), m_switch_id(sID), m_switch(em), m_node(node),
- m_ruby_system(rs)
+ ClockedObject *em)
+ : Consumer(em), m_ruby_system(rs)
{
+ init(node, link_latency, link_bandwidth_multiplier, endpoint_bandwidth);
+ m_sID = sID;
+}
+
+Throttle::Throttle(RubySystem *rs, NodeID node, Cycles link_latency,
+ int link_bandwidth_multiplier, int endpoint_bandwidth,
+ ClockedObject *em)
+ : Consumer(em), m_ruby_system(rs)
+{
+ init(node, link_latency, link_bandwidth_multiplier, endpoint_bandwidth);
+ m_sID = 0;
+}
+
+void
+Throttle::init(NodeID node, Cycles link_latency,
+ int link_bandwidth_multiplier, int endpoint_bandwidth)
+{
+ m_node = node;
m_vnets = 0;
assert(link_bandwidth_multiplier > 0);
// Set consumer and description
in_ptr->setConsumer(this);
- string desc = "[Queue to Throttle " + to_string(m_switch_id) + " " +
+ string desc = "[Queue to Throttle " + to_string(m_sID) + " " +
to_string(m_node) + "]";
}
}
#include "mem/ruby/system/System.hh"
class MessageBuffer;
-class Switch;
class Throttle : public Consumer
{
public:
Throttle(int sID, RubySystem *rs, NodeID node, Cycles link_latency,
int link_bandwidth_multiplier, int endpoint_bandwidth,
- Switch *em);
+ ClockedObject *em);
+ Throttle(RubySystem *rs, NodeID node, Cycles link_latency,
+ int link_bandwidth_multiplier, int endpoint_bandwidth,
+ ClockedObject *em);
~Throttle() {}
std::string name()
- { return csprintf("Throttle-%i", m_switch_id); }
+ { return csprintf("Throttle-%i", m_sID); }
void addLinks(const std::vector<MessageBuffer*>& in_vec,
const std::vector<MessageBuffer*>& out_vec);
unsigned int m_vnets;
std::vector<int> m_units_remaining;
- const int m_switch_id;
- Switch *m_switch;
+ int m_sID;
NodeID m_node;
-
int m_link_bandwidth_multiplier;
Cycles m_link_latency;
int m_wakeups_wo_switch;
private:
Addr m_addr;
- uint64_t m_loads;
- uint64_t m_stores;
- uint64_t m_atomics;
- uint64_t m_total;
- uint64_t m_user;
- uint64_t m_sharing;
+ uint64 m_loads;
+ uint64 m_stores;
+ uint64 m_atomics;
+ uint64 m_total;
+ uint64 m_user;
+ uint64 m_sharing;
Set m_touched_by;
Histogram* m_histogram_ptr;
};
{
const int records_printed = 100;
- uint64_t misses = 0;
+ uint64 misses = 0;
std::vector<const AccessTraceForAddress *> sorted;
AddressMap::const_iterator i = record_map.begin();
Histogram all_records_log(-1);
// Allows us to track how many lines where touched by n processors
- std::vector<int64_t> m_touched_vec;
- std::vector<int64_t> m_touched_weighted_vec;
+ std::vector<int64> m_touched_vec;
+ std::vector<int64> m_touched_weighted_vec;
m_touched_vec.resize(num_of_sequencers+1);
m_touched_weighted_vec.resize(num_of_sequencers+1);
for (int j = 0; j < m_touched_vec.size(); j++) {
AddressProfiler(const AddressProfiler& obj);
AddressProfiler& operator=(const AddressProfiler& obj);
- int64_t m_sharing_miss_counter;
+ int64 m_sharing_miss_counter;
AddressMap m_dataAccessTrace;
AddressMap m_macroBlockAccessTrace;
using m5::stl_helpers::operator<<;
Profiler::Profiler(const RubySystemParams *p, RubySystem *rs)
- : m_ruby_system(rs), m_hot_lines(p->hot_lines),
- m_all_instructions(p->all_instructions),
- m_num_vnets(p->number_of_virtual_networks)
+ : m_ruby_system(rs)
{
+ m_hot_lines = p->hot_lines;
+ m_all_instructions = p->all_instructions;
+
m_address_profiler_ptr = new AddressProfiler(p->num_of_sequencers, this);
m_address_profiler_ptr->setHotLines(m_hot_lines);
m_address_profiler_ptr->setAllInstructions(m_all_instructions);
.desc("delay histogram for all message")
.flags(Stats::nozero | Stats::pdf | Stats::oneline);
- for (int i = 0; i < m_num_vnets; i++) {
+ uint32_t numVNets = Network::getNumberOfVirtualNetworks();
+ for (int i = 0; i < numVNets; i++) {
delayVCHistogram.push_back(new Stats::Histogram());
delayVCHistogram[i]
->init(10)
m_inst_profiler_ptr->collateStats();
}
+ uint32_t numVNets = Network::getNumberOfVirtualNetworks();
for (uint32_t i = 0; i < MachineType_NUM; i++) {
for (map<uint32_t, AbstractController*>::iterator it =
m_ruby_system->m_abstract_controls[i].begin();
AbstractController *ctr = (*it).second;
delayHistogram.add(ctr->getDelayHist());
- for (uint32_t i = 0; i < m_num_vnets; i++) {
+ for (uint32_t i = 0; i < numVNets; i++) {
delayVCHistogram[i]->add(ctr->getDelayVCHist(i));
}
}
void addAddressTraceSample(const RubyRequest& msg, NodeID id);
// added by SS
- bool getHotLines() const { return m_hot_lines; }
- bool getAllInstructions() const { return m_all_instructions; }
+ bool getHotLines() { return m_hot_lines; }
+ bool getAllInstructions() { return m_all_instructions; }
private:
// Private copy constructor and assignment operator
Stats::Scalar m_IncompleteTimes[MachineType_NUM];
//added by SS
- const bool m_hot_lines;
- const bool m_all_instructions;
- const uint32_t m_num_vnets;
+ bool m_hot_lines;
+ bool m_all_instructions;
};
#endif // __MEM_RUBY_PROFILER_PROFILER_HH__
bool StoreTrace::s_init = false; // Total number of store lifetimes of
// all lines
-int64_t StoreTrace::s_total_samples = 0; // Total number of store
+int64 StoreTrace::s_total_samples = 0; // Total number of store
// lifetimes of all lines
Histogram* StoreTrace::s_store_count_ptr = NULL;
Histogram* StoreTrace::s_store_first_to_stolen_ptr = NULL;
private:
static bool s_init;
- static int64_t s_total_samples; // Total number of store lifetimes
+ static int64 s_total_samples; // Total number of store lifetimes
// of all lines
static Histogram* s_store_count_ptr;
static Histogram* s_store_first_to_stolen_ptr;
Tick m_last_store;
int m_stores_this_interval;
- int64_t m_total_samples; // Total number of store lifetimes of this line
+ int64 m_total_samples; // Total number of store lifetimes of this line
Histogram m_store_count;
Histogram m_store_first_to_stolen;
Histogram m_store_last_to_stolen;
#include "mem/ruby/slicc_interface/AbstractCacheEntry.hh"
-#include "base/trace.hh"
-#include "debug/RubyCache.hh"
-
AbstractCacheEntry::AbstractCacheEntry()
{
m_Permission = AccessPermission_NotPresent;
m_locked = -1;
}
}
-
-void
-AbstractCacheEntry::setLocked(int context)
-{
- DPRINTF(RubyCache, "Setting Lock for addr: %x to %d\n", m_Address, context);
- m_locked = context;
-}
-
-void
-AbstractCacheEntry::clearLocked()
-{
- DPRINTF(RubyCache, "Clear Lock for addr: %x\n", m_Address);
- m_locked = -1;
-}
-
-bool
-AbstractCacheEntry::isLocked(int context) const
-{
- DPRINTF(RubyCache, "Testing Lock for addr: %llx cur %d con %d\n",
- m_Address, m_locked, context);
- return m_locked == context;
-}
virtual DataBlock& getDataBlk()
{ panic("getDataBlk() not implemented!"); }
- // Functions for locking and unlocking the cache entry. These are required
- // for supporting atomic memory accesses.
- void setLocked(int context);
- void clearLocked();
- bool isLocked(int context) const;
- void setSetIndex(uint32_t s) { m_set_index = s; }
- uint32_t getSetIndex() const { return m_set_index; }
-
- void setWayIndex(uint32_t s) { m_way_index = s; }
- uint32_t getWayIndex() const { return m_way_index; }
-
- // Address of this block, required by CacheMemory
- Addr m_Address;
- // Holds info whether the address is locked.
- // Required for implementing LL/SC operations.
- int m_locked;
-
- private:
- // Set and way coordinates of the entry within the cache memory object.
- uint32_t m_set_index;
- uint32_t m_way_index;
+ Addr m_Address; // Address of this block, required by CacheMemory
+ int m_locked; // Holds info whether the address is locked,
+ // required for implementing LL/SC
};
inline std::ostream&
void wakeUpAllBuffers();
protected:
- const NodeID m_version;
+ NodeID m_version;
MachineID m_machineID;
- const NodeID m_clusterID;
+ NodeID m_clusterID;
// MasterID used by some components of gem5.
- const MasterID m_masterId;
+ MasterID m_masterId;
- Network *m_net_ptr;
+ Network* m_net_ptr;
bool m_is_blocking;
std::map<Addr, MessageBuffer*> m_block_map;
unsigned int m_in_ports;
unsigned int m_cur_in_port;
- const int m_number_of_TBEs;
- const int m_transitions_per_cycle;
- const unsigned int m_buffer_size;
+ int m_number_of_TBEs;
+ int m_transitions_per_cycle;
+ unsigned int m_buffer_size;
Cycles m_recycle_latency;
//! Counter for the number of cycles when the transitions carried out
}
Tick
-AbstractReplacementPolicy::getLastAccess(int64_t set, int64_t way)
+AbstractReplacementPolicy::getLastAccess(int64 set, int64 way)
{
return m_last_ref_ptr[set][way];
}
virtual ~AbstractReplacementPolicy();
/* touch a block. a.k.a. update timestamp */
- virtual void touch(int64_t set, int64_t way, Tick time) = 0;
+ virtual void touch(int64 set, int64 way, Tick time) = 0;
/* returns the way to replace */
- virtual int64_t getVictim(int64_t set) const = 0;
+ virtual int64 getVictim(int64 set) const = 0;
/* get the time of the last access */
- Tick getLastAccess(int64_t set, int64_t way);
+ Tick getLastAccess(int64 set, int64 way);
virtual bool useOccupancy() const { return false; }
}
bool
-BankedArray::tryAccess(int64_t idx)
+BankedArray::tryAccess(int64 idx)
{
if (accessLatency == 0)
return true;
}
void
-BankedArray::reserve(int64_t idx)
+BankedArray::reserve(int64 idx)
{
if (accessLatency == 0)
return;
}
unsigned int
-BankedArray::mapIndexToBank(int64_t idx)
+BankedArray::mapIndexToBank(int64 idx)
{
if (banks == 1) {
return 0;
{
public:
AccessRecord() : idx(0), startAccess(0), endAccess(0) {}
- int64_t idx;
+ int64 idx;
Tick startAccess;
Tick endAccess;
};
// otherwise, schedule the event and wait for it to complete
std::vector<AccessRecord> busyBanks;
- unsigned int mapIndexToBank(int64_t idx);
+ unsigned int mapIndexToBank(int64 idx);
public:
BankedArray(unsigned int banks, Cycles accessLatency,
// Note: We try the access based on the cache index, not the address
// This is so we don't get aliasing on blocks being replaced
- bool tryAccess(int64_t idx);
+ bool tryAccess(int64 idx);
- void reserve(int64_t idx);
+ void reserve(int64 idx);
Cycles getLatency() const { return accessLatency; }
};
}
// convert a Address to its location in the cache
-int64_t
+int64
CacheMemory::addressToCacheSet(Addr address) const
{
assert(address == makeLineAddress(address));
// Given a cache index: returns the index of the tag in a set.
// returns -1 if the tag is not found.
int
-CacheMemory::findTagInSet(int64_t cacheSet, Addr tag) const
+CacheMemory::findTagInSet(int64 cacheSet, Addr tag) const
{
assert(tag == makeLineAddress(tag));
// search the set for the tags
// Given a cache index: returns the index of the tag in a set.
// returns -1 if the tag is not found.
int
-CacheMemory::findTagInSetIgnorePermissions(int64_t cacheSet,
+CacheMemory::findTagInSetIgnorePermissions(int64 cacheSet,
Addr tag) const
{
assert(tag == makeLineAddress(tag));
return entry->m_Address;
}
+bool
+CacheMemory::tryCacheAccess(Addr address, RubyRequestType type,
+ DataBlock*& data_ptr)
+{
+ assert(address == makeLineAddress(address));
+ DPRINTF(RubyCache, "address: %s\n", address);
+ int64 cacheSet = addressToCacheSet(address);
+ int loc = findTagInSet(cacheSet, address);
+ if (loc != -1) {
+ // Do we even have a tag match?
+ AbstractCacheEntry* entry = m_cache[cacheSet][loc];
+ m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
+ data_ptr = &(entry->getDataBlk());
+
+ if (entry->m_Permission == AccessPermission_Read_Write) {
+ return true;
+ }
+ if ((entry->m_Permission == AccessPermission_Read_Only) &&
+ (type == RubyRequestType_LD || type == RubyRequestType_IFETCH)) {
+ return true;
+ }
+ // The line must not be accessible
+ }
+ data_ptr = NULL;
+ return false;
+}
+
+bool
+CacheMemory::testCacheAccess(Addr address, RubyRequestType type,
+ DataBlock*& data_ptr)
+{
+ assert(address == makeLineAddress(address));
+ DPRINTF(RubyCache, "address: %s\n", address);
+ int64 cacheSet = addressToCacheSet(address);
+ int loc = findTagInSet(cacheSet, address);
+
+ if (loc != -1) {
+ // Do we even have a tag match?
+ AbstractCacheEntry* entry = m_cache[cacheSet][loc];
+ m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
+ data_ptr = &(entry->getDataBlk());
+
+ return m_cache[cacheSet][loc]->m_Permission !=
+ AccessPermission_NotPresent;
+ }
+
+ data_ptr = NULL;
+ return false;
+}
+
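A hypothetical caller of the two helpers restored above; the include path and the probeLine wrapper are assumptions, but the signatures match the code:

#include "mem/ruby/structures/CacheMemory.hh"   // path assumed

// tryCacheAccess checks both presence and permission for the request
// type and, on a hit, points data at the block; testCacheAccess only
// checks that the block is present in some state other than NotPresent.
bool
probeLine(CacheMemory *cache, Addr lineAddr)
{
    DataBlock *data = NULL;
    if (cache->tryCacheAccess(lineAddr, RubyRequestType_LD, data)) {
        return true;    // load hit: data now points at the cached block
    }
    return false;       // miss, or insufficient permission for a load
}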
// tests to see if an address is present in the cache
bool
CacheMemory::isTagPresent(Addr address) const
{
assert(address == makeLineAddress(address));
- int64_t cacheSet = addressToCacheSet(address);
+ int64 cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
if (loc == -1) {
{
assert(address == makeLineAddress(address));
- int64_t cacheSet = addressToCacheSet(address);
+ int64 cacheSet = addressToCacheSet(address);
for (int i = 0; i < m_cache_assoc; i++) {
AbstractCacheEntry* entry = m_cache[cacheSet][i];
}
AbstractCacheEntry*
-CacheMemory::allocate(Addr address, AbstractCacheEntry *entry, bool touch)
+CacheMemory::allocate(Addr address, AbstractCacheEntry* entry, bool touch)
{
assert(address == makeLineAddress(address));
assert(!isTagPresent(address));
DPRINTF(RubyCache, "address: %s\n", address);
// Find the first open slot
- int64_t cacheSet = addressToCacheSet(address);
+ int64 cacheSet = addressToCacheSet(address);
std::vector<AbstractCacheEntry*> &set = m_cache[cacheSet];
for (int i = 0; i < m_cache_assoc; i++) {
if (!set[i] || set[i]->m_Permission == AccessPermission_NotPresent) {
address);
set[i]->m_locked = -1;
m_tag_index[address] = i;
- entry->setSetIndex(cacheSet);
- entry->setWayIndex(i);
if (touch) {
m_replacementPolicy_ptr->touch(cacheSet, i, curTick());
assert(address == makeLineAddress(address));
assert(isTagPresent(address));
DPRINTF(RubyCache, "address: %s\n", address);
- int64_t cacheSet = addressToCacheSet(address);
+ int64 cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
if (loc != -1) {
delete m_cache[cacheSet][loc];
assert(address == makeLineAddress(address));
assert(!cacheAvail(address));
- int64_t cacheSet = addressToCacheSet(address);
+ int64 cacheSet = addressToCacheSet(address);
return m_cache[cacheSet][m_replacementPolicy_ptr->getVictim(cacheSet)]->
m_Address;
}
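For intuition, a worked sketch of the line-to-set mapping that addressToCacheSet and cacheProbe rely on, assuming power-of-two sets and 64-byte lines (the exact bit selection gem5 uses is not shown in this hunk):

#include <cassert>
#include <cstdint>

// Common set-index mapping: drop the block-offset bits, then mask with
// (numSets - 1). With 256 sets and 64B lines, address 0x12345 maps to
// set ((0x12345 >> 6) & 0xff) == 0x8d.
int64_t
addressToSetSketch(uint64_t address, int numSets, int blockBits = 6)
{
    assert((numSets & (numSets - 1)) == 0);   // power-of-two sets
    return (address >> blockBits) & (numSets - 1);
}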
CacheMemory::lookup(Addr address)
{
assert(address == makeLineAddress(address));
- int64_t cacheSet = addressToCacheSet(address);
+ int64 cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
if(loc == -1) return NULL;
return m_cache[cacheSet][loc];
CacheMemory::lookup(Addr address) const
{
assert(address == makeLineAddress(address));
- int64_t cacheSet = addressToCacheSet(address);
+ int64 cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
if(loc == -1) return NULL;
return m_cache[cacheSet][loc];
void
CacheMemory::setMRU(Addr address)
{
- int64_t cacheSet = addressToCacheSet(address);
+ int64 cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
if(loc != -1)
m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
}
-void
-CacheMemory::setMRU(const AbstractCacheEntry *e)
-{
- uint32_t cacheSet = e->getSetIndex();
- uint32_t loc = e->getWayIndex();
- m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
-}
-
void
CacheMemory::recordCacheContents(int cntrl, CacheRecorder* tr) const
{
- uint64_t warmedUpBlocks = 0;
- uint64_t totalBlocks M5_VAR_USED = (uint64_t)m_cache_num_sets *
- (uint64_t)m_cache_assoc;
+ uint64 warmedUpBlocks = 0;
+ uint64 totalBlocks M5_VAR_USED = (uint64)m_cache_num_sets
+ * (uint64)m_cache_assoc;
for (int i = 0; i < m_cache_num_sets; i++) {
for (int j = 0; j < m_cache_assoc; j++) {
DPRINTF(RubyCacheTrace, "%s: %lli blocks of %lli total blocks"
"recorded %.2f%% \n", name().c_str(), warmedUpBlocks,
- totalBlocks, (float(warmedUpBlocks) / float(totalBlocks)) * 100.0);
+ (uint64)m_cache_num_sets * (uint64)m_cache_assoc,
+ (float(warmedUpBlocks)/float(totalBlocks))*100.0);
}
void
{
DPRINTF(RubyCache, "Setting Lock for addr: %x to %d\n", address, context);
assert(address == makeLineAddress(address));
- int64_t cacheSet = addressToCacheSet(address);
+ int64 cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
assert(loc != -1);
- m_cache[cacheSet][loc]->setLocked(context);
+ m_cache[cacheSet][loc]->m_locked = context;
}
void
{
DPRINTF(RubyCache, "Clear Lock for addr: %x\n", address);
assert(address == makeLineAddress(address));
- int64_t cacheSet = addressToCacheSet(address);
+ int64 cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
assert(loc != -1);
- m_cache[cacheSet][loc]->clearLocked();
+ m_cache[cacheSet][loc]->m_locked = -1;
}
bool
CacheMemory::isLocked(Addr address, int context)
{
assert(address == makeLineAddress(address));
- int64_t cacheSet = addressToCacheSet(address);
+ int64 cacheSet = addressToCacheSet(address);
int loc = findTagInSet(cacheSet, address);
assert(loc != -1);
DPRINTF(RubyCache, "Testing Lock for addr: %llx cur %d con %d\n",
address, m_cache[cacheSet][loc]->m_locked, context);
- return m_cache[cacheSet][loc]->isLocked(context);
+ return m_cache[cacheSet][loc]->m_locked == context;
}
void
}
bool
-CacheMemory::isBlockInvalid(int64_t cache_set, int64_t loc)
+CacheMemory::isBlockInvalid(int64 cache_set, int64 loc)
{
return (m_cache[cache_set][loc]->m_Permission == AccessPermission_Invalid);
}
bool
-CacheMemory::isBlockNotBusy(int64_t cache_set, int64_t loc)
+CacheMemory::isBlockNotBusy(int64 cache_set, int64 loc)
{
return (m_cache[cache_set][loc]->m_Permission != AccessPermission_Busy);
}
void init();
+ // Public Methods
+ // perform a cache access and see if we hit or not. Return true on a hit.
+ bool tryCacheAccess(Addr address, RubyRequestType type,
+ DataBlock*& data_ptr);
+
+ // similar to above, but doesn't require full access check
+ bool testCacheAccess(Addr address, RubyRequestType type,
+ DataBlock*& data_ptr);
+
// tests to see if an address is present in the cache
bool isTagPresent(Addr address) const;
Cycles getTagLatency() const { return tagArray.getLatency(); }
Cycles getDataLatency() const { return dataArray.getLatency(); }
- bool isBlockInvalid(int64_t cache_set, int64_t loc);
- bool isBlockNotBusy(int64_t cache_set, int64_t loc);
+ bool isBlockInvalid(int64 cache_set, int64 loc);
+ bool isBlockNotBusy(int64 cache_set, int64 loc);
// Hook for checkpointing the contents of the cache
void recordCacheContents(int cntrl, CacheRecorder* tr) const;
// Set this address to most recently used
void setMRU(Addr address);
- // Set this entry to most recently used
- void setMRU(const AbstractCacheEntry *e);
-
- // Functions for locking and unlocking cache lines corresponding to the
- // provided address. These are required for supporting atomic memory
- // accesses. These are to be used when only the address of the cache entry
- // is available. In case the entry itself is available. use the functions
- // provided by the AbstractCacheEntry class.
+
void setLocked (Addr addr, int context);
void clearLocked (Addr addr);
bool isLocked (Addr addr, int context);
private:
// convert a Address to its location in the cache
- int64_t addressToCacheSet(Addr address) const;
+ int64 addressToCacheSet(Addr address) const;
// Given a cache tag: returns the index of the tag in a set.
// returns -1 if the tag is not found.
- int findTagInSet(int64_t line, Addr tag) const;
- int findTagInSetIgnorePermissions(int64_t cacheSet, Addr tag) const;
+ int findTagInSet(int64 line, Addr tag) const;
+ int findTagInSetIgnorePermissions(int64 cacheSet, Addr tag) const;
// Private copy constructor and assignment operator
CacheMemory(const CacheMemory& obj);
int DirectoryMemory::m_num_directories = 0;
int DirectoryMemory::m_num_directories_bits = 0;
+uint64_t DirectoryMemory::m_total_size_bytes = 0;
int DirectoryMemory::m_numa_high_bit = 0;
DirectoryMemory::DirectoryMemory(const Params *p)
m_num_directories++;
m_num_directories_bits = ceilLog2(m_num_directories);
+ m_total_size_bytes += m_size_bytes;
if (m_numa_high_bit == 0) {
m_numa_high_bit = RubySystem::getMemorySizeBits() - 1;
static int m_num_directories;
static int m_num_directories_bits;
+ static uint64_t m_total_size_bytes;
static int m_numa_high_bit;
};
void
-LRUPolicy::touch(int64_t set, int64_t index, Tick time)
+LRUPolicy::touch(int64 set, int64 index, Tick time)
{
assert(index >= 0 && index < m_assoc);
assert(set >= 0 && set < m_num_sets);
m_last_ref_ptr[set][index] = time;
}
-int64_t
-LRUPolicy::getVictim(int64_t set) const
+int64
+LRUPolicy::getVictim(int64 set) const
{
Tick time, smallest_time;
- int64_t smallest_index;
+ int64 smallest_index;
smallest_index = 0;
smallest_time = m_last_ref_ptr[set][0];
LRUPolicy(const Params * p);
~LRUPolicy();
- void touch(int64_t set, int64_t way, Tick time);
- int64_t getVictim(int64_t set) const;
+ void touch(int64 set, int64 way, Tick time);
+ int64 getVictim(int64 set) const;
};
#endif // __MEM_RUBY_STRUCTURES_LRUPOLICY_HH__
// associativity cannot exceed capacity of tree representation
assert(m_num_sets > 0 &&
m_assoc > 1 &&
- m_assoc <= (int64_t) sizeof(uint64_t)*4);
+ m_assoc <= (int64) sizeof(uint64)*4);
m_trees = NULL;
m_num_levels = 0;
m_num_levels++;
}
assert(m_num_levels < sizeof(unsigned int)*4);
- m_trees = new uint64_t[m_num_sets];
+ m_trees = new uint64[m_num_sets];
for (unsigned i = 0; i < m_num_sets; i++) {
m_trees[i] = 0;
}
}
void
-PseudoLRUPolicy::touch(int64_t set, int64_t index, Tick time)
+PseudoLRUPolicy::touch(int64 set, int64 index, Tick time)
{
assert(index >= 0 && index < m_assoc);
assert(set >= 0 && set < m_num_sets);
m_last_ref_ptr[set][index] = time;
}
-int64_t
-PseudoLRUPolicy::getVictim(int64_t set) const
+int64
+PseudoLRUPolicy::getVictim(int64 set) const
{
- int64_t index = 0;
+ int64 index = 0;
int tree_index = 0;
int node_val;
PseudoLRUPolicy(const Params * p);
~PseudoLRUPolicy();
- void touch(int64_t set, int64_t way, Tick time);
- int64_t getVictim(int64_t set) const;
+ void touch(int64 set, int64 way, Tick time);
+ int64 getVictim(int64 set) const;
private:
unsigned int m_effective_assoc; /** nearest (to ceiling) power of 2 */
unsigned int m_num_levels; /** number of levels in the tree */
- uint64_t *m_trees; /** bit representation of the
+ uint64* m_trees; /** bit representation of the
* trees, one for each set */
};
RubyMemoryControl::init()
{
m_msg_counter = 0;
- assert(m_tFaw <= 62); // must fit in a uint64_t shift register
+ assert(m_tFaw <= 62); // must fit in a uint64 shift register
m_total_banks = m_banks_per_rank * m_ranks_per_dimm * m_dimms_per_channel;
m_total_ranks = m_ranks_per_dimm * m_dimms_per_channel;
// m_tfaw_count keeps track of how many 1 bits are set
// in each shift register. When m_tfaw_count is >= 4,
// new activates are not allowed.
- m_tfaw_shift = new uint64_t[m_total_ranks];
+ m_tfaw_shift = new uint64[m_total_ranks];
m_tfaw_count = new int[m_total_ranks];
for (int i = 0; i < m_total_ranks; i++) {
m_tfaw_shift[i] = 0;
{
m_msg_counter = 0;
- assert(m_tFaw <= 62); // must fit in a uint64_t shift register
+ assert(m_tFaw <= 62); // must fit in a uint64 shift register
m_total_banks = m_banks_per_rank * m_ranks_per_dimm * m_dimms_per_channel;
m_total_ranks = m_ranks_per_dimm * m_dimms_per_channel;
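The shift-register comment above describes the per-rank tFAW accounting. A stand-alone sketch of that idea (hypothetical class, not RubyMemoryControl's exact code): each activate sets a bit that falls out of a per-rank 64-bit shift register tFAW cycles later, and a counter of set bits gates new activates at four per window.

// Hedged sketch of the tFAW window bookkeeping described above.
#include <cassert>
#include <cstdint>
#include <vector>

class TFawWindow
{
  public:
    TFawWindow(int num_ranks, int tFaw)
        : m_tFaw(tFaw), m_shift(num_ranks, 0), m_count(num_ranks, 0)
    {
        assert(tFaw > 0 && tFaw <= 62);  // bit must fit in a 64-bit word
    }

    // Called once per memory-controller cycle.
    void tick()
    {
        for (size_t r = 0; r < m_shift.size(); ++r) {
            if (m_shift[r] & 1)      // an activate just left the window
                --m_count[r];
            m_shift[r] >>= 1;
        }
    }

    // A new activate is legal only while fewer than four activates are
    // still inside the tFAW window for this rank.
    bool canActivate(int rank) const { return m_count[rank] < 4; }

    void recordActivate(int rank)
    {
        assert(canActivate(rank));
        m_shift[rank] |= (1ULL << (m_tFaw - 1));  // expires after tFAW ticks
        ++m_count[rank];
    }

  private:
    int m_tFaw;
    std::vector<uint64_t> m_shift;  // one shift register per rank
    std::vector<int> m_count;       // number of set bits per register
};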
// Each entry indicates number of address-bus cycles until bank
// is reschedulable:
- int *m_bankBusyCounter;
- int *m_oldRequest;
+ int* m_bankBusyCounter;
+ int* m_oldRequest;
- uint64_t *m_tfaw_shift;
- int *m_tfaw_count;
+ uint64* m_tfaw_shift;
+ int* m_tfaw_count;
// Each of these indicates number of address-bus cycles until
// we can issue a new request of the corresponding type:
int m_ageCounter; // age of old requests; to detect starvation
int m_idleCount; // watchdog timer for shutting down
- MemCntrlProfiler *m_profiler_ptr;
+ MemCntrlProfiler* m_profiler_ptr;
class MemCntrlEvent : public Event
{
public:
- MemCntrlEvent(RubyMemoryControl *_mem_cntrl)
+ MemCntrlEvent(RubyMemoryControl* _mem_cntrl)
{
mem_cntrl = _mem_cntrl;
}
m_seq_map(seq_map), m_bytes_read(0), m_records_read(0),
m_records_flushed(0), m_block_size_bytes(block_size_bytes)
{
+ if (m_uncompressed_trace != NULL) {
+ if (m_block_size_bytes < RubySystem::getBlockSizeBytes()) {
+ // Block sizes larger than when the trace was recorded are not
+ // supported, as we cannot reliably turn accesses to smaller blocks
+ // into larger ones.
+ panic("Recorded cache block size (%d) < current block size (%d) !!",
+ m_block_size_bytes, RubySystem::getBlockSizeBytes());
+ }
+ }
}
CacheRecorder::~CacheRecorder()
m_records.push_back(rec);
}
-uint64_t
-CacheRecorder::aggregateRecords(uint8_t **buf, uint64_t total_size)
+uint64
+CacheRecorder::aggregateRecords(uint8_t** buf, uint64 total_size)
{
std::sort(m_records.begin(), m_records.end(), compareTraceRecords);
int size = m_records.size();
- uint64_t current_size = 0;
+ uint64 current_size = 0;
int record_size = sizeof(TraceRecord) + m_block_size_bytes;
for (int i = 0; i < size; ++i) {
void addRecord(int cntrl, Addr data_addr, Addr pc_addr,
RubyRequestType type, Tick time, DataBlock& data);
- uint64_t aggregateRecords(uint8_t **data, uint64_t size);
+ uint64 aggregateRecords(uint8_t** data, uint64 size);
/*!
* Function for flushing the memory contents of the caches to the
class RubySystem(ClockedObject):
type = 'RubySystem'
cxx_header = "mem/ruby/system/System.hh"
+ random_seed = Param.Int(1234, "random seed used by the simulation");
randomization = Param.Bool(False,
"insert random delays on message enqueue times");
block_size_bytes = Param.UInt32(64,
memory_size_bits = Param.UInt32(64,
"number of bits that a memory address requires");
- phys_mem = Param.SimpleMemory(NULL, "")
-
- access_backing_store = Param.Bool(False, "Use phys_mem as the functional \
- store and only use ruby for timing.")
-
# Profiler related configuration variables
hot_lines = Param.Bool(False, "")
all_instructions = Param.Bool(False, "")
num_of_sequencers = Param.Int("")
- number_of_virtual_networks = Param.Unsigned("")
+ phys_mem = Param.SimpleMemory(NULL, "")
+
+ access_backing_store = Param.Bool(False, "Use phys_mem as the functional \
+ store and only use ruby for timing.")
void
Sequencer::invalidateSC(Addr address)
{
- AbstractCacheEntry *e = m_dataCache_ptr->lookup(address);
- // The controller has lost the coherence permissions, hence the lock
- // on the cache line maintained by the cache should be cleared.
- if (e && e->isLocked(m_version)) {
- e->clearLocked();
+ RequestTable::iterator i = m_writeRequestTable.find(address);
+ if (i != m_writeRequestTable.end()) {
+ SequencerRequest* request = i->second;
+ // The controller has lost the coherence permissions, hence the lock
+ // on the cache line maintained by the cache should be cleared.
+ if (request->m_type == RubyRequestType_Store_Conditional) {
+ m_dataCache_ptr->clearLocked(address);
+ }
}
}
bool
Sequencer::handleLlsc(Addr address, SequencerRequest* request)
{
- AbstractCacheEntry *e = m_dataCache_ptr->lookup(address);
- if (!e)
- return true;
-
+ //
// The success flag indicates whether the LLSC operation was successful.
// LL ops will always succeed, but SC may fail if the cache line is no
// longer locked.
+ //
bool success = true;
if (request->m_type == RubyRequestType_Store_Conditional) {
- if (!e->isLocked(m_version)) {
+ if (!m_dataCache_ptr->isLocked(address, m_version)) {
//
// For failed SC requests, indicate the failure to the cpu by
// setting the extra data to zero.
//
// Independent of success, all SC operations must clear the lock
//
- e->clearLocked();
+ m_dataCache_ptr->clearLocked(address);
} else if (request->m_type == RubyRequestType_Load_Linked) {
//
// Note: To fully follow Alpha LLSC semantics, should the LL clear any
// previously locked cache lines?
//
- e->setLocked(m_version);
- } else if (e->isLocked(m_version)) {
+ m_dataCache_ptr->setLocked(address, m_version);
+ } else if ((m_dataCache_ptr->isTagPresent(address)) &&
+ (m_dataCache_ptr->isLocked(address, m_version))) {
//
// Normal writes should clear the locked address
//
- e->clearLocked();
+ m_dataCache_ptr->clearLocked(address);
}
return success;
}
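The bookkeeping that invalidateSC() and handleLlsc() restore above follows the usual LL/SC discipline: a Load-Linked places a reservation on the line for the requesting context, a Store-Conditional succeeds only if that reservation is still held (and always consumes it), and ordinary stores or lost coherence permissions clear the reservation. A minimal sketch of that discipline in isolation (hypothetical names, not the Sequencer's API):

// Illustrative LL/SC reservation table.
#include <cstdint>
#include <unordered_map>

using Addr = uint64_t;

class LlscTable
{
  public:
    // LL: remember that 'context' holds a reservation on 'line'.
    void loadLinked(Addr line, int context) { m_locked[line] = context; }

    // SC: succeed only if the reservation is still held by this context.
    // The reservation is consumed whether or not the SC succeeds.
    bool storeConditional(Addr line, int context)
    {
        auto it = m_locked.find(line);
        bool success = (it != m_locked.end() && it->second == context);
        if (it != m_locked.end())
            m_locked.erase(it);
        return success;
    }

    // Ordinary stores, or a loss of coherence permissions, clear the lock.
    void clearLock(Addr line) { m_locked.erase(line); }

  private:
    std::unordered_map<Addr, int> m_locked;  // line address -> locking context
};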
const Cycles forwardRequestTime,
const Cycles firstResponseTime)
{
- warn_once("Replacement policy updates recently became the responsibility "
- "of SLICC state machines. Make sure to setMRU() near callbacks "
- "in .sm files!");
-
PacketPtr pkt = srequest->pkt;
Addr request_address(pkt->getAddr());
+ Addr request_line_address = makeLineAddress(pkt->getAddr());
RubyRequestType type = srequest->m_type;
Cycles issued_time = srequest->issue_time;
+ // Set this cache entry to the most recently used
+ if (type == RubyRequestType_IFETCH) {
+ m_instCache_ptr->setMRU(request_line_address);
+ } else {
+ m_dataCache_ptr->setMRU(request_line_address);
+ }
+
assert(curCycle() >= issued_time);
Cycles total_latency = curCycle() - issued_time;
using namespace std;
+int RubySystem::m_random_seed;
bool RubySystem::m_randomization;
uint32_t RubySystem::m_block_size_bytes;
uint32_t RubySystem::m_block_size_bits;
: ClockedObject(p), m_access_backing_store(p->access_backing_store),
m_cache_recorder(NULL)
{
+ m_random_seed = p->random_seed;
+ srandom(m_random_seed);
m_randomization = p->randomization;
m_block_size_bytes = p->block_size_bytes;
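The srandom(m_random_seed) call above is what makes randomized behavior reproducible: the same seed produces the same random() sequence on every run. A trivial stand-alone illustration (the delay expression is made up for the example, not taken from the message buffers):

// Demonstrates that a fixed seed yields a repeatable random() sequence.
#include <cstdio>
#include <cstdlib>

int main()
{
    const int seed = 1234;   // matches the default random_seed parameter
    srandom(seed);
    for (int i = 0; i < 4; ++i) {
        // e.g. a random extra delay of 0..3 cycles on message enqueue
        printf("delay %ld\n", random() % 4);
    }
    return 0;                // re-running prints the same sequence
}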
void
RubySystem::makeCacheRecorder(uint8_t *uncompressed_trace,
- uint64_t cache_trace_size,
- uint64_t block_size_bytes)
+ uint64 cache_trace_size,
+ uint64 block_size_bytes)
{
vector<Sequencer*> sequencer_map;
Sequencer* sequencer_ptr = NULL;
void
RubySystem::writeCompressedTrace(uint8_t *raw_data, string filename,
- uint64_t uncompressed_trace_size)
+ uint64 uncompressed_trace_size)
{
// Create the checkpoint file for the memory
string thefile = CheckpointIn::dir() + "/" + filename.c_str();
// Store the cache-block size, so we are able to restore on systems with a
// different cache-block size. CacheRecorder depends on the correct
// cache-block size upon unserializing.
- uint64_t block_size_bytes = getBlockSizeBytes();
+ uint64 block_size_bytes = getBlockSizeBytes();
SERIALIZE_SCALAR(block_size_bytes);
// Check that there's a valid trace to use. If not, then memory won't be
// Aggregate the trace entries together into a single array
uint8_t *raw_data = new uint8_t[4096];
- uint64_t cache_trace_size = m_cache_recorder->aggregateRecords(&raw_data,
+ uint64 cache_trace_size = m_cache_recorder->aggregateRecords(&raw_data,
4096);
string cache_trace_file = name() + ".cache.gz";
writeCompressedTrace(raw_data, cache_trace_file, cache_trace_size);
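writeCompressedTrace() stores the aggregated trace as a gzip file. A sketch of the core of such a routine using zlib's gzopen()/gzwrite() (an illustrative free function, not the member function itself, and with gem5-style error reporting omitted):

// Write a raw buffer to a gzip-compressed checkpoint file.
#include <cstdint>
#include <string>
#include <zlib.h>

bool
writeCompressed(const std::string &filename, const uint8_t *data,
                uint64_t size)
{
    gzFile out = gzopen(filename.c_str(), "wb");
    if (out == nullptr)
        return false;

    // gzwrite takes an unsigned length, so very large traces would need
    // chunked writes; a single call is enough for this sketch.
    int written = gzwrite(out, data, static_cast<unsigned>(size));
    gzclose(out);
    return written == static_cast<int>(size);
}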
void
RubySystem::readCompressedTrace(string filename, uint8_t *&raw_data,
- uint64_t &uncompressed_trace_size)
+ uint64& uncompressed_trace_size)
{
// Read the trace file
gzFile compressedTrace;
// This value should be set to the checkpoint-system's block-size.
// Optional, as checkpoints without it can be run if the
// checkpoint-system's block-size == current block-size.
- uint64_t block_size_bytes = m_block_size_bytes;
+ uint64 block_size_bytes = getBlockSizeBytes();
UNSERIALIZE_OPT_SCALAR(block_size_bytes);
- if (block_size_bytes < m_block_size_bytes) {
- // Block sizes larger than when the trace was recorded are not
- // supported, as we cannot reliably turn accesses to smaller blocks
- // into larger ones.
- panic("Recorded cache block size (%d) < current block size (%d) !!",
- block_size_bytes, m_block_size_bytes);
- }
-
string cache_trace_file;
- uint64_t cache_trace_size = 0;
+ uint64 cache_trace_size = 0;
UNSERIALIZE_SCALAR(cache_trace_file);
UNSERIALIZE_SCALAR(cache_trace_size);
~RubySystem();
// config accessors
+ static int getRandomSeed() { return m_random_seed; }
static int getRandomization() { return m_randomization; }
static uint32_t getBlockSizeBytes() { return m_block_size_bytes; }
static uint32_t getBlockSizeBits() { return m_block_size_bits; }
RubySystem& operator=(const RubySystem& obj);
void makeCacheRecorder(uint8_t *uncompressed_trace,
- uint64_t cache_trace_size,
- uint64_t block_size_bytes);
+ uint64 cache_trace_size,
+ uint64 block_size_bytes);
void readCompressedTrace(std::string filename,
uint8_t *&raw_data,
- uint64_t &uncompressed_trace_size);
+ uint64& uncompressed_trace_size);
void writeCompressedTrace(uint8_t *raw_data, std::string file,
- uint64_t uncompressed_trace_size);
+ uint64 uncompressed_trace_size);
private:
// configuration parameters
+ static int m_random_seed;
static bool m_randomization;
static uint32_t m_block_size_bytes;
static uint32_t m_block_size_bits;
pairs = { "external" : "yes" }
func = Func(self.symtab, func_id + "_" + t.c_ident,
func_id, self.location,
- self.symtab.find("std::string", Type), [ t ], [], [], "",
+ self.symtab.find("std::string", Type), [ t ], [], "",
pairs)
self.symtab.newSymbol(func)
def generate(self):
type = self.type_ast.type
param = "param_%s" % self.ident
- proto = ""
- body = ""
- default = False
# Add to symbol table
v = Var(self.symtab, self.ident, self.location, type, param,
"interface" in type and (
type["interface"] == "AbstractCacheEntry" or
type["interface"] == "AbstractEntry")):
- proto = "%s* %s" % (type.c_ident, param)
- body = proto
- elif self.default != None:
- value = ""
- if self.default == True:
- value = "true"
- elif self.default == False:
- value = "false"
- else:
- value = "%s" % self.default
- proto = "const %s& %s = %s" % (type.c_ident, param, value)
- body = "const %s& %s" % (type.c_ident, param)
- default = True
+ return type, "%s* %s" % (type.c_ident, param)
else:
- proto = "const %s& %s" % (type.c_ident, param)
- body = proto
-
- return type, proto, body, default
+ return type, "const %s& %s" % (type.c_ident, param)
if func is None:
self.error("Unrecognized function name: '%s'", func_name_args)
- cvec, type_vec = func.checkArguments(self.exprs)
+ if len(self.exprs) != len(func.param_types):
+ self.error("Wrong number of arguments passed to function : '%s'" +\
+ " Expected %d, got %d", self.proc_name,
+ len(func.param_types), len(self.exprs))
+
+ cvec = []
+ type_vec = []
+ for expr,expected_type in zip(self.exprs, func.param_types):
+ # Check the types of the parameter
+ actual_type,param_code = expr.inline(True)
+ if str(actual_type) != 'OOD' and \
+ str(actual_type) != str(expected_type):
+ expr.error("Type mismatch: expected: %s actual: %s" % \
+ (expected_type, actual_type))
+ cvec.append(param_code)
+ type_vec.append(expected_type)
# OK, the semantics of "trigger" here is that ports in the
# machine have different priorities. We always check the first
def generate(self, parent = None):
types = []
- proto_params = []
- body_params = []
- default_count = 0
+ params = []
void_type = self.symtab.find("void", Type)
# Generate definition code
for formal in self.formals:
# Lookup parameter types
try:
- type, proto, body, default = formal.generate()
+ type, ident = formal.generate()
types.append(type)
- proto_params.append(proto)
- body_params.append(body)
- if default:
- default_count += 1
+ params.append(ident)
except AttributeError:
types.append(formal.type)
- proto_params.append(None)
- body_params.append(None)
+ params.append(None)
body = self.slicc.codeFormatter()
if self.statements is None:
machine = self.state_machine
func = Func(self.symtab, func_name_args, self.ident, self.location,
- return_type, types, proto_params,
- body_params, str(body), self.pairs, default_count)
+ return_type, types, params, str(body), self.pairs)
if parent is not None:
if not parent.addFunc(func):
for param in param_types:
trigger_func_name += "_" + param.ident
func = Func(self.symtab, trigger_func_name, "trigger", self.location,
- void_type, param_types, [], [], "", pairs)
+ void_type, param_types, [], "", pairs)
symtab.newSymbol(func)
# Add the stallPort method - this hack reschedules the controller
# for stalled messages that don't trigger events
func = Func(self.symtab, "stallPort", "stallPort", self.location,
- void_type, [], [], [], "", pairs)
+ void_type, [], [], "", pairs)
symtab.newSymbol(func)
param_types = []
self.error("Invalid method call: Type '%s' does not have a method '%s'",
obj_type, methodId)
- func = obj_type.methods[methodId]
- func.checkArguments(self.expr_ast_vec)
+ if len(self.expr_ast_vec) != \
+ len(obj_type.methods[methodId].param_types):
+ # Wrong number of parameters passed
+ self.error("Wrong number of parameters for function name: '%s', " + \
+ "expected: %d, actual: %d", proc_name,
+ len(obj_type.methods[methodId].param_types),
+ len(self.expr_ast_vec))
+
+ for actual_type, expected_type in \
+ zip(paramTypes, obj_type.methods[methodId].param_types):
+ if actual_type != expected_type and \
+ str(actual_type["interface"]) != str(expected_type):
+ self.error("Type mismatch: expected: %s actual: %s",
+ expected_type, actual_type)
# Return the return type of the method
return obj_type.methods[methodId].return_type
pass
class MemberMethodCallExprAST(MethodCallExprAST):
- def __init__(self, slicc, obj_expr_ast, func_call):
+ def __init__(self, slicc, obj_expr_ast, proc_name, expr_ast_vec):
s = super(MemberMethodCallExprAST, self)
- s.__init__(slicc, func_call.proc_name, func_call.exprs)
+ s.__init__(slicc, proc_name, expr_ast_vec)
+
self.obj_expr_ast = obj_expr_ast
def __repr__(self):
pairs = { "external" : "yes" }
func = Func(self.symtab, func_id + "_" +
t.ident, func_id, self.location,
- self.symtab.find("std::string", Type), [ t ], [], [], "",
+ self.symtab.find("std::string", Type), [ t ], [], "",
pairs)
self.symtab.newSymbol(func)
pairs = { "external" : "yes" }
func = Func(self.symtab, func_id + "_" +
t.ident, func_id, self.location,
- self.symtab.find("AccessPermission", Type), [ t ], [], [], "",
+ self.symtab.find("AccessPermission", Type), [ t ], [], "",
pairs)
self.symtab.newSymbol(func)
def p_expr__member_method_call(self, p):
"aexpr : aexpr DOT ident '(' exprs ')'"
- p[0] = ast.MemberMethodCallExprAST(self, p[1],
- ast.FuncCallExprAST(self, p[3], p[5]))
+ p[0] = ast.MemberMethodCallExprAST(self, p[1], p[3], p[5])
+
+ def p_expr__member_method_call_lookup(self, p):
+ "aexpr : aexpr '[' exprs ']'"
+ p[0] = ast.MemberMethodCallExprAST(self, p[1], "lookup", p[3])
def p_expr__class_method_call(self, p):
"aexpr : type DOUBLE_COLON ident '(' exprs ')'"
- p[0] = ast.ClassMethodCallExprAST(self, p[1],
- ast.FuncCallExprAST(self, p[3], p[5]))
+ p[0] = ast.ClassMethodCallExprAST(self, p[1], p[3], p[5])
def p_expr__aexpr(self, p):
"expr : aexpr"
class Func(Symbol):
def __init__(self, table, ident, name, location, return_type, param_types,
- proto_param_strings, body_param_strings, body,
- pairs, default_count = 0):
+ param_strings, body, pairs):
super(Func, self).__init__(table, ident, location, pairs)
self.return_type = return_type
self.param_types = param_types
- self.proto_param_strings = proto_param_strings
- self.body_param_strings = body_param_strings
+ self.param_strings = param_strings
self.body = body
self.isInternalMachineFunc = False
self.c_ident = ident
self.c_name = name
self.class_name = ""
- self.default_count = default_count
def __repr__(self):
return ""
return_type += "*"
return "%s %s(%s);" % (return_type, self.c_name,
- ", ".join(self.proto_param_strings))
+ ", ".join(self.param_strings))
def writeCodeFiles(self, path, includes):
return
- def checkArguments(self, args):
- if len(args) + self.default_count < len(self.param_types) or \
- len(args) > len(self.param_types):
- self.error("Wrong number of arguments passed to function: '%s'" + \
- " Expected at least: %d, got: %d", self.c_ident,
- len(self.param_types) - self.default_count, len(args))
-
- cvec = []
- type_vec = []
- for expr,expected_type in zip(args, self.param_types):
- # Check the types of the parameter
- actual_type,param_code = expr.inline(True)
- if str(actual_type) != 'OOD' and \
- str(actual_type) != str(expected_type) and \
- str(actual_type["interface"]) != str(expected_type):
- expr.error("Type mismatch: expected: %s actual: %s" % \
- (expected_type, actual_type))
- cvec.append(param_code)
- type_vec.append(expected_type)
-
- return cvec, type_vec
-
def generateCode(self):
'''This writes a function of object Chip'''
if "external" in self:
code = self.symtab.codeFormatter()
# Generate function header
- return_type = self.return_type.c_ident
void_type = self.symtab.find("void", Type)
+ return_type = self.return_type.c_ident
if "return_by_ref" in self and self.return_type != void_type:
return_type += "&"
if "return_by_pointer" in self and self.return_type != void_type:
return_type += "*"
- params = ', '.join(self.body_param_strings)
+ params = ', '.join(self.param_strings)
code('''
$return_type
void countTransition(${ident}_State state, ${ident}_Event event);
void possibleTransition(${ident}_State state, ${ident}_Event event);
- uint64_t getEventCount(${ident}_Event event);
+ uint64 getEventCount(${ident}_Event event);
bool isPossible(${ident}_State state, ${ident}_Event event);
- uint64_t getTransitionCount(${ident}_State state, ${ident}_Event event);
+ uint64 getTransitionCount(${ident}_State state, ${ident}_Event event);
private:
''')
m_possible[state][event] = true;
}
-uint64_t
+uint64
$c_ident::getEventCount(${ident}_Event event)
{
return m_event_counters[event];
return m_possible[state][event];
}
-uint64_t
+uint64
$c_ident::getTransitionCount(${ident}_State state,
${ident}_Event event)
{
else:
code('doTransitionWorker(event, state, next_state, addr);')
+ port_to_buf_map, in_msg_bufs, msg_bufs = self.getBufferMaps(ident)
+
code('''
if (result == TransitionResult_Valid) {