This patch changes the way cache statistics are collected in Ruby.
Currently, a separate entity called CacheProfiler holds the statistical
variables for the caches. The CacheMemory class defines functions for
accessing the CacheProfiler, and these functions are then invoked in the
.sm files. I find this approach opaque and error prone, and it also pays
the cost of a function call for every recorded statistic.
Instead, this patch exposes the statistical variables directly to the
.sm files. Collection becomes transparent and happens in place, with no
function calls. The patch also removes the now-unused CacheProfiler class.
--HG--
rename : src/mem/slicc/ast/InfixOperatorExprAST.py => src/mem/slicc/ast/OperatorExprAST.py
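(For illustration only: the sketch below mimics the Stats::Scalar /
Stats::Formula pattern this patch relies on, using hypothetical stand-in
types rather than gem5's real Stats library. A scalar is bumped in place
by the protocol action, and a formula such as demand_accesses =
demand_hits + demand_misses is evaluated lazily when stats are dumped.)

    #include <cstdint>
    #include <iostream>

    // Stand-ins for gem5's Stats::Scalar and Stats::Formula (assumed
    // semantics): a scalar is an in-place counter, and a formula is
    // evaluated from its operands only when the stats are printed.
    struct Scalar {
        uint64_t value = 0;
        Scalar &operator++() { ++value; return *this; }
    };

    struct Formula {
        const Scalar *lhs, *rhs;
        uint64_t eval() const { return lhs->value + rhs->value; }
    };

    int main()
    {
        Scalar demand_hits, demand_misses;
        Formula demand_accesses{&demand_hits, &demand_misses};

        // What a SLICC action body like "++L1DcacheMemory.demand_misses"
        // boils down to: a plain increment, no profiler function call.
        ++demand_misses;
        ++demand_hits;
        ++demand_hits;

        std::cout << "demand_accesses = " << demand_accesses.eval()
                  << "\n";  // prints 3
        return 0;
    }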
wakeUpBuffers(address);
}
- action(uu_profileInstMiss, "\ui", desc="Profile the demand miss") {
- peek(mandatoryQueue_in, RubyRequest) {
- L1IcacheMemory.profileMiss(in_msg);
- }
+ action(uu_profileInstMiss, "\uim", desc="Profile the demand miss") {
+ ++L1IcacheMemory.demand_misses;
}
- action(uu_profileDataMiss, "\ud", desc="Profile the demand miss") {
- peek(mandatoryQueue_in, RubyRequest) {
- L1DcacheMemory.profileMiss(in_msg);
- }
+ action(uu_profileInstHit, "\uih", desc="Profile the demand hit") {
+ ++L1IcacheMemory.demand_hits;
+ }
+
+ action(uu_profileDataMiss, "\udm", desc="Profile the demand miss") {
+ ++L1DcacheMemory.demand_misses;
+ }
+
+ action(uu_profileDataHit, "\udh", desc="Profile the demand hit") {
+ ++L1DcacheMemory.demand_hits;
}
action(po_observeMiss, "\po", desc="Inform the prefetcher about the miss") {
}
// Transitions from Shared
- transition(S, {Load,Ifetch}) {
+ transition({S,E,M}, Load) {
+ h_load_hit;
+ uu_profileDataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition({S,E,M}, Ifetch) {
h_load_hit;
+ uu_profileInstHit;
k_popMandatoryQueue;
}
// Transitions from Exclusive
- transition(E, {Load, Ifetch}) {
- h_load_hit;
- k_popMandatoryQueue;
- }
-
- transition(E, Store, M) {
+ transition({E,M}, Store, M) {
hh_store_hit;
+ uu_profileDataHit;
k_popMandatoryQueue;
}
}
// Transitions from Modified
- transition(M, {Load, Ifetch}) {
- h_load_hit;
- k_popMandatoryQueue;
- }
-
- transition(M, Store) {
- hh_store_hit;
- k_popMandatoryQueue;
- }
transition(M, L1_Replacement, M_I) {
forward_eviction_to_cpu;
}
}
- GenericRequestType convertToGenericType(CoherenceRequestType type) {
- if(type == CoherenceRequestType:GETS) {
- return GenericRequestType:GETS;
- } else if(type == CoherenceRequestType:GETX) {
- return GenericRequestType:GETX;
- } else if(type == CoherenceRequestType:GET_INSTR) {
- return GenericRequestType:GET_INSTR;
- } else if(type == CoherenceRequestType:UPGRADE) {
- return GenericRequestType:UPGRADE;
- } else {
- DPRINTF(RubySlicc, "%s\n", type);
- error("Invalid CoherenceRequestType\n");
- }
+ action(uu_profileMiss, "\um", desc="Profile the demand miss") {
+ ++L2cacheMemory.demand_misses;
}
- action(uu_profileMiss, "\u", desc="Profile the demand miss") {
- peek(L1RequestIntraChipL2Network_in, RequestMsg) {
- L2cacheMemory.profileGenericRequest(convertToGenericType(in_msg.Type),
- in_msg.AccessMode, in_msg.Prefetch);
- }
+ action(uu_profileHit, "\uh", desc="Profile the demand hit") {
+ ++L2cacheMemory.demand_hits;
}
action(ww_profileMissNoDir, "\w", desc="Profile this transition at the L2 because Dir won't see the request") {
ds_sendSharedDataToRequestor;
nn_addSharer;
set_setMRU;
+ uu_profileHit;
jj_popL1RequestQueue;
}
// fw_sendFwdInvToSharers;
fwm_sendFwdInvToSharersMinusRequestor;
set_setMRU;
+ uu_profileHit;
jj_popL1RequestQueue;
}
fwm_sendFwdInvToSharersMinusRequestor;
ts_sendInvAckToUpgrader;
set_setMRU;
+ uu_profileHit;
jj_popL1RequestQueue;
}
transition(M, L1_GETX, MT_MB) {
d_sendDataToRequestor;
set_setMRU;
+ uu_profileHit;
jj_popL1RequestQueue;
}
d_sendDataToRequestor;
nn_addSharer;
set_setMRU;
+ uu_profileHit;
jj_popL1RequestQueue;
}
transition(M, L1_GETS, MT_MB) {
dd_sendExclusiveDataToRequestor;
set_setMRU;
+ uu_profileHit;
jj_popL1RequestQueue;
}
profileMsgDelay(2, forwardRequestNetwork_in.dequeue_getDelayCycles());
}
- action(p_profileMiss, "p", desc="Profile cache miss") {
- peek(mandatoryQueue_in, RubyRequest) {
- cacheMemory.profileMiss(in_msg);
- }
+ action(p_profileMiss, "pi", desc="Profile cache miss") {
+ ++cacheMemory.demand_misses;
+ }
+
+ action(p_profileHit, "ph", desc="Profile cache hit") {
+ ++cacheMemory.demand_hits;
}
action(r_load_hit, "r", desc="Notify sequencer the load completed.") {
transition(M, Store) {
s_store_hit;
+ p_profileHit;
m_popMandatoryQueue;
}
transition(M, {Load, Ifetch}) {
r_load_hit;
+ p_profileHit;
m_popMandatoryQueue;
}
-
/*
* Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
* All rights reserved.
}
}
- action(uu_profileMiss, "\u", desc="Profile the demand miss") {
- peek(mandatoryQueue_in, RubyRequest) {
- // profile_miss(in_msg);
- }
+ action(uu_profileInstMiss, "\uim", desc="Profile the demand miss") {
+ ++L1Icache.demand_misses;
+ }
+
+ action(uu_profileInstHit, "\uih", desc="Profile the demand hit") {
+ ++L1Icache.demand_hits;
+ }
+
+ action(uu_profileDataMiss, "\udm", desc="Profile the demand miss") {
+ ++L1Dcache.demand_misses;
+ }
+
+ action(uu_profileDataHit, "\udh", desc="Profile the demand hit") {
+ ++L1Dcache.demand_hits;
}
action(z_recycleRequestQueue, "z", desc="Send the head of the mandatory queue to the back of the queue.") {
ii_allocateL1DCacheBlock;
i_allocateTBE;
a_issueGETS;
- // uu_profileMiss;
+ uu_profileDataMiss;
k_popMandatoryQueue;
}
jj_allocateL1ICacheBlock;
i_allocateTBE;
a_issueGETS;
- // uu_profileMiss;
+ uu_profileInstMiss;
k_popMandatoryQueue;
}
ii_allocateL1DCacheBlock;
i_allocateTBE;
b_issueGETX;
- // uu_profileMiss;
+ uu_profileDataMiss;
k_popMandatoryQueue;
}
l_popForwardQueue;
}
- // Transitions from Shared
- transition({S, SM}, {Load, Ifetch}) {
+ transition({S, SM, O, OM, MM, MM_W, M, M_W}, Load) {
+ h_load_hit;
+ uu_profileDataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition({S, SM, O, OM, MM, MM_W, M, M_W}, Ifetch) {
h_load_hit;
+ uu_profileInstHit;
k_popMandatoryQueue;
}
+ // Transitions from Shared
transition(S, Store, SM) {
i_allocateTBE;
b_issueGETX;
- // uu_profileMiss;
+ uu_profileDataMiss;
k_popMandatoryQueue;
}
}
// Transitions from Owned
- transition({O, OM}, {Load, Ifetch}) {
- h_load_hit;
- k_popMandatoryQueue;
- }
-
transition(O, Store, OM) {
i_allocateTBE;
b_issueGETX;
- // uu_profileMiss;
+ uu_profileDataMiss;
k_popMandatoryQueue;
}
}
// Transitions from MM
- transition({MM, MM_W}, {Load, Ifetch}) {
- h_load_hit;
- k_popMandatoryQueue;
- }
-
transition({MM, MM_W}, Store) {
hh_store_hit;
+ uu_profileDataHit;
k_popMandatoryQueue;
}
}
// Transitions from M
- transition({M, M_W}, {Load, Ifetch}) {
- h_load_hit;
- k_popMandatoryQueue;
- }
-
transition(M, Store, MM) {
hh_store_hit;
+ uu_profileDataHit;
k_popMandatoryQueue;
}
transition(M_W, Store, MM_W) {
hh_store_hit;
+ uu_profileDataHit;
k_popMandatoryQueue;
}
}
}
- action(uu_profileMiss, "\u", desc="Profile the demand miss") {
- peek(L1requestNetwork_in, RequestMsg) {
- // AccessModeType not implemented
- // profile_L2Cache_miss(convertToGenericType(in_msg.Type), in_msg.AccessMode, MessageSizeTypeToInt(in_msg.MessageSize), in_msg.Prefetch, machineIDToNodeID(in_msg.Requestor));
- }
+ action(uu_profileMiss, "\um", desc="Profile the demand miss") {
+ ++L2cache.demand_misses;
+ }
+
+ action(uu_profileHit, "\uh", desc="Profile the demand hit") {
+ ++L2cache.demand_hits;
}
action(y_copyCacheStateToDir, "y", desc="Copy cache state to directory state") {
y_copyCacheStateToDir;
r_setMRU;
rr_deallocateL2CacheBlock;
- uu_profileMiss;
+ uu_profileHit;
o_popL1RequestQueue;
}
transition(OLSX, L1_GETS, OLSXS) {
d_sendDataToL1GETS;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
transition(SLS, L1_GETS, SLSS ) {
d_sendDataToL1GETS;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
transition(OLS, L1_GETS, OLSS) {
d_sendDataToL1GETS;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
i_allocateTBE;
// should count 0 of course
h_countLocalSharersExceptRequestor;
- d_sendDataToL1GETX
+ d_sendDataToL1GETX;
y_copyCacheStateToDir;
rr_deallocateL2CacheBlock;
s_deallocateTBE;
+ uu_profileHit;
o_popL1RequestQueue;
}
d_sendDataToL1GETX;
r_setMRU;
s_deallocateTBE;
+ uu_profileHit;
o_popL1RequestQueue;
}
transition(S, L1_GETS, SS) {
d_sendDataToL1GETS;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
transition(O, L1_GETS, OO) {
d_sendDataToL1GETS;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
-
/*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
+ * Copyright (c) 1999-2013 Mark D. Hill and David A. Wood
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
Entry L1Icache_entry := getL1ICacheEntry(in_msg.LineAddress);
if (is_valid(L1Icache_entry)) {
- // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
+ // The tag matches for the L1, so the L1 fetches the line.
+ // We know it can't be in the L2 due to exclusion.
trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.LineAddress, L1Icache_entry, tbe);
} else {
Entry L1Dcache_entry := getL1DCacheEntry(in_msg.LineAddress);
if (is_valid(L1Dcache_entry)) {
- // The tag matches for the L1, so the L1 fetches the line. We know it can't be in the L2 due to exclusion
+ // The tag matches for the L1, so the L1 fetches the line.
+ // We know it can't be in the L2 due to exclusion.
trigger(mandatory_request_type_to_event(in_msg.Type),
in_msg.LineAddress, L1Dcache_entry, tbe);
} else {
}
}
- action(uu_profileMiss, "\u", desc="Profile the demand miss") {
- peek(mandatoryQueue_in, RubyRequest) {
- if (L1DcacheMemory.isTagPresent(address)) {
- L1DcacheMemory.profileMiss(in_msg);
- } else {
- L1IcacheMemory.profileMiss(in_msg);
- }
- }
+ action(uu_profileInstMiss, "\uim", desc="Profile the demand miss") {
+ ++L1IcacheMemory.demand_misses;
+ }
+
+ action(uu_profileInstHit, "\uih", desc="Profile the demand hit") {
+ ++L1IcacheMemory.demand_hits;
+ }
+
+ action(uu_profileDataMiss, "\udm", desc="Profile the demand miss") {
+ ++L1DcacheMemory.demand_misses;
+ }
+
+ action(uu_profileDataHit, "\udh", desc="Profile the demand hit") {
+ ++L1DcacheMemory.demand_hits;
}
action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
ii_allocateL1DCacheBlock;
i_allocateTBE;
a_issueReadRequest;
- uu_profileMiss;
+ uu_profileDataMiss;
k_popMandatoryQueue;
}
pp_allocateL1ICacheBlock;
i_allocateTBE;
a_issueReadRequest;
- uu_profileMiss;
+ uu_profileInstMiss;
k_popMandatoryQueue;
}
ii_allocateL1DCacheBlock;
i_allocateTBE;
b_issueWriteRequest;
- uu_profileMiss;
+ uu_profileDataMiss;
k_popMandatoryQueue;
}
transition(I, Load, IS) {
i_allocateTBE;
a_issueReadRequest;
- uu_profileMiss;
+ uu_profileDataMiss;
k_popMandatoryQueue;
}
transition(I, Ifetch, IS) {
i_allocateTBE;
a_issueReadRequest;
- uu_profileMiss;
+ uu_profileInstMiss;
k_popMandatoryQueue;
}
transition(I, {Store, Atomic}, IM) {
i_allocateTBE;
b_issueWriteRequest;
- uu_profileMiss;
+ uu_profileDataMiss;
k_popMandatoryQueue;
}
}
// Transitions from Shared
- transition({S, SM, S_L, SM_L}, {Load, Ifetch}) {
+ transition({S, SM, S_L, SM_L}, Load) {
h_load_hit;
+ uu_profileDataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition({S, SM, S_L, SM_L}, Ifetch) {
+ h_load_hit;
+ uu_profileInstHit;
k_popMandatoryQueue;
}
transition(S, {Store, Atomic}, SM) {
i_allocateTBE;
b_issueWriteRequest;
- uu_profileMiss;
+ uu_profileDataMiss;
k_popMandatoryQueue;
}
}
// Transitions from Owned
- transition({O, OM}, {Load, Ifetch}) {
+ transition({O, OM}, Ifetch) {
h_load_hit;
+ uu_profileInstHit;
+ k_popMandatoryQueue;
+ }
+
+ transition({O, OM}, Load) {
+ h_load_hit;
+ uu_profileDataHit;
k_popMandatoryQueue;
}
transition(O, {Store, Atomic}, OM) {
i_allocateTBE;
b_issueWriteRequest;
- uu_profileMiss;
+ uu_profileDataMiss;
k_popMandatoryQueue;
}
}
// Transitions from Modified
- transition({MM, MM_W}, {Load, Ifetch}) {
+ transition({MM, MM_W}, Ifetch) {
h_load_hit;
+ uu_profileInstHit;
+ k_popMandatoryQueue;
+ }
+
+ transition({MM, MM_W}, Load) {
+ h_load_hit;
+ uu_profileDataHit;
k_popMandatoryQueue;
}
transition({MM_W}, {Store, Atomic}) {
hh_store_hit;
+ uu_profileDataHit;
k_popMandatoryQueue;
}
transition(MM, Store) {
hh_store_hit;
+ uu_profileDataHit;
k_popMandatoryQueue;
}
transition(MM, Atomic, M) {
hh_store_hit;
+ uu_profileDataHit;
k_popMandatoryQueue;
}
}
// Transitions from Dirty Exclusive
- transition({M, M_W}, {Load, Ifetch}) {
+ transition({M, M_W}, Ifetch) {
+ h_load_hit;
+ uu_profileInstHit;
+ k_popMandatoryQueue;
+ }
+
+ transition({M, M_W}, Load) {
h_load_hit;
+ uu_profileDataHit;
k_popMandatoryQueue;
}
transition(M, Store, MM) {
hh_store_hit;
+ uu_profileDataHit;
k_popMandatoryQueue;
}
transition(M, Atomic) {
hh_store_hit;
+ uu_profileDataHit;
k_popMandatoryQueue;
}
transition(M_W, Store, MM_W) {
hh_store_hit;
+ uu_profileDataHit;
k_popMandatoryQueue;
}
transition(M_W, Atomic) {
hh_store_hit;
+ uu_profileDataHit;
k_popMandatoryQueue;
}
ii_allocateL1DCacheBlock;
i_allocateTBE;
a_issueReadRequest;
- uu_profileMiss;
+ uu_profileDataMiss;
k_popMandatoryQueue;
}
pp_allocateL1ICacheBlock;
i_allocateTBE;
a_issueReadRequest;
- uu_profileMiss;
+ uu_profileInstMiss;
k_popMandatoryQueue;
}
ii_allocateL1DCacheBlock;
i_allocateTBE;
b_issueWriteRequest;
- uu_profileMiss;
+ uu_profileDataMiss;
k_popMandatoryQueue;
}
transition(S_L, {Store, Atomic}, SM_L) {
i_allocateTBE;
b_issueWriteRequest;
- uu_profileMiss;
+ uu_profileDataMiss;
k_popMandatoryQueue;
}
}
}
- GenericRequestType convertToGenericType(CoherenceRequestType type) {
- if(type == CoherenceRequestType:GETS) {
- return GenericRequestType:GETS;
- } else if(type == CoherenceRequestType:GETX) {
- return GenericRequestType:GETX;
- } else {
- DPRINTF(RubySlicc, "%s\n", type);
- error("invalid CoherenceRequestType");
- }
- }
-
// ** OUT_PORTS **
out_port(globalRequestNetwork_out, RequestMsg, GlobalRequestFromL2Cache);
out_port(localRequestNetwork_out, RequestMsg, L1RequestFromL2Cache);
unset_cache_entry();
}
- action(uu_profileMiss, "\u", desc="Profile the demand miss") {
- peek(L1requestNetwork_in, RequestMsg) {
- L2cacheMemory.profileGenericRequest(convertToGenericType(in_msg.Type),
- in_msg.AccessMode,
- in_msg.Prefetch);
- }
+ action(uu_profileMiss, "\um", desc="Profile the demand miss") {
+ ++L2cacheMemory.demand_misses;
}
+ action(uu_profileHit, "\uh", desc="Profile the demand hit") {
+ ++L2cacheMemory.demand_hits;
+ }
action(w_assertIncomingDataAndCacheDataMatch, "w", desc="Assert that the incoming data and the data in the cache match") {
peek(responseNetwork_in, ResponseMsg) {
k_dataFromL2CacheToL1Requestor;
r_markNewSharer;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
k_dataFromL2CacheToL1Requestor;
r_markNewSharer;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
k_dataFromL2CacheToL1Requestor;
r_markNewSharer;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
k_dataOwnerFromL2CacheToL1Requestor;
r_markNewSharer;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
k_dataFromL2CacheToL1Requestor;
r_markNewSharer;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
k_dataAndAllTokensFromL2CacheToL1Requestor;
r_markNewSharer;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
k_dataFromL2CacheToL1Requestor;
r_markNewSharer;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
k_dataFromL2CacheToL1Requestor;
r_markNewSharer;
r_setMRU;
+ uu_profileHit;
o_popL1RequestQueue;
}
}
}
- action(uu_profileMiss, "\u", desc="Profile the demand miss") {
- peek(mandatoryQueue_in, RubyRequest) {
- if (L1IcacheMemory.isTagPresent(address)) {
- L1IcacheMemory.profileMiss(in_msg);
- } else if (L1DcacheMemory.isTagPresent(address)) {
- L1DcacheMemory.profileMiss(in_msg);
- }
- if (L2cacheMemory.isTagPresent(address) == false) {
- L2cacheMemory.profileMiss(in_msg);
- }
- }
+ action(uu_profileL1DataMiss, "\udm", desc="Profile the demand miss") {
+ ++L1DcacheMemory.demand_misses;
+ }
+
+ action(uu_profileL1DataHit, "\udh", desc="Profile the demand hit") {
+ ++L1DcacheMemory.demand_hits;
+ }
+
+ action(uu_profileL1InstMiss, "\uim", desc="Profile the demand miss") {
+ ++L1IcacheMemory.demand_misses;
+ }
+
+ action(uu_profileL1InstHit, "\uih", desc="Profile the demand hit") {
+ ++L1IcacheMemory.demand_hits;
+ }
+
+ action(uu_profileL2Miss, "\um", desc="Profile the demand miss") {
+ ++L2cacheMemory.demand_misses;
+ }
+
+ action(uu_profileL2Hit, "\uh", desc="Profile the demand hit") {
+ ++L2cacheMemory.demand_hits;
}
action(zz_stallAndWaitMandatoryQueue, "\z", desc="Send the head of the mandatory queue to the back of the queue.") {
ii_allocateL1DCacheBlock;
nb_copyFromTBEToL1; // Not really needed for state I
s_deallocateTBE;
- uu_profileMiss;
zz_stallAndWaitMandatoryQueue;
ll_L2toL1Transfer;
}
ii_allocateL1DCacheBlock;
nb_copyFromTBEToL1;
s_deallocateTBE;
- uu_profileMiss;
zz_stallAndWaitMandatoryQueue;
ll_L2toL1Transfer;
}
ii_allocateL1DCacheBlock;
nb_copyFromTBEToL1;
s_deallocateTBE;
- uu_profileMiss;
zz_stallAndWaitMandatoryQueue;
ll_L2toL1Transfer;
}
ii_allocateL1DCacheBlock;
nb_copyFromTBEToL1;
s_deallocateTBE;
- uu_profileMiss;
zz_stallAndWaitMandatoryQueue;
ll_L2toL1Transfer;
}
ii_allocateL1DCacheBlock;
nb_copyFromTBEToL1;
s_deallocateTBE;
- uu_profileMiss;
zz_stallAndWaitMandatoryQueue;
ll_L2toL1Transfer;
}
jj_allocateL1ICacheBlock;
nb_copyFromTBEToL1;
s_deallocateTBE;
- uu_profileMiss;
zz_stallAndWaitMandatoryQueue;
ll_L2toL1Transfer;
}
jj_allocateL1ICacheBlock;
nb_copyFromTBEToL1;
s_deallocateTBE;
- uu_profileMiss;
zz_stallAndWaitMandatoryQueue;
ll_L2toL1Transfer;
}
jj_allocateL1ICacheBlock;
nb_copyFromTBEToL1;
s_deallocateTBE;
- uu_profileMiss;
zz_stallAndWaitMandatoryQueue;
ll_L2toL1Transfer;
}
jj_allocateL1ICacheBlock;
nb_copyFromTBEToL1;
s_deallocateTBE;
- uu_profileMiss;
zz_stallAndWaitMandatoryQueue;
ll_L2toL1Transfer;
}
jj_allocateL1ICacheBlock;
nb_copyFromTBEToL1;
s_deallocateTBE;
- uu_profileMiss;
zz_stallAndWaitMandatoryQueue;
ll_L2toL1Transfer;
}
}
// Transitions from Idle
- transition({I, IR}, Load, IS) {
+ transition({I, IR}, Load, IS) {
ii_allocateL1DCacheBlock;
i_allocateTBE;
a_issueGETS;
- uu_profileMiss;
+ uu_profileL1DataMiss;
+ uu_profileL2Miss;
k_popMandatoryQueue;
}
- transition({I, IR}, Ifetch, IS) {
+ transition({I, IR}, Ifetch, IS) {
jj_allocateL1ICacheBlock;
i_allocateTBE;
a_issueGETS;
- uu_profileMiss;
+ uu_profileL1InstMiss;
+ uu_profileL2Miss;
k_popMandatoryQueue;
}
- transition({I, IR}, Store, IM) {
+ transition({I, IR}, Store, IM) {
ii_allocateL1DCacheBlock;
i_allocateTBE;
b_issueGETX;
- uu_profileMiss;
+ uu_profileL1DataMiss;
+ uu_profileL2Miss;
k_popMandatoryQueue;
}
transition({I, IR}, Flush_line, IM_F) {
it_allocateTBE;
bf_issueGETF;
- uu_profileMiss;
k_popMandatoryQueue;
}
}
// Transitions from Shared
- transition({S, SM, ISM}, {Load, Ifetch}) {
+ transition({S, SM, ISM}, Load) {
+ h_load_hit;
+ uu_profileL1DataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition({S, SM, ISM}, Ifetch) {
h_load_hit;
+ uu_profileL1InstHit;
k_popMandatoryQueue;
}
- transition(SR, {Load, Ifetch}, S) {
+ transition(SR, Load, S) {
h_load_hit;
+ uu_profileL1DataMiss;
+ uu_profileL2Hit;
k_popMandatoryQueue;
ka_wakeUpAllDependents;
}
- transition({S, SR}, Store, SM) {
+ transition(SR, Ifetch, S) {
+ h_load_hit;
+ uu_profileL1InstMiss;
+ uu_profileL2Hit;
+ k_popMandatoryQueue;
+ ka_wakeUpAllDependents;
+ }
+
+ transition({S, SR}, Store, SM) {
i_allocateTBE;
b_issueGETX;
- uu_profileMiss;
+ uu_profileL1DataMiss;
+ uu_profileL2Miss;
k_popMandatoryQueue;
}
transition({S, SR}, Flush_line, SM_F) {
i_allocateTBE;
bf_issueGETF;
- uu_profileMiss;
forward_eviction_to_cpu;
gg_deallocateL1CacheBlock;
k_popMandatoryQueue;
}
// Transitions from Owned
- transition({O, OM, SS, MM_W, M_W}, {Load, Ifetch}) {
+ transition({O, OM, SS, MM_W, M_W}, Load) {
h_load_hit;
+ uu_profileL1DataHit;
k_popMandatoryQueue;
}
- transition(OR, {Load, Ifetch}, O) {
+ transition({O, OM, SS, MM_W, M_W}, Ifetch) {
h_load_hit;
+ uu_profileL1InstHit;
+ k_popMandatoryQueue;
+ }
+
+ transition(OR, Load, O) {
+ h_load_hit;
+ uu_profileL1DataMiss;
+ uu_profileL2Hit;
+ k_popMandatoryQueue;
+ ka_wakeUpAllDependents;
+ }
+
+ transition(OR, Ifetch, O) {
+ h_load_hit;
+ uu_profileL1InstMiss;
+ uu_profileL2Hit;
k_popMandatoryQueue;
ka_wakeUpAllDependents;
}
- transition({O, OR}, Store, OM) {
+ transition({O, OR}, Store, OM) {
i_allocateTBE;
b_issueGETX;
p_decrementNumberOfMessagesByOne;
- uu_profileMiss;
+ uu_profileL1DataMiss;
+ uu_profileL2Miss;
k_popMandatoryQueue;
}
+
transition({O, OR}, Flush_line, OM_F) {
i_allocateTBE;
bf_issueGETF;
p_decrementNumberOfMessagesByOne;
- uu_profileMiss;
forward_eviction_to_cpu;
gg_deallocateL1CacheBlock;
k_popMandatoryQueue;
}
// Transitions from Modified
- transition({MM, M}, {Load, Ifetch}) {
+ transition({MM, M}, Ifetch) {
h_load_hit;
+ uu_profileL1InstHit;
+ k_popMandatoryQueue;
+ }
+
+ transition({MM, M}, Load) {
+ h_load_hit;
+ uu_profileL1DataHit;
k_popMandatoryQueue;
}
transition(MM, Store) {
hh_store_hit;
+ uu_profileL1DataHit;
+ k_popMandatoryQueue;
+ }
+
+ transition(MMR, Load, MM) {
+ h_load_hit;
+ uu_profileL1DataMiss;
+ uu_profileL2Hit;
k_popMandatoryQueue;
+ ka_wakeUpAllDependents;
}
- transition(MMR, {Load, Ifetch}, MM) {
+ transition(MMR, Ifetch, MM) {
h_load_hit;
+ uu_profileL1InstMiss;
+ uu_profileL2Hit;
k_popMandatoryQueue;
ka_wakeUpAllDependents;
}
transition(MMR, Store, MM) {
hh_store_hit;
+ uu_profileL1DataMiss;
+ uu_profileL2Hit;
k_popMandatoryQueue;
ka_wakeUpAllDependents;
}
// Transitions from Dirty Exclusive
transition(M, Store, MM) {
hh_store_hit;
+ uu_profileL1DataHit;
k_popMandatoryQueue;
}
- transition(MR, {Load, Ifetch}, M) {
+ transition(MR, Load, M) {
+ h_load_hit;
+ uu_profileL1DataMiss;
+ uu_profileL2Hit;
+ k_popMandatoryQueue;
+ ka_wakeUpAllDependents;
+ }
+
+ transition(MR, Ifetch, M) {
h_load_hit;
+ uu_profileL1InstMiss;
+ uu_profileL2Hit;
k_popMandatoryQueue;
ka_wakeUpAllDependents;
}
transition(MR, Store, MM) {
hh_store_hit;
+ uu_profileL1DataMiss;
+ uu_profileL2Hit;
k_popMandatoryQueue;
ka_wakeUpAllDependents;
}
transition(MM_W, Store) {
hh_store_hit;
+ uu_profileL1DataHit;
k_popMandatoryQueue;
}
transition(M_W, Store, MM_W) {
hh_store_hit;
+ uu_profileL1DataHit;
k_popMandatoryQueue;
}
NULL, desc="Invalid request type";
}
-enumeration(GenericRequestType, desc="...", default="GenericRequestType_NULL") {
- GETS, desc="gets request";
- GET_INSTR, desc="get instr request";
- GETX, desc="getx request";
- UPGRADE, desc="upgrade request";
- DOWNGRADE, desc="downgrade request";
- INV, desc="invalidate request";
- INV_S, desc="invalidate shared copy request";
- PUTS, desc="puts request";
- PUTO, desc="puto request";
- PUTX, desc="putx request";
- L2_PF, desc="L2 prefetch";
- LD, desc="Load";
- ST, desc="Store";
- ATOMIC, desc="Atomic Load/Store";
- IFETCH, desc="Instruction fetch";
- IO, desc="I/O";
- NACK, desc="Nack";
- REPLACEMENT, desc="Replacement";
- WB_ACK, desc="WriteBack ack";
- EXE_ACK, desc="Execlusive ack";
- COMMIT, desc="Commit version";
- LD_XACT, desc="Transactional Load";
- LDX_XACT, desc="Transactional Load-Intend-Modify";
- ST_XACT, desc="Transactional Store";
- BEGIN_XACT, desc="Begin Transaction";
- COMMIT_XACT, desc="Commit Transaction";
- ABORT_XACT, desc="Abort Transaction";
- DMA_READ, desc="DMA READ";
- DMA_WRITE, desc="DMA WRITE";
- NULL, desc="null request type";
-}
-
enumeration(CacheRequestType, desc="...", default="CacheRequestType_NULL") {
DataArrayRead, desc="Read access to the cache's data array";
DataArrayWrite, desc="Write access to the cache's data array";
external_type(MessageBuffer, buffer="yes", inport="yes", outport="yes");
external_type(OutPort, primitive="yes");
+external_type(Scalar, primitive="yes");
structure(InPort, external = "yes", primitive="yes") {
bool isReady();
void deallocate(Address);
AbstractCacheEntry lookup(Address);
bool isTagPresent(Address);
- void profileMiss(RubyRequest);
-
- void profileGenericRequest(GenericRequestType,
- RubyAccessMode,
- PrefetchBit);
-
void setMRU(Address);
void recordRequestType(CacheRequestType);
bool checkResourceAvailable(CacheResourceType, Address);
+
+ Scalar demand_misses;
+ Scalar demand_hits;
}
structure (WireBuffer, inport="yes", outport="yes", external = "yes") {
+++ /dev/null
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "mem/ruby/profiler/CacheProfiler.hh"
-#include "mem/ruby/profiler/Profiler.hh"
-#include "mem/ruby/system/System.hh"
-
-using namespace std;
-
-CacheProfiler::CacheProfiler(const string& description)
- : m_cacheRequestType(int(RubyRequestType_NUM)), m_genericRequestType(int(GenericRequestType_NUM))
-{
- m_description = description;
-
- clearStats();
-}
-
-CacheProfiler::~CacheProfiler()
-{
-}
-
-void
-CacheProfiler::printStats(ostream& out) const
-{
- out << "Cache Stats: " << m_description << endl;
- string description = " " + m_description;
-
- out << description << "_total_misses: " << m_misses << endl;
- out << description << "_total_demand_misses: " << m_demand_misses << endl;
- out << description << "_total_prefetches: " << m_prefetches << endl;
- out << description << "_total_sw_prefetches: " << m_sw_prefetches << endl;
- out << description << "_total_hw_prefetches: " << m_hw_prefetches << endl;
- out << endl;
-
- int requests = 0;
-
- for (int i = 0; i < int(RubyRequestType_NUM); i++) {
- requests += m_cacheRequestType[i];
- }
-
- for (int i = 0; i < int(GenericRequestType_NUM); i++) {
- requests += m_genericRequestType[i];
- }
-
- assert(m_misses == requests);
-
- if (requests > 0) {
- for (int i = 0; i < int(RubyRequestType_NUM); i++) {
- if (m_cacheRequestType[i] > 0) {
- out << description << "_request_type_"
- << RubyRequestType_to_string(RubyRequestType(i))
- << ": "
- << 100.0 * (double)m_cacheRequestType[i] /
- (double)requests
- << "%" << endl;
- }
- }
-
- for (int i = 0; i < int(GenericRequestType_NUM); i++) {
- if (m_genericRequestType[i] > 0) {
- out << description << "_request_type_"
- << GenericRequestType_to_string(GenericRequestType(i))
- << ": "
- << 100.0 * (double)m_genericRequestType[i] /
- (double)requests
- << "%" << endl;
- }
- }
-
- out << endl;
-
- for (int i = 0; i < RubyAccessMode_NUM; i++){
- if (m_accessModeTypeHistogram[i] > 0) {
- out << description << "_access_mode_type_"
- << (RubyAccessMode) i << ": "
- << m_accessModeTypeHistogram[i] << " "
- << 100.0 * m_accessModeTypeHistogram[i] / requests
- << "%" << endl;
- }
- }
- }
-
- out << endl;
-}
-
-void
-CacheProfiler::clearStats()
-{
- for (int i = 0; i < int(RubyRequestType_NUM); i++) {
- m_cacheRequestType[i] = 0;
- }
- for (int i = 0; i < int(GenericRequestType_NUM); i++) {
- m_genericRequestType[i] = 0;
- }
- m_misses = 0;
- m_demand_misses = 0;
- m_prefetches = 0;
- m_sw_prefetches = 0;
- m_hw_prefetches = 0;
- for (int i = 0; i < RubyAccessMode_NUM; i++) {
- m_accessModeTypeHistogram[i] = 0;
- }
-}
-
-void
-CacheProfiler::addCacheStatSample(RubyRequestType requestType,
- RubyAccessMode accessType,
- PrefetchBit pfBit)
-{
- m_cacheRequestType[requestType]++;
- addStatSample(accessType, pfBit);
-}
-
-void
-CacheProfiler::addGenericStatSample(GenericRequestType requestType,
- RubyAccessMode accessType,
- PrefetchBit pfBit)
-{
- m_genericRequestType[requestType]++;
- addStatSample(accessType, pfBit);
-}
-
-void
-CacheProfiler::addStatSample(RubyAccessMode accessType,
- PrefetchBit pfBit)
-{
- m_misses++;
-
- m_accessModeTypeHistogram[accessType]++;
- if (pfBit == PrefetchBit_No) {
- m_demand_misses++;
- } else if (pfBit == PrefetchBit_Yes) {
- m_prefetches++;
- m_sw_prefetches++;
- } else {
- // must be L1_HW || L2_HW prefetch
- m_prefetches++;
- m_hw_prefetches++;
- }
-}
+++ /dev/null
-/*
- * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef __MEM_RUBY_PROFILER_CACHEPROFILER_HH__
-#define __MEM_RUBY_PROFILER_CACHEPROFILER_HH__
-
-#include <iostream>
-#include <string>
-#include <vector>
-
-#include "mem/protocol/GenericRequestType.hh"
-#include "mem/protocol/PrefetchBit.hh"
-#include "mem/protocol/RubyAccessMode.hh"
-#include "mem/protocol/RubyRequestType.hh"
-#include "mem/ruby/common/Global.hh"
-#include "mem/ruby/common/Histogram.hh"
-
-class CacheProfiler
-{
- public:
- CacheProfiler(const std::string& description);
- ~CacheProfiler();
-
- void printStats(std::ostream& out) const;
- void clearStats();
-
- void addCacheStatSample(RubyRequestType requestType,
- RubyAccessMode type,
- PrefetchBit pfBit);
-
- void addGenericStatSample(GenericRequestType requestType,
- RubyAccessMode type,
- PrefetchBit pfBit);
-
- void print(std::ostream& out) const;
-
- private:
- // Private copy constructor and assignment operator
- CacheProfiler(const CacheProfiler& obj);
- CacheProfiler& operator=(const CacheProfiler& obj);
- void addStatSample(RubyAccessMode type, PrefetchBit pfBit);
-
- std::string m_description;
- int64 m_misses;
- int64 m_demand_misses;
- int64 m_prefetches;
- int64 m_sw_prefetches;
- int64 m_hw_prefetches;
- int64 m_accessModeTypeHistogram[RubyAccessMode_NUM];
-
- std::vector<int> m_cacheRequestType;
- std::vector<int> m_genericRequestType;
-};
-
-inline std::ostream&
-operator<<(std::ostream& out, const CacheProfiler& obj)
-{
- obj.print(out);
- out << std::flush;
- return out;
-}
-
-#endif // __MEM_RUBY_PROFILER_CACHEPROFILER_HH__
#include "base/hashmap.hh"
#include "mem/protocol/AccessType.hh"
#include "mem/protocol/GenericMachineType.hh"
-#include "mem/protocol/GenericRequestType.hh"
#include "mem/protocol/PrefetchBit.hh"
#include "mem/protocol/RubyAccessMode.hh"
#include "mem/protocol/RubyRequestType.hh"
Source('AccessTraceForAddress.cc')
Source('AddressProfiler.cc')
-Source('CacheProfiler.cc')
Source('MemCntrlProfiler.cc')
Source('Profiler.cc')
Source('StoreTrace.cc')
m_latency = p->latency;
m_cache_assoc = p->assoc;
m_policy = p->replacement_policy;
- m_profiler_ptr = new CacheProfiler(name());
m_start_index_bit = p->start_index_bit;
m_is_instruction_only_cache = p->is_icache;
m_resource_stalls = p->resourceStalls;
{
if (m_replacementPolicy_ptr != NULL)
delete m_replacementPolicy_ptr;
- delete m_profiler_ptr;
for (int i = 0; i < m_cache_num_sets; i++) {
for (int j = 0; j < m_cache_assoc; j++) {
delete m_cache[i][j];
m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
}
-void
-CacheMemory::profileMiss(const RubyRequest& msg)
-{
- m_profiler_ptr->addCacheStatSample(msg.getType(),
- msg.getAccessMode(),
- msg.getPrefetch());
-}
-
-void
-CacheMemory::profileGenericRequest(GenericRequestType requestType,
- RubyAccessMode accessType,
- PrefetchBit pfBit)
-{
- m_profiler_ptr->addGenericStatSample(requestType,
- accessType,
- pfBit);
-}
-
void
CacheMemory::recordCacheContents(int cntrl, CacheRecorder* tr) const
{
out << "printData() not supported" << endl;
}
-void
-CacheMemory::clearStats() const
-{
- m_profiler_ptr->clearStats();
-}
-
-void
-CacheMemory::printStats(ostream& out) const
-{
- m_profiler_ptr->printStats(out);
-}
-
void
CacheMemory::setLocked(const Address& address, int context)
{
}
void
-CacheMemory::recordRequestType(CacheRequestType requestType) {
- DPRINTF(RubyStats, "Recorded statistic: %s\n",
- CacheRequestType_to_string(requestType));
- switch(requestType) {
- case CacheRequestType_DataArrayRead:
- numDataArrayReads++;
- return;
- case CacheRequestType_DataArrayWrite:
- numDataArrayWrites++;
- return;
- case CacheRequestType_TagArrayRead:
- numTagArrayReads++;
- return;
- case CacheRequestType_TagArrayWrite:
- numTagArrayWrites++;
- return;
- default:
- warn("CacheMemory access_type not found: %s",
- CacheRequestType_to_string(requestType));
- }
-}
+CacheMemory::regStats()
+{
+ m_demand_hits
+ .name(name() + ".demand_hits")
+ .desc("Number of cache demand hits")
+ ;
-void
-CacheMemory::regStats() {
- using namespace Stats;
+ m_demand_misses
+ .name(name() + ".demand_misses")
+ .desc("Number of cache demand misses")
+ ;
+
+ m_demand_accesses
+ .name(name() + ".demand_accesses")
+ .desc("Number of cache demand accesses")
+ ;
+
+ m_demand_accesses = m_demand_hits + m_demand_misses;
+
+ m_sw_prefetches
+ .name(name() + ".total_sw_prefetches")
+ .desc("Number of software prefetches")
+ .flags(Stats::nozero)
+ ;
+
+ m_hw_prefetches
+ .name(name() + ".total_hw_prefetches")
+ .desc("Number of hardware prefetches")
+ .flags(Stats::nozero)
+ ;
+
+ m_prefetches
+ .name(name() + ".total_prefetches")
+ .desc("Number of prefetches")
+ .flags(Stats::nozero)
+ ;
+
+ m_prefetches = m_sw_prefetches + m_hw_prefetches;
+
+ m_accessModeType
+ .init(RubyAccessMode_NUM)
+ .name(name() + ".access_mode")
+ .flags(Stats::pdf | Stats::total)
+ ;
+ for (int i = 0; i < RubyAccessMode_NUM; i++) {
+ m_accessModeType
+ .subname(i, RubyAccessMode_to_string(RubyAccessMode(i)))
+ .flags(Stats::nozero)
+ ;
+ }
numDataArrayReads
.name(name() + ".num_data_array_reads")
.desc("number of data array reads")
+ .flags(Stats::nozero)
;
numDataArrayWrites
.name(name() + ".num_data_array_writes")
.desc("number of data array writes")
+ .flags(Stats::nozero)
;
numTagArrayReads
.name(name() + ".num_tag_array_reads")
.desc("number of tag array reads")
+ .flags(Stats::nozero)
;
numTagArrayWrites
.name(name() + ".num_tag_array_writes")
.desc("number of tag array writes")
+ .flags(Stats::nozero)
;
numTagArrayStalls
.name(name() + ".num_tag_array_stalls")
.desc("number of stalls caused by tag array")
+ .flags(Stats::nozero)
;
numDataArrayStalls
.name(name() + ".num_data_array_stalls")
.desc("number of stalls caused by data array")
+ .flags(Stats::nozero)
;
}
+void
+CacheMemory::recordRequestType(CacheRequestType requestType)
+{
+ DPRINTF(RubyStats, "Recorded statistic: %s\n",
+ CacheRequestType_to_string(requestType));
+ switch(requestType) {
+ case CacheRequestType_DataArrayRead:
+ numDataArrayReads++;
+ return;
+ case CacheRequestType_DataArrayWrite:
+ numDataArrayWrites++;
+ return;
+ case CacheRequestType_TagArrayRead:
+ numTagArrayReads++;
+ return;
+ case CacheRequestType_TagArrayWrite:
+ numTagArrayWrites++;
+ return;
+ default:
+ warn("CacheMemory access_type not found: %s",
+ CacheRequestType_to_string(requestType));
+ }
+}
+
bool
CacheMemory::checkResourceAvailable(CacheResourceType res, Address addr)
{
if (res == CacheResourceType_TagArray) {
if (tagArray.tryAccess(addressToCacheSet(addr))) return true;
else {
- DPRINTF(RubyResourceStalls, "Tag array stall on addr %s in set %d\n", addr, addressToCacheSet(addr));
+ DPRINTF(RubyResourceStalls,
+ "Tag array stall on addr %s in set %d\n",
+ addr, addressToCacheSet(addr));
numTagArrayStalls++;
return false;
}
} else if (res == CacheResourceType_DataArray) {
if (dataArray.tryAccess(addressToCacheSet(addr))) return true;
else {
- DPRINTF(RubyResourceStalls, "Data array stall on addr %s in set %d\n", addr, addressToCacheSet(addr));
+ DPRINTF(RubyResourceStalls,
+ "Data array stall on addr %s in set %d\n",
+ addr, addressToCacheSet(addr));
numDataArrayStalls++;
return false;
}
#include "base/statistics.hh"
#include "mem/protocol/CacheResourceType.hh"
#include "mem/protocol/CacheRequestType.hh"
-#include "mem/protocol/GenericRequestType.hh"
#include "mem/protocol/RubyRequest.hh"
#include "mem/ruby/common/DataBlock.hh"
-#include "mem/ruby/profiler/CacheProfiler.hh"
#include "mem/ruby/recorder/CacheRecorder.hh"
#include "mem/ruby/slicc_interface/AbstractCacheEntry.hh"
#include "mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh"
// Set this address to most recently used
void setMRU(const Address& address);
- void profileMiss(const RubyRequest & msg);
-
- void profileGenericRequest(GenericRequestType requestType,
- RubyAccessMode accessType,
- PrefetchBit pfBit);
-
void setLocked (const Address& addr, int context);
void clearLocked (const Address& addr);
bool isLocked (const Address& addr, int context);
+
// Print cache contents
void print(std::ostream& out) const;
void printData(std::ostream& out) const;
- void clearStats() const;
- void printStats(std::ostream& out) const;
-
- void recordRequestType(CacheRequestType requestType);
void regStats();
+ bool checkResourceAvailable(CacheResourceType res, Address addr);
+ void recordRequestType(CacheRequestType requestType);
+
+ public:
+ Stats::Scalar m_demand_hits;
+ Stats::Scalar m_demand_misses;
+ Stats::Formula m_demand_accesses;
+
+ Stats::Scalar m_sw_prefetches;
+ Stats::Scalar m_hw_prefetches;
+ Stats::Formula m_prefetches;
+
+ Stats::Vector m_accessModeType;
Stats::Scalar numDataArrayReads;
Stats::Scalar numDataArrayWrites;
Stats::Scalar numTagArrayReads;
Stats::Scalar numTagArrayWrites;
- bool checkResourceAvailable(CacheResourceType res, Address addr);
-
Stats::Scalar numTagArrayStalls;
Stats::Scalar numDataArrayStalls;
+
private:
// convert a Address to its location in the cache
Index addressToCacheSet(const Address& address) const;
AbstractReplacementPolicy *m_replacementPolicy_ptr;
- CacheProfiler* m_profiler_ptr;
-
BankedArray dataArray;
BankedArray tagArray;
+++ /dev/null
-# Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
-# Copyright (c) 2009 The Hewlett-Packard Development Company
-# All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met: redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer;
-# redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution;
-# neither the name of the copyright holders nor the names of its
-# contributors may be used to endorse or promote products derived from
-# this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-from slicc.ast.ExprAST import ExprAST
-from slicc.symbols import Type
-
-class InfixOperatorExprAST(ExprAST):
- def __init__(self, slicc, left, op, right):
- super(InfixOperatorExprAST, self).__init__(slicc)
-
- self.left = left
- self.op = op
- self.right = right
-
- def __repr__(self):
- return "[InfixExpr: %r %s %r]" % (self.left, self.op, self.right)
-
- def generate(self, code):
- lcode = self.slicc.codeFormatter()
- rcode = self.slicc.codeFormatter()
-
- ltype = self.left.generate(lcode)
- rtype = self.right.generate(rcode)
-
- # Figure out what the input and output types should be
- if self.op in ("==", "!=", ">=", "<=", ">", "<"):
- output = "bool"
- if (ltype != rtype):
- self.error("Type mismatch: left and right operands of " +
- "operator '%s' must be the same type. " +
- "left: '%s', right: '%s'",
- self.op, ltype, rtype)
- else:
- expected_types = []
- output = None
-
- if self.op in ("&&", "||"):
- # boolean inputs and output
- expected_types = [("bool", "bool", "bool")]
- elif self.op in ("<<", ">>"):
- expected_types = [("int", "int", "int"),
- ("Cycles", "int", "Cycles")]
- elif self.op in ("+", "-", "*", "/"):
- expected_types = [("int", "int", "int"),
- ("Cycles", "Cycles", "Cycles"),
- ("Cycles", "int", "Cycles"),
- ("int", "Cycles", "Cycles")]
- else:
- self.error("No operator matched with {0}!" .format(self.op))
-
- for expected_type in expected_types:
- left_input_type = self.symtab.find(expected_type[0], Type)
- right_input_type = self.symtab.find(expected_type[1], Type)
-
- if (left_input_type == ltype) and (right_input_type == rtype):
- output = expected_type[2]
-
- if output == None:
- self.error("Type mismatch: operands ({0}, {1}) for operator " \
- "'{2}' failed to match with the expected types" .
- format(ltype, rtype, self.op))
-
- # All is well
- fix = code.nofix()
- code("($lcode ${{self.op}} $rcode)")
- code.fix(fix)
- return self.symtab.find(output, Type)
--- /dev/null
+# Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+# Copyright (c) 2009 The Hewlett-Packard Development Company
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+from slicc.ast.ExprAST import ExprAST
+from slicc.symbols import Type
+
+class InfixOperatorExprAST(ExprAST):
+ def __init__(self, slicc, left, op, right):
+ super(InfixOperatorExprAST, self).__init__(slicc)
+
+ self.left = left
+ self.op = op
+ self.right = right
+
+ def __repr__(self):
+ return "[InfixExpr: %r %s %r]" % (self.left, self.op, self.right)
+
+ def generate(self, code):
+ lcode = self.slicc.codeFormatter()
+ rcode = self.slicc.codeFormatter()
+
+ ltype = self.left.generate(lcode)
+ rtype = self.right.generate(rcode)
+
+ # Figure out what the input and output types should be
+ if self.op in ("==", "!=", ">=", "<=", ">", "<"):
+ output = "bool"
+ if (ltype != rtype):
+ self.error("Type mismatch: left and right operands of " +
+ "operator '%s' must be the same type. " +
+ "left: '%s', right: '%s'",
+ self.op, ltype, rtype)
+ else:
+ expected_types = []
+ output = None
+
+ if self.op in ("&&", "||"):
+ # boolean inputs and output
+ expected_types = [("bool", "bool", "bool")]
+ elif self.op in ("<<", ">>"):
+ expected_types = [("int", "int", "int"),
+ ("Cycles", "int", "Cycles")]
+ elif self.op in ("+", "-", "*", "/"):
+ expected_types = [("int", "int", "int"),
+ ("Cycles", "Cycles", "Cycles"),
+ ("Cycles", "int", "Cycles"),
+ ("Scalar", "int", "Scalar"),
+ ("int", "Cycles", "Cycles")]
+ else:
+ self.error("No operator matched with {0}!" .format(self.op))
+
+ for expected_type in expected_types:
+ left_input_type = self.symtab.find(expected_type[0], Type)
+ right_input_type = self.symtab.find(expected_type[1], Type)
+
+ if (left_input_type == ltype) and (right_input_type == rtype):
+ output = expected_type[2]
+
+ if output == None:
+ self.error("Type mismatch: operands ({0}, {1}) for operator " \
+ "'{2}' failed to match with the expected types" .
+ format(ltype, rtype, self.op))
+
+ # All is well
+ fix = code.nofix()
+ code("($lcode ${{self.op}} $rcode)")
+ code.fix(fix)
+ return self.symtab.find(output, Type)
+
+class PrefixOperatorExprAST(ExprAST):
+ def __init__(self, slicc, op, operand):
+ super(PrefixOperatorExprAST, self).__init__(slicc)
+
+ self.op = op
+ self.operand = operand
+
+ def __repr__(self):
+ return "[PrefixExpr: %s %r]" % (self.op, self.operand)
+
+ def generate(self, code):
+ opcode = self.slicc.codeFormatter()
+ optype = self.operand.generate(opcode)
+
+ fix = code.nofix()
+ code("(${{self.op}} $opcode)")
+ code.fix(fix)
+
+ return self.symtab.find("void", Type)
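(To make the mapping concrete: PrefixOperatorExprAST.generate() emits
"(op operand)", so an action statement such as ++L1Dcache.demand_misses
should compile down to C++ of roughly the shape below. The controller
pointer and member names here are guesses at SLICC's generated naming,
not verbatim output.)

    #include <cstdint>

    // Hypothetical stand-ins for the generated controller code.
    struct Scalar {
        uint64_t value = 0;
        Scalar &operator++() { ++value; return *this; }
    };
    struct CacheMemory { Scalar m_demand_misses; };

    void action_uu_profileDataMiss(CacheMemory *m_L1Dcache_ptr)
    {
        // "(${{self.op}} $opcode)" with op = "++" yields:
        (++(m_L1Dcache_ptr->m_demand_misses));
    }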
from slicc.ast.FuncDeclAST import *
from slicc.ast.IfStatementAST import *
from slicc.ast.InPortDeclAST import *
-from slicc.ast.InfixOperatorExprAST import *
from slicc.ast.IsValidPtrExprAST import *
from slicc.ast.LiteralExprAST import *
from slicc.ast.LocalVariableAST import *
from slicc.ast.NewExprAST import *
from slicc.ast.OodAST import *
from slicc.ast.ObjDeclAST import *
+from slicc.ast.OperatorExprAST import *
from slicc.ast.OutPortDeclAST import *
from slicc.ast.PairAST import *
from slicc.ast.PairListAST import *
'LEFTSHIFT', 'RIGHTSHIFT',
'NOT', 'AND', 'OR',
'PLUS', 'DASH', 'STAR', 'SLASH',
+ 'INCR', 'DECR',
'DOUBLE_COLON', 'SEMI',
'ASSIGN', 'DOT',
'IDENT', 'LIT_BOOL', 'FLOATNUMBER', 'NUMBER', 'STRING' ]
t_SEMI = r';'
t_ASSIGN = r':='
t_DOT = r'\.'
+ t_INCR = r'\+\+'
+ t_DECR = r'--'
precedence = (
+ ('left', 'INCR', 'DECR'),
('left', 'AND', 'OR'),
('left', 'EQ', 'NE'),
('left', 'LT', 'GT', 'LE', 'GE'),
# FIXME - unary not
def p_expr__unary_op(self, p):
"""expr : NOT expr
+ | INCR expr
+ | DECR expr
| DASH expr %prec UMINUS"""
- p[0] = PrefixOperatorExpr(p[1], p[2])
+ p[0] = ast.PrefixOperatorExprAST(self, p[1], p[2])
def p_expr__parens(self, p):
"aexpr : '(' expr ')'"
# them. Print out these stats before dumping state transition stats.
#
for param in self.config_parameters:
- if param.type_ast.type.ident == "CacheMemory" or \
- param.type_ast.type.ident == "DirectoryMemory" or \
+ if param.type_ast.type.ident == "DirectoryMemory" or \
param.type_ast.type.ident == "MemoryControl":
assert(param.pointer)
code(' m_${{param.ident}}_ptr->printStats(out);')
# them. These stats must be cleared too.
#
for param in self.config_parameters:
- if param.type_ast.type.ident == "CacheMemory" or \
- param.type_ast.type.ident == "MemoryControl":
+ if param.type_ast.type.ident == "MemoryControl":
assert(param.pointer)
code(' m_${{param.ident}}_ptr->clearStats();')