Periodically clear the O3 store set memory dependence predictor so that stale entries do not accumulate and cause loads to be needlessly predicted as dependent on older stores. The clear interval is exposed as a new store_set_clear_period parameter (default 250000 loads/stores). This patch improves performance by as much as 10% on some SPEC benchmarks.
LSQDepCheckShift = Param.Unsigned(4, "Number of places to shift addr before check")
LSQCheckLoads = Param.Bool(True,
"Should dependency violations be checked for loads & stores or just stores")
+ store_set_clear_period = Param.Unsigned(250000,
+ "Number of load/store insts before the dep predictor should be invalidated")
LFSTSize = Param.Unsigned(1024, "Last fetched store table size")
SSITSize = Param.Unsigned(1024, "Store set ID table size")
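For reference, the new knob can be overridden from a gem5 Python config script in the usual way. This is a minimal sketch, assuming the stock DerivO3CPU SimObject and that the rest of the system/workload setup happens elsewhere; the chosen value is purely illustrative:

    # Hypothetical config snippet: wipe the dependence predictor every
    # 500k loads/stores instead of the default 250k.
    from m5.objects import DerivO3CPU

    cpu = DerivO3CPU()
    cpu.store_set_clear_period = 500000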
template <class MemDepPred, class Impl>
MemDepUnit<MemDepPred, Impl>::MemDepUnit(DerivO3CPUParams *params)
: _name(params->name + ".memdepunit"),
- depPred(params->SSITSize, params->LFSTSize), loadBarrier(false),
- loadBarrierSN(0), storeBarrier(false), storeBarrierSN(0), iqPtr(NULL)
+ depPred(params->store_set_clear_period, params->SSITSize,
+ params->LFSTSize),
+ loadBarrier(false), loadBarrierSN(0), storeBarrier(false),
+ storeBarrierSN(0), iqPtr(NULL)
{
DPRINTF(MemDepUnit, "Creating MemDepUnit object.\n");
}
_name = csprintf("%s.memDep%d", params->name, tid);
id = tid;
- depPred.init(params->SSITSize, params->LFSTSize);
+ depPred.init(params->store_set_clear_period, params->SSITSize,
+ params->LFSTSize);
}
template <class MemDepPred, class Impl>
#include "cpu/o3/store_set.hh"
#include "debug/StoreSet.hh"
-StoreSet::StoreSet(int _SSIT_size, int _LFST_size)
- : SSITSize(_SSIT_size), LFSTSize(_LFST_size)
+StoreSet::StoreSet(uint64_t clear_period, int _SSIT_size, int _LFST_size)
+ : clearPeriod(clear_period), SSITSize(_SSIT_size), LFSTSize(_LFST_size)
{
DPRINTF(StoreSet, "StoreSet: Creating store set object.\n");
DPRINTF(StoreSet, "StoreSet: SSIT size: %i, LFST size: %i.\n",
indexMask = SSITSize - 1;
offsetBits = 2;
+
+ memOpsPred = 0;
}
StoreSet::~StoreSet()
}
void
-StoreSet::init(int _SSIT_size, int _LFST_size)
+StoreSet::init(uint64_t clear_period, int _SSIT_size, int _LFST_size)
{
SSITSize = _SSIT_size;
LFSTSize = _LFST_size;
+ clearPeriod = clear_period;
DPRINTF(StoreSet, "StoreSet: Creating store set object.\n");
DPRINTF(StoreSet, "StoreSet: SSIT size: %i, LFST size: %i.\n",
indexMask = SSITSize - 1;
offsetBits = 2;
+
+ memOpsPred = 0;
}
}
}
+void
+StoreSet::checkClear()
+{
+ memOpsPred++;
+ if (memOpsPred > clearPeriod) {
+ DPRINTF(StoreSet, "Wiping predictor state beacuse %d ld/st executed\n",
+ clearPeriod);
+ memOpsPred = 0;
+ clear();
+ }
+}
+
void
StoreSet::insertLoad(Addr load_PC, InstSeqNum load_seq_num)
{
+ checkClear();
// Does nothing.
return;
}
int store_SSID;
+ checkClear();
assert(index < SSITSize);
if (!validSSIT[index]) {
StoreSet() { };
/** Creates store set predictor with given table sizes. */
- StoreSet(int SSIT_size, int LFST_size);
+ StoreSet(uint64_t clear_period, int SSIT_size, int LFST_size);
/** Default destructor. */
~StoreSet();
/** Initializes the store set predictor with the given table sizes. */
- void init(int SSIT_size, int LFST_size);
+ void init(uint64_t clear_period, int SSIT_size, int LFST_size);
/** Records a memory ordering violation between the younger load
* and the older store. */
void violation(Addr store_PC, Addr load_PC);
+ /** Clears the store set predictor periodically so that its entries
+ * do not saturate over time, which would cause loads to be constantly
+ * predicted as conflicting with older stores.
+ */
+ void checkClear();
+
/** Inserts a load into the store set predictor. This does nothing but
* is included in case other predictors require a similar function.
*/
typedef std::map<InstSeqNum, int, ltseqnum>::iterator SeqNumMapIt;
+ /** Number of loads/stores to process before wiping the predictor so
+ * that its entries do not saturate.
+ */
+ uint64_t clearPeriod;
+
/** Store Set ID Table size, in entries. */
int SSITSize;
// HACK: Hardcoded for now.
int offsetBits;
+
+ /** Number of memory operations processed since the predictor was last cleared. */
+ int memOpsPred;
};
#endif // __CPU_O3_STORE_SET_HH__