From fc09bc8678b5e78d553e009105c58e5c5d5befb4 Mon Sep 17 00:00:00 2001
From: Andreas Hansson
Date: Thu, 30 May 2013 12:54:06 -0400
Subject: [PATCH] cpu: Add request elasticity to the traffic generator

This patch adds an optional request elasticity to the traffic
generator, effectively compensating for it in the case of the linear
and random generators, and adding it in the case of the trace
generator. The accounting is left with the top-level traffic
generator, and the individual generators do the necessary math as
part of determining the next packet tick.

Note that in the linear and random generators we have to compensate
for the blocked time so that they are not elastic by default, i.e.
without this patch the aforementioned generators slow down in the
case of back-pressure.
---
 src/cpu/testers/traffic_gen/TrafficGen.py  |  6 ++++
 src/cpu/testers/traffic_gen/generators.cc  | 42 ++++++++++++++++++----
 src/cpu/testers/traffic_gen/generators.hh  | 18 ++++++----
 src/cpu/testers/traffic_gen/traffic_gen.cc | 13 ++++---
 src/cpu/testers/traffic_gen/traffic_gen.hh |  6 ++++
 5 files changed, 66 insertions(+), 19 deletions(-)

diff --git a/src/cpu/testers/traffic_gen/TrafficGen.py b/src/cpu/testers/traffic_gen/TrafficGen.py
index 916279f91..f29cedb3a 100644
--- a/src/cpu/testers/traffic_gen/TrafficGen.py
+++ b/src/cpu/testers/traffic_gen/TrafficGen.py
@@ -71,3 +71,9 @@ class TrafficGen(MemObject):
 
     # System used to determine the mode of the memory system
     system = Param.System(Parent.any, "System this generator is part of")
+
+    # Should requests respond to back-pressure or not, if true, the
+    # rate of the traffic generator will be slowed down if requests
+    # are not immediately accepted
+    elastic_req = Param.Bool(False,
+                             "Slow down requests in case of backpressure")
diff --git a/src/cpu/testers/traffic_gen/generators.cc b/src/cpu/testers/traffic_gen/generators.cc
index f9556b2b3..8a03e21d0 100644
--- a/src/cpu/testers/traffic_gen/generators.cc
+++ b/src/cpu/testers/traffic_gen/generators.cc
@@ -112,7 +112,7 @@ LinearGen::getNextPacket()
 }
 
 Tick
-LinearGen::nextPacketTick() const
+LinearGen::nextPacketTick(bool elastic, Tick delay) const
 {
     // Check to see if we have reached the data limit. If dataLimit is
     // zero we do not have a data limit and therefore we will keep
@@ -123,7 +123,19 @@ LinearGen::nextPacketTick() const
         return MaxTick;
     } else {
         // return the time when the next request should take place
-        return curTick() + random_mt.random(minPeriod, maxPeriod);
+        Tick wait = random_mt.random(minPeriod, maxPeriod);
+
+        // compensate for the delay experienced to not be elastic, by
+        // default the value we generate is from the time we are
+        // asked, so the elasticity happens automatically
+        if (!elastic) {
+            if (wait < delay)
+                wait = 0;
+            else
+                wait -= delay;
+        }
+
+        return curTick() + wait;
     }
 }
 
@@ -162,7 +174,7 @@ RandomGen::getNextPacket()
 }
 
 Tick
-RandomGen::nextPacketTick() const
+RandomGen::nextPacketTick(bool elastic, Tick delay) const
 {
     // Check to see if we have reached the data limit. If dataLimit is
     // zero we do not have a data limit and therefore we will keep
@@ -173,8 +185,20 @@
         // No more requests. Return MaxTick.
         return MaxTick;
     } else {
-        // Return the time when the next request should take place.
-        return curTick() + random_mt.random(minPeriod, maxPeriod);
+        // return the time when the next request should take place
+        Tick wait = random_mt.random(minPeriod, maxPeriod);
+
+        // compensate for the delay experienced to not be elastic, by
+        // default the value we generate is from the time we are
+        // asked, so the elasticity happens automatically
+        if (!elastic) {
+            if (wait < delay)
+                wait = 0;
+            else
+                wait -= delay;
+        }
+
+        return curTick() + wait;
     }
 }
 
@@ -217,7 +241,7 @@ TraceGen::InputStream::read(TraceElement& element)
 }
 
 Tick
-TraceGen::nextPacketTick() const
+TraceGen::nextPacketTick(bool elastic, Tick delay) const
 {
     if (traceComplete) {
         DPRINTF(TrafficGen, "No next tick as trace is finished\n");
@@ -232,7 +256,11 @@ TraceGen::nextPacketTick() const
     DPRINTF(TrafficGen, "Next packet tick is %d\n",
             tickOffset + nextElement.tick);
 
-    return tickOffset + nextElement.tick;
+    // if the playback is supposed to be elastic, add the delay
+    if (elastic)
+        tickOffset += delay;
+
+    return std::max(tickOffset + nextElement.tick, curTick());
 }
 
 void
diff --git a/src/cpu/testers/traffic_gen/generators.hh b/src/cpu/testers/traffic_gen/generators.hh
index 2b86afa22..dd3706a8f 100644
--- a/src/cpu/testers/traffic_gen/generators.hh
+++ b/src/cpu/testers/traffic_gen/generators.hh
@@ -124,9 +124,11 @@ class BaseGen
      * means that there will not be any further packets in the current
      * activation cycle of the generator.
      *
+     * @param elastic should the injection respond to flow control or not
+     * @param delay time the previous packet spent waiting
      * @return next tick when a packet is available
      */
-    virtual Tick nextPacketTick() const = 0;
+    virtual Tick nextPacketTick(bool elastic, Tick delay) const = 0;
 
 };
 
@@ -146,7 +148,7 @@ class IdleGen : public BaseGen
 
     PacketPtr getNextPacket() { return NULL; }
 
-    Tick nextPacketTick() const { return MaxTick; }
+    Tick nextPacketTick(bool elastic, Tick delay) const { return MaxTick; }
 };
 
 /**
@@ -192,7 +194,7 @@ class LinearGen : public BaseGen
 
     PacketPtr getNextPacket();
 
-    Tick nextPacketTick() const;
+    Tick nextPacketTick(bool elastic, Tick delay) const;
 
   private:
 
@@ -269,7 +271,7 @@ class RandomGen : public BaseGen
 
     PacketPtr getNextPacket();
 
-    Tick nextPacketTick() const;
+    Tick nextPacketTick(bool elastic, Tick delay) const;
 
   private:
 
@@ -403,6 +405,7 @@ class TraceGen : public BaseGen
              const std::string& trace_file, Addr addr_offset)
         : BaseGen(_name, master_id, _duration),
           trace(trace_file),
+          tickOffset(0),
          addrOffset(addr_offset),
           traceComplete(false)
     {
@@ -419,7 +422,7 @@ class TraceGen : public BaseGen
      * the end of the file has been reached, it returns MaxTick to
      * indicate that there will be no more requests.
      */
-    Tick nextPacketTick() const;
+    Tick nextPacketTick(bool elastic, Tick delay) const;
 
   private:
 
@@ -432,9 +435,10 @@ class TraceGen : public BaseGen
 
     /**
      * Stores the time when the state was entered. This is to add an
-     * offset to the times stored in the trace file.
+     * offset to the times stored in the trace file. This is mutable
+     * to allow us to change it as part of nextPacketTick.
      */
-    Tick tickOffset;
+    mutable Tick tickOffset;
 
     /**
      * Offset for memory requests. Used to shift the trace
diff --git a/src/cpu/testers/traffic_gen/traffic_gen.cc b/src/cpu/testers/traffic_gen/traffic_gen.cc
index 8916dcb8d..f5835f8f4 100644
--- a/src/cpu/testers/traffic_gen/traffic_gen.cc
+++ b/src/cpu/testers/traffic_gen/traffic_gen.cc
@@ -55,6 +55,7 @@ TrafficGen::TrafficGen(const TrafficGenParams* p)
       system(p->system),
       masterID(system->getMasterId(name())),
       configFile(p->config_file),
+      elasticReq(p->elastic_req),
       nextTransitionTick(0),
       nextPacketTick(0),
       port(name() + ".port", *this),
@@ -107,7 +108,7 @@ TrafficGen::initState()
     // when not restoring from a checkpoint, make sure we kick things off
     if (system->isTimingMode()) {
         // call nextPacketTick on the state to advance it
-        nextPacketTick = states[currState]->nextPacketTick();
+        nextPacketTick = states[currState]->nextPacketTick(elasticReq, 0);
         schedule(updateEvent, std::min(nextPacketTick, nextTransitionTick));
     } else {
         DPRINTF(TrafficGen,
@@ -165,7 +166,7 @@ TrafficGen::unserialize(Checkpoint* cp, const string& section)
 
     // @todo In the case of a stateful generator state such as the
     // trace player we would also have to restore the position in the
-    // trace playback
+    // trace playback and the tick offset
 
     UNSERIALIZE_SCALAR(currState);
 }
@@ -193,7 +194,7 @@ TrafficGen::update()
     if (retryPkt == NULL) {
         // schedule next update event based on either the next execute
         // tick or the next transition, which ever comes first
-        nextPacketTick = states[currState]->nextPacketTick();
+        nextPacketTick = states[currState]->nextPacketTick(elasticReq, 0);
         Tick nextEventTick = std::min(nextPacketTick, nextTransitionTick);
         DPRINTF(TrafficGen, "Next event scheduled at %lld\n", nextEventTick);
         schedule(updateEvent, nextEventTick);
@@ -386,14 +387,16 @@ TrafficGen::recvRetry()
     if (port.sendTimingReq(retryPkt)) {
         retryPkt = NULL;
         // remember how much delay was incurred due to back-pressure
-        // when sending the request
+        // when sending the request, we also use this to derive
+        // the tick for the next packet
         Tick delay = curTick() - retryPktTick;
         retryPktTick = 0;
         retryTicks += delay;
 
         if (drainManager == NULL) {
             // packet is sent, so find out when the next one is due
-            nextPacketTick = states[currState]->nextPacketTick();
+            nextPacketTick = states[currState]->nextPacketTick(elasticReq,
+                                                               delay);
             Tick nextEventTick = std::min(nextPacketTick, nextTransitionTick);
             schedule(updateEvent, std::max(curTick(), nextEventTick));
         } else {
diff --git a/src/cpu/testers/traffic_gen/traffic_gen.hh b/src/cpu/testers/traffic_gen/traffic_gen.hh
index 0adcf781e..91460053a 100644
--- a/src/cpu/testers/traffic_gen/traffic_gen.hh
+++ b/src/cpu/testers/traffic_gen/traffic_gen.hh
@@ -116,6 +116,12 @@ class TrafficGen : public MemObject
      */
     const std::string configFile;
 
+    /**
+     * Determine whether to add elasticity in the request injection,
+     * thus responding to backpressure by slowing things down.
+     */
+    const bool elasticReq;
+
     /** Time of next transition */
     Tick nextTransitionTick;
 
-- 
2.30.2
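Example usage (illustrative, not part of the patch): the sketch below shows how
the new elastic_req parameter could be set from a gem5 Python config script.
The script name, the 'tgen.cfg' traffic description file, the SimpleMemory
sizing and the port wiring are assumptions and may need adjusting for the gem5
version in use; only config_file and elastic_req are actual parameters of the
TrafficGen object added or referenced by this patch.

    # tgen_elastic_example.py -- hypothetical config sketch
    import m5
    from m5.objects import *

    # timing mode is needed for the generator to inject requests
    system = System(mem_mode = 'timing')

    # traffic generator driven by a config file (placeholder name);
    # elastic_req = True lets back-pressure slow the injection down,
    # while the default False preserves the configured request rate
    system.tgen = TrafficGen(config_file = 'tgen.cfg',
                             elastic_req = True)

    # a simple memory to absorb the generated requests
    system.physmem = SimpleMemory(range = AddrRange('512MB'))
    system.tgen.port = system.physmem.port

    root = Root(full_system = False, system = system)
    m5.instantiate()
    exit_event = m5.simulate()
    print('Exiting @ tick %i because %s' % (m5.curTick(), exit_event.getCause()))

With elastic_req set to True, the generators schedule the next request relative
to the time a blocked request was finally accepted (and the trace player shifts
its tick offset by the incurred delay), so sustained back-pressure lowers the
injection rate. With the default False, the incurred delay is subtracted from
the next inter-request wait, keeping the configured rate.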