/* 99cefb771cd260c1c8538761326b263da5ccb53d
2 * Copyright (c) 2015, 2019 ARM Limited
5 * The license below extends only to copyright in the software and shall
6 * not be construed as granting a license to any other intellectual
7 * property including but not limited to intellectual property relating
8 * to a hardware implementation of the functionality of the software
9 * licensed hereunder. You may use the software subject to the license
10 * terms below provided that you ensure that this notice is replicated
11 * unmodified and in its entirety in all distributions of the software,
12 * modified or unmodified, in source code or in binary form.
14 * Copyright (c) 2002-2005 The Regents of The University of Michigan
15 * All rights reserved.
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions are
19 * met: redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer;
21 * redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution;
24 * neither the name of the copyright holders nor the names of its
25 * contributors may be used to endorse or promote products derived from
26 * this software without specific prior written permission.
28 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
29 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
30 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
31 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
32 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
33 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
34 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
35 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
36 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
37 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
38 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
41 #include "cpu/testers/memtest/memtest.hh"
43 #include "base/random.hh"
44 #include "base/statistics.hh"
45 #include "base/trace.hh"
46 #include "debug/MemTest.hh"
47 #include "sim/sim_exit.hh"
48 #include "sim/stats.hh"
49 #include "sim/system.hh"
// Monotonically increasing counter used to hand out a unique id to each
// MemTest instance (consumed in the MemTest constructor via
// `id = TESTER_ALLOCATOR++`).  `static` gives it internal linkage so no
// other translation unit can mint or clobber tester ids.
static unsigned int TESTER_ALLOCATOR = 0;
// MemTest::CpuPort::recvTimingResp -- timing-response callback on the
// tester's CPU-side port; hands the received packet to the owning
// tester for checking and accounting.
// NOTE(review): mangled extract -- the return type, braces and the
// trailing return statement (presumably `return true;`) are missing
// from this view; confirm against upstream gem5.
56 MemTest::CpuPort::recvTimingResp(PacketPtr pkt
)
58 memtest
.completeRequest(pkt
);
// MemTest::CpuPort::recvReqRetry -- port retry callback.  The body is
// not visible in this extract (original lines after 63 are missing);
// presumably it forwards to the owning tester's retry handling --
// TODO confirm against upstream gem5.
63 MemTest::CpuPort::recvReqRetry()
// MemTest::sendPkt -- attempt to issue a packet.  Only the timing-mode
// send and its failure test are visible here; the handling on refusal
// (presumably stashing the packet for a later retry) and the return
// value are missing from this mangled extract -- confirm upstream.
69 MemTest::sendPkt(PacketPtr pkt
) {
74 if (!port
.sendTimingReq(pkt
)) {
// MemTest::MemTest -- construct the tester from its Params struct.
// NOTE(review): mangled extract -- the base-class initializer, several
// member initializers, the final argument of the fatal_if below and the
// closing brace are missing from this view.
82 MemTest::MemTest(const Params
&p
)
// Event wrappers: each event fires the matching member function.
84 tickEvent([this]{ tick(); }, name()),
85 noRequestEvent([this]{ noRequest(); }, name()),
86 noResponseEvent([this]{ noResponse(); }, name()),
// Copy the configuration knobs out of the Params struct.
91 percentReads(p
.percent_reads
),
92 percentFunctional(p
.percent_functional
),
93 percentUncacheable(p
.percent_uncacheable
),
94 requestorId(p
.system
->getRequestorId(this)),
95 blockSize(p
.system
->cacheLineSize()),
// Mask selecting the offset bits within one cache block.
96 blockAddrMask(blockSize
- 1),
97 progressInterval(p
.progress_interval
),
98 progressCheck(p
.progress_check
),
99 nextProgressMessage(p
.progress_interval
),
100 maxLoads(p
.max_loads
),
101 atomic(p
.system
->isAtomicMode()),
102 suppressFuncErrors(p
.suppress_func_errors
), stats(this)
// Each tester draws a unique id from the file-scope allocator; per the
// comment in the request generator, the id is used as an offset within
// a block, so at most blockSize testers are allowed.
104 id
= TESTER_ALLOCATOR
++;
105 fatal_if(id
>= blockSize
, "Too many testers, only %d allowed\n",
// Fixed base addresses of the two cacheable windows and the
// uncacheable window used by the address generator.
108 baseAddr1
= 0x100000;
109 baseAddr2
= 0x400000;
110 uncacheAddr
= 0x800000;
116 // kick things into action
117 schedule(tickEvent
, curTick());
// Arm the "no request sent" watchdog for progressCheck cycles.
118 schedule(noRequestEvent
, clockEdge(progressCheck
));
// MemTest::getPort -- named-port lookup used by the binding machinery.
// NOTE(review): mangled extract -- the branch body returning the
// tester's own port for if_name == "port" (original lines 125-126) is
// missing from this view; only the base-class fallback is visible.
122 MemTest::getPort(const std::string
&if_name
, PortID idx
)
124 if (if_name
== "port")
127 return ClockedObject::getPort(if_name
, idx
);
// MemTest::completeRequest -- verify a completed access and update the
// tester's bookkeeping.
// NOTE(review): mangled extract -- the return type, braces, the
// read/write else-structure and the numReads/numWrites increments that
// the progress logic below implies are missing from this view.
131 MemTest::completeRequest(PacketPtr pkt
, bool functional
)
133 const RequestPtr
&req
= pkt
->req
;
// Every tester access is exactly one byte.
134 assert(req
->getSize() == 1);
136 // this address is no longer outstanding
137 auto remove_addr
= outstandingAddrs
.find(req
->getPaddr());
138 assert(remove_addr
!= outstandingAddrs
.end());
139 outstandingAddrs
.erase(remove_addr
);
141 DPRINTF(MemTest
, "Completing %s at address %x (blk %x) %s\n",
142 pkt
->isWrite() ? "write" : "read",
143 req
->getPaddr(), blockAlign(req
->getPaddr()),
144 pkt
->isError() ? "error" : "success");
146 const uint8_t *pkt_data
= pkt
->getConstPtr
<uint8_t>();
// Error responses are fatal unless they came from a functional access
// and the tester was configured to suppress functional errors.
148 if (pkt
->isError()) {
149 if (!functional
|| !suppressFuncErrors
)
150 panic( "%s access failed at %#x\n",
151 pkt
->isWrite() ? "Write" : "Read", req
->getPaddr());
// Read path: compare the returned byte against the reference copy
// maintained in referenceData; any mismatch aborts the simulation.
154 uint8_t ref_data
= referenceData
[req
->getPaddr()];
155 if (pkt_data
[0] != ref_data
) {
156 panic("%s: read of %x (blk %x) @ cycle %d "
157 "returns %x, expected %x\n", name(),
158 req
->getPaddr(), blockAlign(req
->getPaddr()), curTick(),
159 pkt_data
[0], ref_data
);
// Periodic progress message on stderr every progressInterval reads.
165 if (numReads
== (uint64_t)nextProgressMessage
) {
166 ccprintf(cerr
, "%s: completed %d read, %d write accesses @%d\n",
167 name(), numReads
, numWrites
, curTick());
168 nextProgressMessage
+= progressInterval
;
// Optional stopping condition; maxLoads == 0 means run forever.
171 if (maxLoads
!= 0 && numReads
>= maxLoads
)
172 exitSimLoop("maximum number of loads reached");
// Write path: the written byte becomes the new reference value.
174 assert(pkt
->isWrite());
176 // update the reference data
177 referenceData
[req
->getPaddr()] = pkt_data
[0];
183 // the packet will delete the data
186 // finally shift the response timeout forward if we are still
187 // expecting responses; deschedule it otherwise
188 if (outstandingAddrs
.size() != 0)
189 reschedule(noResponseEvent
, clockEdge(progressCheck
));
190 else if (noResponseEvent
.scheduled())
191 deschedule(noResponseEvent
);
// MemTest::MemTestStats -- register the tester's two access counters
// with the stats framework.
// NOTE(review): the body braces (likely an empty body) are missing
// from this mangled extract.
193 MemTest::MemTestStats::MemTestStats(Stats::Group
*parent
)
194 : Stats::Group(parent
),
195 ADD_STAT(numReads
, "number of read accesses completed"),
196 ADD_STAT(numWrites
, "number of write accesses completed")
// MemTest::tick -- generate and issue one new single-byte access.
// NOTE(review): mangled extract -- the function signature, the `do {`
// opening the address-generation loop, the declaration of the `size`
// and `paddr` locals, the atomic-mode path and several branches and
// closing braces are missing from this view.
204 // we should never tick if we are waiting for a retry
207 // create a new request
// Randomized parameters for this access: command selector (compared
// against percentReads below), write data byte, cacheability, and
// which of the two cacheable base regions to target.
208 unsigned cmd
= random_mt
.random(0, 100);
209 uint8_t data
= random_mt
.random
<uint8_t>();
210 bool uncacheable
= random_mt
.random(0, 100) < percentUncacheable
;
211 unsigned base
= random_mt
.random(0, 1);
212 Request::Flags flags
;
215 // generate a unique address
217 unsigned offset
= random_mt
.random
<unsigned>(0, size
- 1);
219 // use the tester id as offset within the block for false sharing
// NOTE(review): the `+ id` term that the comment above implies appears
// to have been lost from the next statement in this extract -- confirm
// against upstream gem5.
220 offset
= blockAlign(offset
);
// Uncacheable accesses target the dedicated window above uncacheAddr
// and carry the UNCACHEABLE request flag.
224 flags
.set(Request::UNCACHEABLE
);
225 paddr
= uncacheAddr
+ offset
;
227 paddr
= ((base
) ? baseAddr1
: baseAddr2
) + offset
;
// Keep generating until the address is not already outstanding.
229 } while (outstandingAddrs
.find(paddr
) != outstandingAddrs
.end());
// NOTE(review): the right-hand operand of this && (original line 232)
// is missing from this extract.
231 bool do_functional
= (random_mt
.random(0, 100) < percentFunctional
) &&
// Single-byte request attributed to this tester's requestor id.
233 RequestPtr req
= std::make_shared
<Request
>(paddr
, 1, flags
, requestorId
);
236 outstandingAddrs
.insert(paddr
);
// Sanity bound on the number of in-flight requests per tester.
239 panic_if(outstandingAddrs
.size() > 100,
240 "Tester %s has more than 100 outstanding requests\n", name());
242 PacketPtr pkt
= nullptr;
243 uint8_t *pkt_data
= new uint8_t[1];
// Read path: make sure a reference value exists for this address
// before the read is issued.
245 if (cmd
< percentReads
) {
246 // start by ensuring there is a reference value if we have not
247 // seen this address before
248 M5_VAR_USED
uint8_t ref_data
= 0;
249 auto ref
= referenceData
.find(req
->getPaddr());
250 if (ref
== referenceData
.end()) {
251 referenceData
[req
->getPaddr()] = 0;
253 ref_data
= ref
->second
;
257 "Initiating %sread at addr %x (blk %x) expecting %x\n",
258 do_functional
? "functional " : "", req
->getPaddr(),
259 blockAlign(req
->getPaddr()), ref_data
);
// The packet takes ownership of pkt_data via dataDynamic.
261 pkt
= new Packet(req
, MemCmd::ReadReq
);
262 pkt
->dataDynamic(pkt_data
);
// Write path.
264 DPRINTF(MemTest
, "Initiating %swrite at addr %x (blk %x) value %x\n",
265 do_functional
? "functional " : "", req
->getPaddr(),
266 blockAlign(req
->getPaddr()), data
);
268 pkt
= new Packet(req
, MemCmd::WriteReq
);
269 pkt
->dataDynamic(pkt_data
);
273 // there is no point in ticking if we are waiting for a retry
274 bool keep_ticking
= true;
// Functional accesses complete immediately (with functional-error
// suppression enabled); timing accesses go through sendPkt and may
// stall waiting for a retry.
276 pkt
->setSuppressFuncError();
277 port
.sendFunctional(pkt
);
278 completeRequest(pkt
, true);
280 keep_ticking
= sendPkt(pkt
);
284 // schedule the next tick
285 schedule(tickEvent
, clockEdge(interval
));
287 // finally shift the timeout for sending of requests forwards
288 // as we have successfully sent a packet
289 reschedule(noRequestEvent
, clockEdge(progressCheck
), true);
291 DPRINTF(MemTest
, "Waiting for retry\n");
294 // Schedule noResponseEvent now if we are expecting a response
295 if (!noResponseEvent
.scheduled() && (outstandingAddrs
.size() != 0))
296 schedule(noResponseEvent
, clockEdge(progressCheck
));
// Watchdog body fragment: aborts when no request has been sent for
// progressCheck cycles.  NOTE(review): the enclosing function signature
// (presumably MemTest::noRequest, matching noRequestEvent in the
// constructor) is missing from this mangled extract.
302 panic("%s did not send a request for %d cycles", name(), progressCheck
);
// MemTest::noResponse -- watchdog fired via noResponseEvent when no
// response has arrived for progressCheck cycles; aborts simulation.
// NOTE(review): return type and braces missing from this extract.
306 MemTest::noResponse()
308 panic("%s did not see a response for %d cycles", name(), progressCheck
);
// Retry-handler fragment (presumably MemTest::recvRetry -- the
// signature is missing from this mangled extract): re-send the stalled
// retryPkt; on success, resume ticking and push the no-request
// watchdog forward.
315 if (port
.sendTimingReq(retryPkt
)) {
316 DPRINTF(MemTest
, "Proceeding after successful retry\n");
319 // kick things into action again
320 schedule(tickEvent
, clockEdge(interval
));
321 reschedule(noRequestEvent
, clockEdge(progressCheck
), true);
// MemTestParams::create -- factory hook used by the generated params
// code to instantiate the MemTest SimObject from its parameters.
// NOTE(review): the return type line and braces are missing from this
// mangled extract.
326 MemTestParams::create() const
328 return new MemTest(*this);