params: Deprecate old-style constructors; update most SimObject constructors.
[gem5.git] / src / cpu / memtest / memtest.cc
/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Steve Reinhardt
 */

// FIX ME: make traceBlockAddr use blocksize from actual cache, not hard coded

#include <iomanip>
#include <set>
#include <string>
#include <vector>

#include "base/misc.hh"
#include "base/statistics.hh"
#include "cpu/memtest/memtest.hh"
//#include "cpu/simple_thread.hh"
//#include "mem/cache/base_cache.hh"
#include "mem/mem_object.hh"
#include "mem/port.hh"
#include "mem/packet.hh"
//#include "mem/physical.hh"
#include "mem/request.hh"
#include "sim/sim_events.hh"
#include "sim/stats.hh"

using namespace std;

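// Global counter used to hand each MemTest instance a unique id
// (the constructor does id = TESTER_ALLOCATOR++).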
int TESTER_ALLOCATOR = 0;

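// Timing responses complete the corresponding outstanding request;
// incoming requests can only be broadcast snoops, which are accepted
// and otherwise ignored.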
bool
MemTest::CpuPort::recvTiming(PacketPtr pkt)
{
    if (pkt->isResponse()) {
        memtest->completeRequest(pkt);
    } else {
        // must be snoop upcall
        assert(pkt->isRequest());
        assert(pkt->getDest() == Packet::Broadcast);
    }
    return true;
}

Tick
MemTest::CpuPort::recvAtomic(PacketPtr pkt)
{
    // must be snoop upcall
    assert(pkt->isRequest());
    assert(pkt->getDest() == Packet::Broadcast);
    return curTick;
}

void
MemTest::CpuPort::recvFunctional(PacketPtr pkt)
{
    // Do nothing if we see one come through
    // if (curTick != 0) // Suppress warning during initialization
    //     warn("Functional Writes not implemented in MemTester\n");
    // Need to find any response values that intersect and update
    return;
}

void
MemTest::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange) {
        if (!snoopRangeSent) {
            snoopRangeSent = true;
            sendStatusChange(Port::RangeChange);
        }
        return;
    }

    panic("MemTest doesn't expect recvStatusChange callback!");
}

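// The peer port is ready to accept packets again; have the tester
// resend the packet that was previously refused.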
void
MemTest::CpuPort::recvRetry()
{
    memtest->doRetry();
}

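// Issue a packet to the cache port. In atomic mode the access completes
// immediately; in timing mode a refused packet is saved so it can be
// resent when the port signals a retry.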
void
MemTest::sendPkt(PacketPtr pkt)
{
    if (atomic) {
        cachePort.sendAtomic(pkt);
        completeRequest(pkt);
    } else if (!cachePort.sendTiming(pkt)) {
        accessRetry = true;
        retryPkt = pkt;
    }
}

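// Construct the tester: set up the cache and functional ports, the test
// address regions, and the access-mix parameters, then schedule the first
// tick event and grab a unique tester id.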
MemTest::MemTest(const Params *p)
    : MemObject(p),
      tickEvent(this),
      cachePort("test", this),
      funcPort("functional", this),
      retryPkt(NULL),
//      mainMem(main_mem),
//      checkMem(check_mem),
      size(p->memory_size),
      percentReads(p->percent_reads),
      percentFunctional(p->percent_functional),
      percentUncacheable(p->percent_uncacheable),
      progressInterval(p->progress_interval),
      nextProgressMessage(p->progress_interval),
      percentSourceUnaligned(p->percent_source_unaligned),
      percentDestUnaligned(p->percent_dest_unaligned),
      maxLoads(p->max_loads),
      atomic(p->atomic)
{
    vector<string> cmd;
    cmd.push_back("/bin/ls");
    vector<string> null_vec;
    //  thread = new SimpleThread(NULL, 0, NULL, 0, mainMem);
    curTick = 0;

    cachePort.snoopRangeSent = false;
    funcPort.snoopRangeSent = true;

    // Needs to be masked off once we know the block size.
    traceBlockAddr = p->trace_addr;
    baseAddr1 = 0x100000;
    baseAddr2 = 0x400000;
    uncacheAddr = 0x800000;

    // set up counters
    noResponseCycles = 0;
    numReads = 0;
    tickEvent.schedule(0);

    id = TESTER_ALLOCATOR++;

    accessRetry = false;
}


Port *
MemTest::getPort(const std::string &if_name, int idx)
{
    if (if_name == "functional")
        return &funcPort;
    else if (if_name == "test")
        return &cachePort;
    else
        panic("No Such Port\n");
}

void
MemTest::init()
{
    // By the time init() is called, the ports should be hooked up.
    blockSize = cachePort.peerBlockSize();
    blockAddrMask = blockSize - 1;
    traceBlockAddr = blockAddr(traceBlockAddr);

    // initial memory contents for both physical memory and functional
    // memory should be 0; no need to initialize them.
}

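// Handle a completed access: check read data against the reference copy
// kept behind the functional port, update statistics, drop the address
// from the outstanding set, and free the packet and its buffers.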
void
MemTest::completeRequest(PacketPtr pkt)
{
    Request *req = pkt->req;

    DPRINTF(MemTest, "completing %s at address %x (blk %x)\n",
            pkt->isWrite() ? "write" : "read",
            req->getPaddr(), blockAddr(req->getPaddr()));

    MemTestSenderState *state =
        dynamic_cast<MemTestSenderState *>(pkt->senderState);

    uint8_t *data = state->data;
    uint8_t *pkt_data = pkt->getPtr<uint8_t>();

    // Remove the address from the set of outstanding addresses
    std::set<unsigned>::iterator removeAddr =
        outstandingAddrs.find(req->getPaddr());
    assert(removeAddr != outstandingAddrs.end());
    outstandingAddrs.erase(removeAddr);

    switch (pkt->cmd.toInt()) {
      case MemCmd::ReadResp:

        if (memcmp(pkt_data, data, pkt->getSize()) != 0) {
            panic("%s: read of %x (blk %x) @ cycle %d "
                  "returns %x, expected %x\n", name(),
                  req->getPaddr(), blockAddr(req->getPaddr()), curTick,
                  *pkt_data, *data);
        }

        numReads++;
        numReadsStat++;

        if (numReads == nextProgressMessage) {
            ccprintf(cerr, "%s: completed %d read accesses @%d\n",
                     name(), numReads, curTick);
            nextProgressMessage += progressInterval;
        }

        if (maxLoads != 0 && numReads >= maxLoads)
            exitSimLoop("maximum number of loads reached");
        break;

      case MemCmd::WriteResp:
        numWritesStat++;
        break;

      default:
        panic("invalid command %s (%d)", pkt->cmdString(), pkt->cmd.toInt());
    }

    noResponseCycles = 0;
    delete state;
    delete [] data;
    delete pkt->req;
    delete pkt;
}

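// Register this tester's statistics with the simulator's stats package.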
void
MemTest::regStats()
{
    using namespace Stats;

    numReadsStat
        .name(name() + ".num_reads")
        .desc("number of read accesses completed")
        ;

    numWritesStat
        .name(name() + ".num_writes")
        .desc("number of write accesses completed")
        ;

    numCopiesStat
        .name(name() + ".num_copies")
        .desc("number of copy accesses completed")
        ;
}

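// Main loop: reschedule for the next cycle, watch for deadlock, and
// (unless a retry is pending) generate one new random read or write
// to either the cacheable or the uncacheable test region.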
void
MemTest::tick()
{
    if (!tickEvent.scheduled())
        tickEvent.schedule(curTick + cycles(1));

    if (++noResponseCycles >= 500000) {
        cerr << name() << ": deadlocked at cycle " << curTick << endl;
        fatal("");
    }

    if (accessRetry) {
        return;
    }

    // make new request
    unsigned cmd = random() % 100;
    unsigned offset = random() % size;
    unsigned base = random() % 2;
    uint64_t data = random();
    unsigned access_size = random() % 4;
    unsigned cacheable = random() % 100;

    // Since we aren't doing copies, run as a false-sharing memory tester:
    // clear the low-order bits of the offset (align it to a block) and
    // then use the tester id as the offset within the block.
    offset = blockAddr(offset);
    offset += id;
    access_size = 0;

    Request *req = new Request();
    uint32_t flags = 0;
    Addr paddr;

    if (cacheable < percentUncacheable) {
        flags |= UNCACHEABLE;
        paddr = uncacheAddr + offset;
    } else {
        paddr = ((base) ? baseAddr1 : baseAddr2) + offset;
    }
    bool probe = (random() % 100 < percentFunctional) && !(flags & UNCACHEABLE);
    //bool probe = false;

    paddr &= ~((1 << access_size) - 1);
    req->setPhys(paddr, 1 << access_size, flags);
    req->setThreadContext(id, 0);

    uint8_t *result = new uint8_t[8];

    if (cmd < percentReads) {
        // read

        // For now we only allow one outstanding request per address
        // per tester. This means we assume the CPU does write forwarding
        // to reads that alias something in the CPU store buffer.
        if (outstandingAddrs.find(paddr) != outstandingAddrs.end()) {
            delete [] result;
            delete req;
            return;
        }

        outstandingAddrs.insert(paddr);

        // ***** NOTE FOR RON: I'm not sure how to access checkMem. - Kevin
        funcPort.readBlob(req->getPaddr(), result, req->getSize());

        DPRINTF(MemTest,
                "initiating read at address %x (blk %x) expecting %x\n",
                req->getPaddr(), blockAddr(req->getPaddr()), *result);

        PacketPtr pkt = new Packet(req, MemCmd::ReadReq, Packet::Broadcast);
        pkt->setSrc(0);
        pkt->dataDynamicArray(new uint8_t[req->getSize()]);
        MemTestSenderState *state = new MemTestSenderState(result);
        pkt->senderState = state;

        if (probe) {
            cachePort.sendFunctional(pkt);
            completeRequest(pkt);
        } else {
            sendPkt(pkt);
        }
    } else {
        // write

        // For now we only allow one outstanding request per address
        // per tester. This means we assume the CPU does write forwarding
        // to reads that alias something in the CPU store buffer.
        if (outstandingAddrs.find(paddr) != outstandingAddrs.end()) {
            delete [] result;
            delete req;
            return;
        }

        outstandingAddrs.insert(paddr);

        DPRINTF(MemTest, "initiating write at address %x (blk %x) value %x\n",
                req->getPaddr(), blockAddr(req->getPaddr()), data & 0xff);

        PacketPtr pkt = new Packet(req, MemCmd::WriteReq, Packet::Broadcast);
        pkt->setSrc(0);
        uint8_t *pkt_data = new uint8_t[req->getSize()];
        pkt->dataDynamicArray(pkt_data);
        memcpy(pkt_data, &data, req->getSize());
        MemTestSenderState *state = new MemTestSenderState(result);
        pkt->senderState = state;

        funcPort.writeBlob(req->getPaddr(), pkt_data, req->getSize());

        if (probe) {
            cachePort.sendFunctional(pkt);
            completeRequest(pkt);
        } else {
            sendPkt(pkt);
        }
    }
}

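// Called from the port's recvRetry() callback: try to resend the packet
// that was previously refused and clear the retry state on success.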
void
MemTest::doRetry()
{
    if (cachePort.sendTiming(retryPkt)) {
        accessRetry = false;
        retryPkt = NULL;
    }
}

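// Factory method invoked by the generated MemTestParams class, i.e. the
// new-style params-based construction this change moves SimObjects to.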
MemTest *
MemTestParams::create()
{
    return new MemTest(this);
}