/*
 * Copyright (c) 2002-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 *          Steve Reinhardt
 */

// FIXME: make trackBlkAddr use the block size from the actual cache,
// not a hard-coded value.

#include <iomanip>
#include <set>
#include <string>
#include <vector>

#include "base/misc.hh"
#include "base/statistics.hh"
//#include "cpu/simple_thread.hh"
#include "cpu/memtest/memtest.hh"
//#include "mem/cache/base_cache.hh"
//#include "mem/physical.hh"
#include "sim/builder.hh"
#include "sim/sim_events.hh"
#include "sim/stats.hh"
#include "mem/packet.hh"
#include "mem/request.hh"
#include "mem/port.hh"
#include "mem/mem_object.hh"

using namespace std;

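// Global counter used to hand each MemTest instance a unique id; the id is
// also used below to give each tester its own byte within a cache block.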
int TESTER_ALLOCATOR = 0;

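// CpuPort callbacks: timing and functional responses are handed back to the
// owning MemTest; atomic accesses and status changes (other than range
// changes) are not expected by this tester.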
bool
MemTest::CpuPort::recvTiming(Packet *pkt)
{
    memtest->completeRequest(pkt);
    return true;
}

Tick
MemTest::CpuPort::recvAtomic(Packet *pkt)
{
    panic("MemTest doesn't expect recvAtomic callback!");
    return curTick;
}

void
MemTest::CpuPort::recvFunctional(Packet *pkt)
{
    memtest->completeRequest(pkt);
}

void
MemTest::CpuPort::recvStatusChange(Status status)
{
    if (status == RangeChange)
        return;

    panic("MemTest doesn't expect recvStatusChange callback!");
}

void
MemTest::CpuPort::recvRetry()
{
    memtest->doRetry();
}

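// The constructor creates the "test" and "functional" ports, sets the base
// addresses of the cacheable and uncacheable regions, and schedules the
// first tick event at time 0.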
MemTest::MemTest(const string &name,
//               MemInterface *_cache_interface,
//               PhysicalMemory *main_mem,
//               PhysicalMemory *check_mem,
                 unsigned _memorySize,
                 unsigned _percentReads,
//               unsigned _percentCopies,
                 unsigned _percentUncacheable,
                 unsigned _progressInterval,
                 unsigned _percentSourceUnaligned,
                 unsigned _percentDestUnaligned,
                 Addr _traceAddr,
                 Counter _max_loads)
    : MemObject(name),
      tickEvent(this),
      cachePort("test", this),
      funcPort("functional", this),
      retryPkt(NULL),
//      mainMem(main_mem),
//      checkMem(check_mem),
      size(_memorySize),
      percentReads(_percentReads),
//      percentCopies(_percentCopies),
      percentUncacheable(_percentUncacheable),
      progressInterval(_progressInterval),
      nextProgressMessage(_progressInterval),
      percentSourceUnaligned(_percentSourceUnaligned),
      percentDestUnaligned(_percentDestUnaligned),
      maxLoads(_max_loads)
{
    vector<string> cmd;
    cmd.push_back("/bin/ls");
    vector<string> null_vec;
    //  thread = new SimpleThread(NULL, 0, NULL, 0, mainMem);
    curTick = 0;

    // Needs to be masked off once we know the block size.
    traceBlockAddr = _traceAddr;
    baseAddr1 = 0x100000;
    baseAddr2 = 0x400000;
    uncacheAddr = 0x800000;

    // Set up counters.
    noResponseCycles = 0;
    numReads = 0;
    tickEvent.schedule(0);

    id = TESTER_ALLOCATOR++;

    accessRetry = false;
}

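// Return the requested port: "functional" for the backing functional port,
// "test" for the port driving the memory system under test.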
Port *
MemTest::getPort(const std::string &if_name, int idx)
{
    if (if_name == "functional")
        return &funcPort;
    else if (if_name == "test")
        return &cachePort;
    else
        panic("No Such Port\n");
}

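// init() runs after the ports are connected: it queries the peer's block
// size and fills both the tested memory and the functional (reference)
// memory with known patterns so later reads can be checked.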
void
MemTest::init()
{
    // By the time init() is called, the ports should be hooked up.
    blockSize = cachePort.peerBlockSize();
    blockAddrMask = blockSize - 1;
    traceBlockAddr = blockAddr(traceBlockAddr);

    // Set up initial memory contents here.
    cachePort.memsetBlob(baseAddr1, 1, size);
    funcPort.memsetBlob(baseAddr1, 1, size);
    cachePort.memsetBlob(baseAddr2, 2, size);
    funcPort.memsetBlob(baseAddr2, 2, size);
    cachePort.memsetBlob(uncacheAddr, 3, size);
    funcPort.memsetBlob(uncacheAddr, 3, size);
}

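// Dump a data buffer as a single hex value, most-significant byte first.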
static void
printData(ostream &os, uint8_t *data, int nbytes)
{
    os << hex << setfill('0');
    // assume little-endian: print bytes from highest address to lowest
    for (uint8_t *dp = data + nbytes - 1; dp >= data; --dp) {
        os << setw(2) << (unsigned)*dp;
    }
    os << dec;
}

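// Handle a completed access: verify read data against the functional copy,
// update counters and statistics, emit progress/trace output, and release
// the request, packet, and sender state.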
void
MemTest::completeRequest(Packet *pkt)
{
    MemTestSenderState *state =
        dynamic_cast<MemTestSenderState *>(pkt->senderState);

    uint8_t *data = state->data;
    uint8_t *pkt_data = pkt->getPtr<uint8_t>();
    Request *req = pkt->req;

    // Remove the address from the list of outstanding requests.
    std::set<unsigned>::iterator removeAddr = outstandingAddrs.find(req->getPaddr());
    assert(removeAddr != outstandingAddrs.end());
    outstandingAddrs.erase(removeAddr);

    switch (pkt->cmd) {
      case Packet::ReadResp:

        if (memcmp(pkt_data, data, pkt->getSize()) != 0) {
            cerr << name() << ": on read of 0x" << hex << req->getPaddr()
                 << " (0x" << hex << blockAddr(req->getPaddr()) << ")"
                 << "@ cycle " << dec << curTick
                 << ", cache returns 0x";
            printData(cerr, pkt_data, pkt->getSize());
            cerr << ", expected 0x";
            printData(cerr, data, pkt->getSize());
            cerr << endl;
            fatal("");
        }

        numReads++;
        numReadsStat++;

        if (numReads == nextProgressMessage) {
            ccprintf(cerr, "%s: completed %d read accesses @%d\n",
                     name(), numReads, curTick);
            nextProgressMessage += progressInterval;
        }

        if (numReads >= maxLoads)
            exitSimLoop("Maximum number of loads reached!");
        break;

      case Packet::WriteResp:
        numWritesStat++;
        break;
/*
      case Copy:
        // Also remove dest from outstanding list
        removeAddr = outstandingAddrs.find(req->dest);
        assert(removeAddr != outstandingAddrs.end());
        outstandingAddrs.erase(removeAddr);
        numCopiesStat++;
        break;
*/
      default:
        panic("invalid command");
    }

    if (blockAddr(req->getPaddr()) == traceBlockAddr) {
        cerr << name() << ": completed "
             << (pkt->isWrite() ? "write" : "read")
             << " access of "
             << dec << pkt->getSize() << " bytes at address 0x"
             << hex << req->getPaddr()
             << " (0x" << hex << blockAddr(req->getPaddr()) << ")"
             << ", value = 0x";
        printData(cerr, pkt_data, pkt->getSize());
        cerr << " @ cycle " << dec << curTick;

        cerr << endl;
    }

    noResponseCycles = 0;
    delete state;
    delete [] data;
    delete pkt->req;
    delete pkt;
}

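// Register the completed-read/write/copy counters with the statistics package.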
void
MemTest::regStats()
{
    using namespace Stats;

    numReadsStat
        .name(name() + ".num_reads")
        .desc("number of read accesses completed")
        ;

    numWritesStat
        .name(name() + ".num_writes")
        .desc("number of write accesses completed")
        ;

    numCopiesStat
        .name(name() + ".num_copies")
        .desc("number of copy accesses completed")
        ;
}

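// tick() is the main request generator: each cycle it reschedules itself,
// checks for deadlock, and (unless a retry is pending) issues one randomly
// chosen read or write to either the cacheable or uncacheable region.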
void
MemTest::tick()
{
    if (!tickEvent.scheduled())
        tickEvent.schedule(curTick + cycles(1));

    if (++noResponseCycles >= 500000) {
        cerr << name() << ": deadlocked at cycle " << curTick << endl;
        fatal("");
    }

    if (accessRetry) {
        return;
    }

    // Generate a new request.
    unsigned cmd = random() % 100;
    unsigned offset = random() % size;
    unsigned base = random() % 2;
    uint64_t data = random();
    unsigned access_size = random() % 4;
    unsigned cacheable = random() % 100;

    // Since we aren't doing copies, use the tester id as part of the offset
    // and exercise false sharing: clear the low-order bits of the offset so
    // it falls on a block boundary, then use the id to pick a byte within
    // that block.
    offset &= ~63; // clear the low-order bits
    offset += id;
    access_size = 0;
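    // Example: an offset of 0x1234 is masked to 0x1200; a tester with id 3
    // then touches byte 0x1203, so different testers hit distinct bytes that
    // can fall within the same 64-byte block.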

    Request *req = new Request();
    uint32_t flags = 0;
    Addr paddr;

    if (cacheable < percentUncacheable) {
        flags |= UNCACHEABLE;
        paddr = uncacheAddr + offset;
    } else {
        paddr = ((base) ? baseAddr1 : baseAddr2) + offset;
    }
    // bool probe = (random() % 2 == 1) && !req->isUncacheable();
    bool probe = false;

    paddr &= ~((1 << access_size) - 1);
    req->setPhys(paddr, 1 << access_size, flags);
    req->setThreadContext(id, 0);

    uint8_t *result = new uint8_t[8];

    if (cmd < percentReads) {
        // read

        // For now we only allow one outstanding request per address per
        // tester.  This means we assume the CPU does write forwarding to
        // reads that alias something in the CPU store buffer.
        if (outstandingAddrs.find(paddr) != outstandingAddrs.end()) return;
        else outstandingAddrs.insert(paddr);

        // ***** NOTE FOR RON: I'm not sure how to access checkMem. - Kevin
        funcPort.readBlob(req->getPaddr(), result, req->getSize());

        if (blockAddr(paddr) == traceBlockAddr) {
            cerr << name()
                 << ": initiating read "
                 << ((probe) ? "probe of " : "access of ")
                 << dec << req->getSize() << " bytes from addr 0x"
                 << hex << paddr
                 << " (0x" << hex << blockAddr(paddr) << ")"
                 << " at cycle "
                 << dec << curTick << endl;
        }

        Packet *pkt = new Packet(req, Packet::ReadReq, Packet::Broadcast);
        pkt->dataDynamicArray(new uint8_t[req->getSize()]);
        MemTestSenderState *state = new MemTestSenderState(result);
        pkt->senderState = state;

        if (probe) {
            cachePort.sendFunctional(pkt);
            // completeRequest(pkt, result);
        } else {
            // req->completionEvent = new MemCompleteEvent(req, result, this);
            if (!cachePort.sendTiming(pkt)) {
                accessRetry = true;
                retryPkt = pkt;
            }
        }
    } else {
        // write

        // For now we only allow one outstanding request per address per
        // tester.  This means we assume the CPU does write forwarding to
        // reads that alias something in the CPU store buffer.
        if (outstandingAddrs.find(paddr) != outstandingAddrs.end()) return;
        else outstandingAddrs.insert(paddr);

/*
        if (blockAddr(req->getPaddr()) == traceBlockAddr) {
            cerr << name() << ": initiating write "
                 << ((probe) ? "probe of " : "access of ")
                 << dec << req->getSize() << " bytes (value = 0x";
            printData(cerr, data_pkt->getPtr(), req->getSize());
            cerr << ") to addr 0x"
                 << hex << req->getPaddr()
                 << " (0x" << hex << blockAddr(req->getPaddr()) << ")"
                 << " at cycle "
                 << dec << curTick << endl;
        }
*/
        Packet *pkt = new Packet(req, Packet::WriteReq, Packet::Broadcast);
        uint8_t *pkt_data = new uint8_t[req->getSize()];
        pkt->dataDynamicArray(pkt_data);
        memcpy(pkt_data, &data, req->getSize());
        MemTestSenderState *state = new MemTestSenderState(result);
        pkt->senderState = state;

        funcPort.writeBlob(req->getPaddr(), pkt_data, req->getSize());

        if (probe) {
            cachePort.sendFunctional(pkt);
            // completeRequest(req, NULL);
        } else {
            // req->completionEvent = new MemCompleteEvent(req, NULL, this);
            if (!cachePort.sendTiming(pkt)) {
                accessRetry = true;
                retryPkt = pkt;
            }
        }
    }
/*  else {
        // copy
        unsigned source_align = random() % 100;
        unsigned dest_align = random() % 100;
        unsigned offset2 = random() % size;

        Addr source = ((base) ? baseAddr1 : baseAddr2) + offset;
        Addr dest = ((base) ? baseAddr2 : baseAddr1) + offset2;
        if (outstandingAddrs.find(source) != outstandingAddrs.end()) return;
        else outstandingAddrs.insert(source);
        if (outstandingAddrs.find(dest) != outstandingAddrs.end()) return;
        else outstandingAddrs.insert(dest);

        if (source_align >= percentSourceUnaligned) {
            source = blockAddr(source);
        }
        if (dest_align >= percentDestUnaligned) {
            dest = blockAddr(dest);
        }
        req->cmd = Copy;
        req->flags &= ~UNCACHEABLE;
        req->paddr = source;
        req->dest = dest;
        delete [] req->data;
        req->data = new uint8_t[blockSize];
        req->size = blockSize;
        if (source == traceBlockAddr || dest == traceBlockAddr) {
            cerr << name()
                 << ": initiating copy of "
                 << dec << req->size << " bytes from addr 0x"
                 << hex << source
                 << " (0x" << hex << blockAddr(source) << ")"
                 << " to addr 0x"
                 << hex << dest
                 << " (0x" << hex << blockAddr(dest) << ")"
                 << " at cycle "
                 << dec << curTick << endl;
        }
        cacheInterface->access(req);
        uint8_t result[blockSize];
        checkMem->access(Read, source, &result, blockSize);
        checkMem->access(Write, dest, &result, blockSize);
    }
*/
}

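// Re-send the packet that previously failed sendTiming(); clear the retry
// state only once the resend is accepted.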
void
MemTest::doRetry()
{
    if (cachePort.sendTiming(retryPkt)) {
        accessRetry = false;
        retryPkt = NULL;
    }
}

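// SimObject parameter and builder boilerplate: these macros declare the
// configuration parameters exposed by MemTest and construct an instance
// from a given parameter set.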
BEGIN_DECLARE_SIM_OBJECT_PARAMS(MemTest)

//    SimObjectParam<BaseCache *> cache;
//    SimObjectParam<PhysicalMemory *> main_mem;
//    SimObjectParam<PhysicalMemory *> check_mem;
    Param<unsigned> memory_size;
    Param<unsigned> percent_reads;
//    Param<unsigned> percent_copies;
    Param<unsigned> percent_uncacheable;
    Param<unsigned> progress_interval;
    Param<unsigned> percent_source_unaligned;
    Param<unsigned> percent_dest_unaligned;
    Param<Addr> trace_addr;
    Param<Counter> max_loads;

END_DECLARE_SIM_OBJECT_PARAMS(MemTest)


BEGIN_INIT_SIM_OBJECT_PARAMS(MemTest)

//    INIT_PARAM(cache, "L1 cache"),
//    INIT_PARAM(main_mem, "hierarchical memory"),
//    INIT_PARAM(check_mem, "check memory"),
    INIT_PARAM(memory_size, "memory size"),
    INIT_PARAM(percent_reads, "target read percentage"),
//    INIT_PARAM(percent_copies, "target copy percentage"),
    INIT_PARAM(percent_uncacheable, "target uncacheable percentage"),
    INIT_PARAM(progress_interval, "progress report interval (in accesses)"),
    INIT_PARAM(percent_source_unaligned,
               "percent of copy source addresses that are unaligned"),
    INIT_PARAM(percent_dest_unaligned,
               "percent of copy dest addresses that are unaligned"),
    INIT_PARAM(trace_addr, "address to trace"),
    INIT_PARAM(max_loads, "terminate when we have reached this load count")

END_INIT_SIM_OBJECT_PARAMS(MemTest)


CREATE_SIM_OBJECT(MemTest)
{
    return new MemTest(getInstanceName(), /*cache->getInterface(),*/ /*main_mem,*/
                       /*check_mem,*/ memory_size, percent_reads, /*percent_copies,*/
                       percent_uncacheable, progress_interval,
                       percent_source_unaligned, percent_dest_unaligned,
                       trace_addr, max_loads);
}

REGISTER_SIM_OBJECT("MemTest", MemTest)