2 * Copyright (c) 2002-2005 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * Authors: Erik Hallnor
32 // FIXME: make traceBlockAddr use the block size from the actual cache, not hard coded
39 #include "base/misc.hh"
40 #include "base/statistics.hh"
41 #include "cpu/memtest/memtest.hh"
42 //#include "cpu/simple_thread.hh"
43 //#include "mem/cache/base_cache.hh"
44 #include "mem/mem_object.hh"
45 #include "mem/port.hh"
46 #include "mem/packet.hh"
47 //#include "mem/physical.hh"
48 #include "mem/request.hh"
49 #include "sim/builder.hh"
50 #include "sim/sim_events.hh"
51 #include "sim/stats.hh"
// Monotonically increasing id source: each MemTest instance grabs the
// current value and post-increments it in its constructor
// ("id = TESTER_ALLOCATOR++"), giving every tester a unique thread id.
int TESTER_ALLOCATOR = 0;
// Timing-mode callback on the tester's cache-side port.
// Responses are handed back to the owning MemTest via completeRequest();
// anything else is asserted to be a broadcast snoop request (and, per the
// visible asserts, ignored beyond that check).
// NOTE(review): this chunk is garbled -- statements are split across lines
// and some original lines (return type, braces, return value) are missing.
58 MemTest::CpuPort::recvTiming(PacketPtr pkt
)
60 if (pkt
->isResponse()) {
// A response to one of our own requests: hand it to the tester to check.
61 memtest
->completeRequest(pkt
);
63 // must be snoop upcall
64 assert(pkt
->isRequest());
65 assert(pkt
->getDest() == Packet::Broadcast
);
// Atomic-mode callback on the cache-side port.  The tester never expects
// atomic requests here, so anything arriving must be a broadcast snoop
// upcall.  NOTE(review): the return statement is not visible in this
// garbled chunk -- presumably it returns a zero latency; confirm upstream.
71 MemTest::CpuPort::recvAtomic(PacketPtr pkt
)
73 // must be snoop upcall
74 assert(pkt
->isRequest());
75 assert(pkt
->getDest() == Packet::Broadcast
);
// Functional-access callback: deliberately a no-op.  Functional writes are
// not implemented in the tester, so packets are silently ignored (the old
// warning below is commented out to avoid noise during initialization).
80 MemTest::CpuPort::recvFunctional(PacketPtr pkt
)
82 //Do nothing if we see one come through
83 // if (curTick != 0)//Suppress warning during initialization
84 // warn("Functional Writes not implemented in MemTester\n");
85 //Need to find any response values that intersect and update
// Status-change callback.  The only status the tester tolerates is
// RangeChange: the first one is forwarded back out (guarded by
// snoopRangeSent so it is sent exactly once); any other status panics.
90 MemTest::CpuPort::recvStatusChange(Status status
)
92 if (status
== RangeChange
) {
93 if (!snoopRangeSent
) {
// Only announce our (snoop) range once.
94 snoopRangeSent
= true;
95 sendStatusChange(Port::RangeChange
);
// Any status other than RangeChange is unexpected.
100 panic("MemTest doesn't expect recvStatusChange callback!");
// Retry callback from the peer port; body not visible in this chunk
// (presumably it tells the owning MemTest to re-send the stalled packet).
104 MemTest::CpuPort::recvRetry()
// Issue a packet on the cache port.  In atomic mode the access completes
// inline (sendAtomic then completeRequest); in timing mode the packet is
// sent with sendTiming, and the failure path (port busy, packet must be
// retried later) is handled in lines elided from this chunk.
110 MemTest::sendPkt(PacketPtr pkt
) {
112 cachePort
.sendAtomic(pkt
);
113 completeRequest(pkt
);
115 else if (!cachePort
.sendTiming(pkt
)) {
// MemTest constructor.  Wires up the two ports ("test" toward the cache,
// "functional" toward functional/check memory), copies the configuration
// percentages into members, seeds the fixed test address regions, and
// schedules the first tick.  Several parameters (_traceAddr, _max_loads,
// the atomic flag) and parts of the initializer list are elided from this
// chunk -- see the full file.
// BUG FIX: percentDestUnaligned was initialized from itself (the member,
// which is uninitialized at that point) instead of the constructor
// parameter _percentDestUnaligned; every sibling initializer uses the
// underscore-prefixed parameter.
122 MemTest::MemTest(const string
&name
,
123 // MemInterface *_cache_interface,
124 // PhysicalMemory *main_mem,
125 // PhysicalMemory *check_mem,
126 unsigned _memorySize
,
127 unsigned _percentReads
,
128 unsigned _percentFunctional
,
129 unsigned _percentUncacheable
,
130 unsigned _progressInterval
,
131 unsigned _percentSourceUnaligned
,
132 unsigned _percentDestUnaligned
,
138 cachePort("test", this),
139 funcPort("functional", this),
141 // mainMem(main_mem),
142 // checkMem(check_mem),
144 percentReads(_percentReads
),
145 percentFunctional(_percentFunctional
),
146 percentUncacheable(_percentUncacheable
),
147 progressInterval(_progressInterval
),
148 nextProgressMessage(_progressInterval
),
149 percentSourceUnaligned(_percentSourceUnaligned
),
150 percentDestUnaligned(_percentDestUnaligned
),
151 maxLoads(_max_loads
),
155 cmd
.push_back("/bin/ls");
156 vector
<string
> null_vec
;
157 // thread = new SimpleThread(NULL, 0, NULL, 0, mainMem);
// The cache-side port advertises a snoop range exactly once; the
// functional port never does (flag pre-set to true).
160 cachePort
.snoopRangeSent
= false;
161 funcPort
.snoopRangeSent
= true;
163 // Needs to be masked off once we know the block size.
164 traceBlockAddr
= _traceAddr
;
// Fixed base addresses for the cacheable regions and the uncacheable one.
165 baseAddr1
= 0x100000;
166 baseAddr2
= 0x400000;
167 uncacheAddr
= 0x800000;
170 noResponseCycles
= 0;
// Kick off the tester immediately (tick at time 0).
172 tickEvent
.schedule(0);
// Hand out a unique per-tester id (used as the thread/context id).
174 id
= TESTER_ALLOCATOR
++;
// Port lookup by name: "functional" and "test" map to the tester's two
// ports (the return statements are elided from this chunk); any other
// name is a configuration error and panics.
180 MemTest::getPort(const std::string
&if_name
, int idx
)
182 if (if_name
== "functional")
184 else if (if_name
== "test")
187 panic("No Such Port\n");
// NOTE(review): this is the body of MemTest::init(); the signature line is
// elided from this chunk.  Once ports are connected, query the peer's
// block size, derive the block-offset mask, and mask the trace address
// down to its block address.
193 // By the time init() is called, the ports should be hooked up.
194 blockSize
= cachePort
.peerBlockSize();
// blockSize is assumed to be a power of two here (mask = size - 1).
195 blockAddrMask
= blockSize
- 1;
196 traceBlockAddr
= blockAddr(traceBlockAddr
);
198 // initial memory contents for both physical memory and functional
199 // memory should be 0; no need to initialize them.
// Check and retire a completed request.  Recovers the expected data from
// the MemTestSenderState stashed on the packet, removes the address from
// the outstanding set, and for reads compares the returned bytes against
// the expected value (panic on mismatch).  Also emits periodic progress
// messages and terminates the simulation once maxLoads reads complete.
204 MemTest::completeRequest(PacketPtr pkt
)
206 Request
*req
= pkt
->req
;
208 DPRINTF(MemTest
, "completing %s at address %x (blk %x)\n",
209 pkt
->isWrite() ? "write" : "read",
210 req
->getPaddr(), blockAddr(req
->getPaddr()));
// The sender state carries the expected data recorded at issue time.
212 MemTestSenderState
*state
=
213 dynamic_cast<MemTestSenderState
*>(pkt
->senderState
);
215 uint8_t *data
= state
->data
;
216 uint8_t *pkt_data
= pkt
->getPtr
<uint8_t>();
218 //Remove the address from the list of outstanding
219 std::set
<unsigned>::iterator removeAddr
=
220 outstandingAddrs
.find(req
->getPaddr());
221 assert(removeAddr
!= outstandingAddrs
.end());
222 outstandingAddrs
.erase(removeAddr
);
224 switch (pkt
->cmd
.toInt()) {
225 case MemCmd::ReadResp
:
// Verify the read data against the expected value captured at issue.
227 if (memcmp(pkt_data
, data
, pkt
->getSize()) != 0) {
228 panic("%s: read of %x (blk %x) @ cycle %d "
229 "returns %x, expected %x\n", name(),
230 req
->getPaddr(), blockAddr(req
->getPaddr()), curTick
,
// Periodic progress report every progressInterval completed reads.
237 if (numReads
== nextProgressMessage
) {
238 ccprintf(cerr
, "%s: completed %d read accesses @%d\n",
239 name(), numReads
, curTick
);
240 nextProgressMessage
+= progressInterval
;
// maxLoads == 0 means "run forever"; otherwise stop at the load target.
243 if (maxLoads
!= 0 && numReads
>= maxLoads
)
244 exitSimLoop("maximum number of loads reached");
247 case MemCmd::WriteResp
:
// Any other command reaching here is a protocol error.
252 panic("invalid command %s (%d)", pkt
->cmdString(), pkt
->cmd
.toInt());
// A completion resets the deadlock watchdog (see tick()).
255 noResponseCycles
= 0;
// NOTE(review): body of MemTest::regStats(); the signature and the stat
// variable names on each chained call are elided from this chunk.
// Registers the completed-read/write/copy counters with the stats system.
265 using namespace Stats
;
268 .name(name() + ".num_reads")
269 .desc("number of read accesses completed")
273 .name(name() + ".num_writes")
274 .desc("number of write accesses completed")
278 .name(name() + ".num_copies")
279 .desc("number of copy accesses completed")
// Main test loop (body of MemTest::tick(); some lines elided).  Each tick:
// re-schedules itself, checks the deadlock watchdog, rolls random numbers
// to pick operation type / address / size / cacheability, builds a
// Request, and issues either a read (checked against funcPort contents)
// or a write (mirrored into funcPort) -- functionally when "probe" is
// set, otherwise via sendPkt().
286 if (!tickEvent
.scheduled())
287 tickEvent
.schedule(curTick
+ cycles(1));
// Deadlock watchdog: completeRequest() zeroes this counter; if nothing
// completes for 500000 ticks, report a deadlock.
289 if (++noResponseCycles
>= 500000) {
290 cerr
<< name() << ": deadlocked at cycle " << curTick
<< endl
;
// Random draws that shape this access.
299 unsigned cmd
= random() % 100;
300 unsigned offset
= random() % size
;
301 unsigned base
= random() % 2;
302 uint64_t data
= random();
303 unsigned access_size
= random() % 4;
304 unsigned cacheable
= random() % 100;
306 //If we aren't doing copies, use id as offset, and do a false sharing
308 //We can eliminate the lower bits of the offset, and then use the id
309 //to offset within the blks
310 offset
= blockAddr(offset
);
314 Request
*req
= new Request();
// Route a cacheable-percentage roll below the threshold to the
// uncacheable region; otherwise pick one of the two cacheable bases.
318 if (cacheable
< percentUncacheable
) {
319 flags
|= UNCACHEABLE
;
320 paddr
= uncacheAddr
+ offset
;
322 paddr
= ((base
) ? baseAddr1
: baseAddr2
) + offset
;
// Functional "probe" accesses are only done for cacheable addresses.
324 bool probe
= (random() % 100 < percentFunctional
) && !(flags
& UNCACHEABLE
);
325 //bool probe = false;
// Align the address to the (power-of-two) access size.
327 paddr
&= ~((1 << access_size
) - 1);
328 req
->setPhys(paddr
, 1 << access_size
, flags
);
329 req
->setThreadContext(id
,0);
331 uint8_t *result
= new uint8_t[8];
333 if (cmd
< percentReads
) {
336 // For now we only allow one outstanding request per address
337 // per tester This means we assume CPU does write forwarding
338 // to reads that alias something in the cpu store buffer.
339 if (outstandingAddrs
.find(paddr
) != outstandingAddrs
.end()) {
345 outstandingAddrs
.insert(paddr
);
347 // ***** NOTE FOR RON: I'm not sure how to access checkMem. - Kevin
// Read the expected value from the functional (check) memory.
348 funcPort
.readBlob(req
->getPaddr(), result
, req
->getSize());
351 "initiating read at address %x (blk %x) expecting %x\n",
352 req
->getPaddr(), blockAddr(req
->getPaddr()), *result
);
354 PacketPtr pkt
= new Packet(req
, MemCmd::ReadReq
, Packet::Broadcast
);
356 pkt
->dataDynamicArray(new uint8_t[req
->getSize()]);
// Stash the expected data so completeRequest() can verify the response.
357 MemTestSenderState
*state
= new MemTestSenderState(result
);
358 pkt
->senderState
= state
;
// Probe path: do the access functionally and complete it inline.
361 cachePort
.sendFunctional(pkt
);
362 pkt
->makeAtomicResponse();
363 completeRequest(pkt
);
370 // For now we only allow one outstanding request per address
371 // per tester. This means we assume CPU does write forwarding
372 // to reads that alias something in the cpu store buffer.
373 if (outstandingAddrs
.find(paddr
) != outstandingAddrs
.end()) {
379 outstandingAddrs
.insert(paddr
);
381 DPRINTF(MemTest
, "initiating write at address %x (blk %x) value %x\n",
382 req
->getPaddr(), blockAddr(req
->getPaddr()), data
& 0xff);
384 PacketPtr pkt
= new Packet(req
, MemCmd::WriteReq
, Packet::Broadcast
);
386 uint8_t *pkt_data
= new uint8_t[req
->getSize()];
387 pkt
->dataDynamicArray(pkt_data
);
388 memcpy(pkt_data
, &data
, req
->getSize());
389 MemTestSenderState
*state
= new MemTestSenderState(result
);
390 pkt
->senderState
= state
;
// Mirror the write into the functional memory so later reads can check.
392 funcPort
.writeBlob(req
->getPaddr(), pkt_data
, req
->getSize());
// Probe path for writes: functional access, completed inline.
395 cachePort
.sendFunctional(pkt
);
396 pkt
->makeAtomicResponse();
397 completeRequest(pkt
);
// NOTE(review): fragment of MemTest::doRetry() -- re-send the stalled
// packet saved earlier; the success branch (clearing the retry state) is
// elided from this chunk.
407 if (cachePort
.sendTiming(retryPkt
)) {
// M5 SimObject parameter declarations for MemTest.  These names must match
// the Python config parameters and the INIT_PARAM block below.
// NOTE(review): the INIT block later references an "atomic" param whose
// declaration is not visible here -- presumably elided from this chunk.
413 BEGIN_DECLARE_SIM_OBJECT_PARAMS(MemTest
)
415 // SimObjectParam<BaseCache *> cache;
416 // SimObjectParam<PhysicalMemory *> main_mem;
417 // SimObjectParam<PhysicalMemory *> check_mem;
418 Param
<unsigned> memory_size
;
419 Param
<unsigned> percent_reads
;
420 Param
<unsigned> percent_functional
;
421 Param
<unsigned> percent_uncacheable
;
422 Param
<unsigned> progress_interval
;
423 Param
<unsigned> percent_source_unaligned
;
424 Param
<unsigned> percent_dest_unaligned
;
425 Param
<Addr
> trace_addr
;
426 Param
<Counter
> max_loads
;
429 END_DECLARE_SIM_OBJECT_PARAMS(MemTest
)
// Parameter descriptions bound to the declarations above; these strings
// are what the user sees in the generated parameter documentation.
432 BEGIN_INIT_SIM_OBJECT_PARAMS(MemTest
)
434 // INIT_PARAM(cache, "L1 cache"),
435 // INIT_PARAM(main_mem, "hierarchical memory"),
436 // INIT_PARAM(check_mem, "check memory"),
437 INIT_PARAM(memory_size
, "memory size"),
438 INIT_PARAM(percent_reads
, "target read percentage"),
439 INIT_PARAM(percent_functional
, "percentage of access that are functional"),
440 INIT_PARAM(percent_uncacheable
, "target uncacheable percentage"),
441 INIT_PARAM(progress_interval
, "progress report interval (in accesses)"),
442 INIT_PARAM(percent_source_unaligned
,
443 "percent of copy source address that are unaligned"),
444 INIT_PARAM(percent_dest_unaligned
,
445 "percent of copy dest address that are unaligned"),
446 INIT_PARAM(trace_addr
, "address to trace"),
447 INIT_PARAM(max_loads
, "terminate when we have reached this load count"),
448 INIT_PARAM(atomic
, "Is the tester testing atomic mode (or timing)")
450 END_INIT_SIM_OBJECT_PARAMS(MemTest
)
// Factory hook: constructs a MemTest from the parsed parameters.  The
// commented-out arguments mirror the retired cache/main_mem/check_mem
// parameters above.
453 CREATE_SIM_OBJECT(MemTest
)
455 return new MemTest(getInstanceName(), /*cache->getInterface(),*/ /*main_mem,*/
456 /*check_mem,*/ memory_size
, percent_reads
, percent_functional
,
457 percent_uncacheable
, progress_interval
,
458 percent_source_unaligned
, percent_dest_unaligned
,
459 trace_addr
, max_loads
, atomic
);
// Registers the factory above under the config-visible name "MemTest".
462 REGISTER_SIM_OBJECT("MemTest", MemTest
)