2 * Copyright (c) 2002-2005 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * Authors: Erik Hallnor
32 // FIXME: make traceBlockAddr use the block size from the actual cache, not hard coded
39 #include "base/misc.hh"
40 #include "base/statistics.hh"
41 #include "cpu/memtest/memtest.hh"
42 //#include "cpu/simple_thread.hh"
43 //#include "mem/cache/base_cache.hh"
44 #include "mem/mem_object.hh"
45 #include "mem/port.hh"
46 #include "mem/packet.hh"
47 //#include "mem/physical.hh"
48 #include "mem/request.hh"
49 #include "params/MemTest.hh"
50 #include "sim/sim_events.hh"
51 #include "sim/stats.hh"
// Global monotonically-increasing counter handing out a unique id to each
// MemTest instance constructed in this simulation (see the constructor,
// which does `id = TESTER_ALLOCATOR++`).
// FIX: the extracted text had a stray "55" line-number prefix fused onto the
// definition, making it invalid C++; restored as a clean definition.
int TESTER_ALLOCATOR = 0;
// Timing-mode receive hook for the tester's cache-side port.  A response
// packet belongs to a request this tester issued earlier and is handed back
// to the owning MemTest; anything else must be a broadcast snoop request.
// NOTE(review): this chunk is garbled -- the return type, braces, else
// keyword and return statement of the original are not visible here.
58 MemTest::CpuPort::recvTiming(PacketPtr pkt
)
60 if (pkt
->isResponse()) {
// Our own outstanding request finished: verify/retire it in MemTest.
61 memtest
->completeRequest(pkt
);
63 // must be snoop upcall
64 assert(pkt
->isRequest());
65 assert(pkt
->getDest() == Packet::Broadcast
);
// Atomic-mode receive hook.  The tester never expects its own atomic
// responses to arrive via this path; anything received here must be a
// broadcast snoop request.  NOTE(review): return type and return value
// lines are not visible in this garbled chunk.
71 MemTest::CpuPort::recvAtomic(PacketPtr pkt
)
73 // must be snoop upcall
74 assert(pkt
->isRequest());
75 assert(pkt
->getDest() == Packet::Broadcast
);
// Functional-access receive hook: deliberately a no-op.  The commented-out
// warning below was disabled to avoid noise during initialization.
80 MemTest::CpuPort::recvFunctional(PacketPtr pkt
)
82 //Do nothing if we see one come through
83 // if (curTick != 0)//Supress warning durring initialization
84 // warn("Functional Writes not implemented in MemTester\n");
85 //Need to find any response values that intersect and update
// Status-change hook.  A RangeChange from the peer is forwarded upward
// exactly once (guarded by snoopRangeSent); any other status value is a
// programming error and panics.  NOTE(review): return/brace lines are
// missing from this garbled chunk.
90 MemTest::CpuPort::recvStatusChange(Status status
)
92 if (status
== RangeChange
) {
93 if (!snoopRangeSent
) {
// Only propagate the first range change so the peer is not spammed.
94 snoopRangeSent
= true;
95 sendStatusChange(Port::RangeChange
);
// Reaching here means an unexpected status value was delivered.
100 panic("MemTest doesn't expect recvStatusChange callback!");
// Retry hook for the cache-side port; its body is not visible in this
// chunk -- presumably it re-sends the buffered retry packet (compare the
// doRetry fragment near the end of this file).  TODO confirm against the
// full source.
104 MemTest::CpuPort::recvRetry()
// Dispatch a packet toward memory.  The visible atomic path performs the
// access synchronously via sendAtomic() and completes it immediately; the
// visible timing path treats a false return from sendTiming() as "port
// busy".  NOTE(review): the atomic/timing selection condition and the
// retry bookkeeping lines are missing from this garbled chunk.
110 MemTest::sendPkt(PacketPtr pkt
) {
// Atomic access completes in-line, so retire the request right away.
112 cachePort
.sendAtomic(pkt
);
113 completeRequest(pkt
);
// Timing path: a failed send means the packet must be held for a retry.
115 else if (!cachePort
.sendTiming(pkt
)) {
// MemTest constructor.  Wires up the two CPU-side ports ("test" -> cache
// port, "functional" -> functional reference port), latches the knobs that
// steer the random traffic generator, seeds the cacheable/uncacheable
// address regions, and schedules the first tick.  Each instance takes a
// unique id from the global TESTER_ALLOCATOR counter.
// NOTE(review): this chunk is garbled -- several parameter and initializer
// lines are missing; only the visible text is reproduced (and fixed) here.
122 MemTest::MemTest(const string
&name
,
123 // MemInterface *_cache_interface,
124 // PhysicalMemory *main_mem,
125 // PhysicalMemory *check_mem,
126 unsigned _memorySize
,
127 unsigned _percentReads
,
128 unsigned _percentFunctional
,
129 unsigned _percentUncacheable
,
130 unsigned _progressInterval
,
131 unsigned _percentSourceUnaligned
,
132 unsigned _percentDestUnaligned
,
138 cachePort("test", this),
139 funcPort("functional", this),
141 // mainMem(main_mem),
142 // checkMem(check_mem),
144 percentReads(_percentReads
),
145 percentFunctional(_percentFunctional
),
146 percentUncacheable(_percentUncacheable
),
147 progressInterval(_progressInterval
),
148 nextProgressMessage(_progressInterval
),
149 percentSourceUnaligned(_percentSourceUnaligned
),
// BUG FIX: was `percentDestUnaligned(percentDestUnaligned)`, which
// self-initialized the member from its own indeterminate value; it must
// read the `_percentDestUnaligned` constructor argument instead.
150 percentDestUnaligned(_percentDestUnaligned
),
151 maxLoads(_max_loads
),
// Dummy command line carried over from the original code (unused by the
// tester itself).
155 cmd
.push_back("/bin/ls");
156 vector
<string
> null_vec
;
157 // thread = new SimpleThread(NULL, 0, NULL, 0, mainMem);
// The cache-side port still needs to advertise its snoop range once; the
// functional port never does, so it is marked as already sent.
160 cachePort
.snoopRangeSent
= false;
161 funcPort
.snoopRangeSent
= true;
163 // Needs to be masked off once we know the block size.
164 traceBlockAddr
= _traceAddr
;
// Fixed test regions: two cacheable base regions plus one uncacheable one.
165 baseAddr1
= 0x100000;
166 baseAddr2
= 0x400000;
167 uncacheAddr
= 0x800000;
// Deadlock watchdog starts cleared.
170 noResponseCycles
= 0;
// Kick off the tester at tick 0.
172 tickEvent
.schedule(0);
174 id
= TESTER_ALLOCATOR
++;
// Port lookup by name: "functional" selects the functional reference port
// and "test" the cache-side port; any other name is a configuration error.
// NOTE(review): the return type and the actual return statements are
// missing from this garbled chunk.
180 MemTest::getPort(const std::string
&if_name
, int idx
)
182 if (if_name
== "functional")
184 else if (if_name
== "test")
187 panic("No Such Port\n");
// Post-connection initialization (presumably MemTest::init(); the
// signature line is missing from this chunk): query the peer's block size
// and derive the mask used to round addresses down to block boundaries.
193 // By the time init() is called, the ports should be hooked up.
194 blockSize
= cachePort
.peerBlockSize();
// blockSize is presumably a power of two, so blockSize - 1 masks the
// offset bits within a block -- TODO confirm against peerBlockSize().
195 blockAddrMask
= blockSize
- 1;
// Apply the mask deferred from the constructor to the trace address.
196 traceBlockAddr
= blockAddr(traceBlockAddr
);
198 // initial memory contents for both physical memory and functional
199 // memory should be 0; no need to initialize them.
// Retire a finished request.  Recovers the expected-data buffer stashed in
// the packet's sender state, removes the address from the outstanding set,
// and for reads verifies the returned bytes against the expected value.
// NOTE(review): the return type, braces, the write-response case body and
// the packet/state cleanup are missing from this garbled chunk.
204 MemTest::completeRequest(PacketPtr pkt
)
206 Request
*req
= pkt
->req
;
208 DPRINTF(MemTest
, "completing %s at address %x (blk %x)\n",
209 pkt
->isWrite() ? "write" : "read",
210 req
->getPaddr(), blockAddr(req
->getPaddr()));
// Recover the expectation we attached when the request was issued.
212 MemTestSenderState
*state
=
213 dynamic_cast<MemTestSenderState
*>(pkt
->senderState
);
215 uint8_t *data
= state
->data
;
216 uint8_t *pkt_data
= pkt
->getPtr
<uint8_t>();
218 //Remove the address from the list of outstanding
219 std::set
<unsigned>::iterator removeAddr
=
220 outstandingAddrs
.find(req
->getPaddr());
// The address must have been recorded when the request was issued.
221 assert(removeAddr
!= outstandingAddrs
.end());
222 outstandingAddrs
.erase(removeAddr
);
224 switch (pkt
->cmd
.toInt()) {
225 case MemCmd::ReadResp
:
// Any mismatch between returned and expected data is a fatal test failure.
227 if (memcmp(pkt_data
, data
, pkt
->getSize()) != 0) {
228 panic("%s: read of %x (blk %x) @ cycle %d "
229 "returns %x, expected %x\n", name(),
230 req
->getPaddr(), blockAddr(req
->getPaddr()), curTick
,
// Periodic progress report every progressInterval completed reads.
237 if (numReads
== nextProgressMessage
) {
238 ccprintf(cerr
, "%s: completed %d read accesses @%d\n",
239 name(), numReads
, curTick
);
240 nextProgressMessage
+= progressInterval
;
// Terminate the simulation once the configured load count is reached
// (maxLoads == 0 means "run forever").
243 if (maxLoads
!= 0 && numReads
>= maxLoads
)
244 exitSimLoop("maximum number of loads reached");
247 case MemCmd::WriteResp
:
// Any other command reaching here indicates a protocol error.
252 panic("invalid command %s (%d)", pkt
->cmdString(), pkt
->cmd
.toInt());
// A completed request resets the deadlock watchdog.
255 noResponseCycles
= 0;
// Statistics registration (presumably MemTest::regStats(); the signature
// and the stat variable names are missing from this garbled chunk): names
// and describes the read/write/copy completion counters.
265 using namespace Stats
;
268 .name(name() + ".num_reads")
269 .desc("number of read accesses completed")
273 .name(name() + ".num_writes")
274 .desc("number of write accesses completed")
278 .name(name() + ".num_copies")
279 .desc("number of copy accesses completed")
// Main tester loop body (presumably MemTest::tick(); the signature, braces
// and a number of interior statements are missing from this garbled chunk).
// Each tick re-schedules itself, bumps the deadlock watchdog, then rolls a
// random access: read vs. write, block-aligned offset, cacheable vs.
// uncacheable region, and whether to probe functionally.
286 if (!tickEvent
.scheduled())
287 tickEvent
.schedule(curTick
+ cycles(1));
// Watchdog: too many ticks without any completed request means the memory
// system has wedged.
289 if (++noResponseCycles
>= 500000) {
290 cerr
<< name() << ": deadlocked at cycle " << curTick
<< endl
;
// Roll the dice for this access.
299 unsigned cmd
= random() % 100;
300 unsigned offset
= random() % size
;
301 unsigned base
= random() % 2;
302 uint64_t data
= random();
303 unsigned access_size
= random() % 4;
304 unsigned cacheable
= random() % 100;
306 //If we aren't doing copies, use id as offset, and do a false sharing
308 //We can eliminate the lower bits of the offset, and then use the id
309 //to offset within the blks
310 offset
= blockAddr(offset
);
314 Request
*req
= new Request();
// percentUncacheable of accesses target the uncacheable region.
318 if (cacheable
< percentUncacheable
) {
319 flags
|= UNCACHEABLE
;
320 paddr
= uncacheAddr
+ offset
;
// Otherwise pick one of the two cacheable base regions at random.
322 paddr
= ((base
) ? baseAddr1
: baseAddr2
) + offset
;
// Functional (probe) accesses are only made to cacheable addresses.
324 bool probe
= (random() % 100 < percentFunctional
) && !(flags
& UNCACHEABLE
);
325 //bool probe = false;
// Align the address to the access size (1, 2, 4 or 8 bytes).
327 paddr
&= ~((1 << access_size
) - 1);
328 req
->setPhys(paddr
, 1 << access_size
, flags
);
329 req
->setThreadContext(id
,0);
331 uint8_t *result
= new uint8_t[8];
// cmd < percentReads selects the read path; otherwise the write path.
333 if (cmd
< percentReads
) {
336 // For now we only allow one outstanding request per address
337 // per tester. This means we assume CPU does write forwarding
338 // to reads that alias something in the cpu store buffer.
339 if (outstandingAddrs
.find(paddr
) != outstandingAddrs
.end()) {
345 outstandingAddrs
.insert(paddr
);
347 // ***** NOTE FOR RON: I'm not sure how to access checkMem. - Kevin
// Fetch the reference value from the functional port; completeRequest()
// will compare the real response against it.
348 funcPort
.readBlob(req
->getPaddr(), result
, req
->getSize());
351 "initiating read at address %x (blk %x) expecting %x\n",
352 req
->getPaddr(), blockAddr(req
->getPaddr()), *result
);
354 PacketPtr pkt
= new Packet(req
, MemCmd::ReadReq
, Packet::Broadcast
);
356 pkt
->dataDynamicArray(new uint8_t[req
->getSize()]);
// Stash the expected data so completeRequest() can verify the response.
357 MemTestSenderState
*state
= new MemTestSenderState(result
);
358 pkt
->senderState
= state
;
// Functional probe path: performed synchronously, retired immediately.
361 cachePort
.sendFunctional(pkt
);
362 completeRequest(pkt
);
// ---- write path ----
369 // For now we only allow one outstanding request per address
370 // per tester. This means we assume CPU does write forwarding
371 // to reads that alias something in the cpu store buffer.
372 if (outstandingAddrs
.find(paddr
) != outstandingAddrs
.end()) {
378 outstandingAddrs
.insert(paddr
);
380 DPRINTF(MemTest
, "initiating write at address %x (blk %x) value %x\n",
381 req
->getPaddr(), blockAddr(req
->getPaddr()), data
& 0xff);
383 PacketPtr pkt
= new Packet(req
, MemCmd::WriteReq
, Packet::Broadcast
);
385 uint8_t *pkt_data
= new uint8_t[req
->getSize()];
386 pkt
->dataDynamicArray(pkt_data
);
387 memcpy(pkt_data
, &data
, req
->getSize());
388 MemTestSenderState
*state
= new MemTestSenderState(result
);
389 pkt
->senderState
= state
;
// Keep the functional ("reference") memory in sync with the write so later
// reads check against the right value.
391 funcPort
.writeBlob(req
->getPaddr(), pkt_data
, req
->getSize());
// Functional probe path for writes, mirroring the read path above.
394 cachePort
.sendFunctional(pkt
);
395 completeRequest(pkt
);
// Retry path (presumably MemTest::doRetry(); the signature and the
// bookkeeping that clears retryPkt on a successful re-send are not visible
// in this garbled chunk).
405 if (cachePort
.sendTiming(retryPkt
)) {
// Python-config factory (MemTestParams::create): constructs a MemTest from
// the generated parameter struct.  NOTE(review): this call passes
// cache->getInterface(), main_mem and check_mem even though the matching
// constructor parameters are commented out in the constructor above --
// verify against the full, un-garbled file before relying on this chunk.
412 MemTestParams::create()
414 return new MemTest(name
,
416 cache
->getInterface(), main_mem
, check_mem
,
418 memory_size
, percent_reads
, percent_functional
,
419 percent_uncacheable
, progress_interval
,
420 percent_source_unaligned
, percent_dest_unaligned
,
421 trace_addr
, max_loads
, atomic
);