/*
 * Copyright (c) 2004-2005 The Regents of The University of Michigan
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file
 * Definition of a memory trace CPU object for optimal caches. Uses a memory
 * trace to access a fully associative cache with optimal replacement.
 */
35 #include <algorithm> // For heap functions.
37 #include "cpu/trace/opt_cpu.hh"
38 #include "cpu/trace/reader/mem_trace_reader.hh"
40 #include "sim/builder.hh"
41 #include "sim/sim_events.hh"
45 OptCPU::OptCPU(const string
&name
,
46 MemTraceReader
*_trace
,
50 : SimObject(name
), tickEvent(this), trace(_trace
),
51 numBlks(cache_size
/block_size
), assoc(_assoc
), numSets(numBlks
/assoc
),
54 int log_block_size
= 0;
55 int tmp_block_size
= block_size
;
56 while (tmp_block_size
> 1) {
58 tmp_block_size
= tmp_block_size
>> 1;
60 assert(1<<log_block_size
== block_size
);
62 trace
->getNextReq(req
);
63 refInfo
.resize(numSets
);
66 temp
.addr
= req
->paddr
>> log_block_size
;
67 int set
= temp
.addr
& setMask
;
68 refInfo
[set
].push_back(temp
);
69 trace
->getNextReq(req
);
72 // Initialize top level of lookup table.
73 lookupTable
.resize(16);
75 // Annotate references with next ref time.
76 for (int k
= 0; k
< numSets
; ++k
) {
77 for (RefIndex i
= refInfo
[k
].size() - 1; i
>= 0; --i
) {
78 Addr addr
= refInfo
[k
][i
].addr
;
79 initTable(addr
, InfiniteRef
);
80 refInfo
[k
][i
].nextRefTime
= lookupValue(addr
);
85 // Reset the lookup table
86 for (int j
= 0; j
< 16; ++j
) {
87 if (lookupTable
[j
].size() == (1<<16)) {
88 for (int k
= 0; k
< (1<<16); ++k
) {
89 if (lookupTable
[j
][k
].size() == (1<<16)) {
90 for (int l
= 0; l
< (1<<16); ++l
) {
91 lookupTable
[j
][k
][l
] = -1;
98 tickEvent
.schedule(0);
105 OptCPU::processSet(int set
)
108 int blks_in_cache
= 0;
111 cacheHeap
.resize(assoc
);
113 while (blks_in_cache
< assoc
) {
114 RefIndex cache_index
= lookupValue(refInfo
[set
][i
].addr
);
115 if (cache_index
== -1) {
116 // First reference to this block
118 cache_index
= blks_in_cache
++;
119 setValue(refInfo
[set
][i
].addr
, cache_index
);
123 // update cache heap to most recent reference
124 cacheHeap
[cache_index
] = i
;
125 if (++i
>= refInfo
[set
].size()) {
129 for (int start
= assoc
/2; start
>= 0; --start
) {
134 for (; i
< refInfo
[set
].size(); ++i
) {
135 RefIndex cache_index
= lookupValue(refInfo
[set
][i
].addr
);
136 if (cache_index
== -1) {
139 // replace from cacheHeap[0]
140 // mark replaced block as absent
141 setValue(refInfo
[set
][cacheHeap
[0]].addr
, -1);
142 setValue(refInfo
[set
][i
].addr
, 0);
145 // Make sure its in the cache
146 assert(lookupValue(refInfo
[set
][i
].addr
) != -1);
150 assert(refInfo
[set
][cacheHeap
[cache_index
]].addr
==
151 refInfo
[set
][i
].addr
);
152 assert(refInfo
[set
][cacheHeap
[cache_index
]].nextRefTime
== i
);
153 assert(heapLeft(cache_index
) >= assoc
);
155 cacheHeap
[cache_index
] = i
;
156 processRankIncrease(set
, cache_index
);
157 assert(lookupValue(refInfo
[set
][i
].addr
) != -1);
167 for (int set
= 0; set
< numSets
; ++set
) {
168 if (!refInfo
[set
].empty()) {
171 references
+= refInfo
[set
].size();
174 fprintf(stderr
,"sys.cpu.misses %d #opt cache misses\n",misses
);
175 fprintf(stderr
,"sys.cpu.hits %d #opt cache hits\n", hits
);
176 fprintf(stderr
,"sys.cpu.accesses %d #opt cache acceses\n", references
);
177 new SimExitEvent("Finshed Memory Trace");
181 OptCPU::initTable(Addr addr
, RefIndex index
)
183 int l1_index
= (addr
>> 32) & 0x0f;
184 int l2_index
= (addr
>> 16) & 0xffff;
185 assert(l1_index
== addr
>> 32);
186 if (lookupTable
[l1_index
].size() != (1<<16)) {
187 lookupTable
[l1_index
].resize(1<<16);
189 if (lookupTable
[l1_index
][l2_index
].size() != (1<<16)) {
190 lookupTable
[l1_index
][l2_index
].resize(1<<16, index
);
194 OptCPU::TickEvent::TickEvent(OptCPU
*c
)
195 : Event(&mainEventQueue
, CPU_Tick_Pri
), cpu(c
)
200 OptCPU::TickEvent::process()
206 OptCPU::TickEvent::description()
208 return "OptCPU tick event";
212 BEGIN_DECLARE_SIM_OBJECT_PARAMS(OptCPU
)
214 SimObjectParam
<MemTraceReader
*> data_trace
;
216 Param
<int> block_size
;
219 END_DECLARE_SIM_OBJECT_PARAMS(OptCPU
)
221 BEGIN_INIT_SIM_OBJECT_PARAMS(OptCPU
)
223 INIT_PARAM_DFLT(data_trace
, "memory trace", NULL
),
224 INIT_PARAM(size
, "cache size"),
225 INIT_PARAM(block_size
, "block size"),
226 INIT_PARAM(assoc
,"associativity")
228 END_INIT_SIM_OBJECT_PARAMS(OptCPU
)
230 CREATE_SIM_OBJECT(OptCPU
)
232 return new OptCPU(getInstanceName(),
239 REGISTER_SIM_OBJECT("OptCPU", OptCPU
)