2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * Authors: Erik Hallnor
33 * Definition of a memory trace CPU object for optimal caches. Uses a memory
34 * trace to access a fully associative cache with optimal replacement.
37 #include <algorithm> // For heap functions.
39 #include "cpu/trace/opt_cpu.hh"
40 #include "cpu/trace/reader/mem_trace_reader.hh"
42 #include "sim/builder.hh"
43 #include "sim/sim_events.hh"
// Construct the optimal-replacement trace CPU.  Visible work: compute
// log2(block_size), read the whole memory trace into per-set reference
// lists, build a 3-level lookup table mapping each block address to the
// index of its *next* reference (OPT needs future knowledge), reset the
// table, and schedule the first tick event.
// NOTE(review): this chunk is a lossy extraction -- braces, declarations
// (e.g. the request pointer, the RefInfo temp, the trace-read loop header)
// and several statements between the visible fragments are missing.
// Comments below describe only what the visible tokens show.
47 OptCPU::OptCPU(const string
&name
,
48 MemTraceReader
*_trace
,
// Member-init list: keep the trace reader, derive total block count,
// associativity, and number of sets (numBlks/assoc).
52 : SimObject(name
), tickEvent(this), trace(_trace
),
53 numBlks(cache_size
/block_size
), assoc(_assoc
), numSets(numBlks
/assoc
),
// Compute log2(block_size) by repeated right shift; used below to turn
// physical addresses into block addresses.
56 int log_block_size
= 0;
57 int tmp_block_size
= block_size
;
58 while (tmp_block_size
> 1) {
60 tmp_block_size
= tmp_block_size
>> 1;
// block_size must be a power of two or the shift-based math is wrong.
62 assert(1<<log_block_size
== block_size
);
// Prime the trace reader and size the per-set reference lists.
64 trace
->getNextReq(req
);
65 refInfo
.resize(numSets
);
// For each trace request (loop header not visible in this chunk): convert
// the physical address to a block address, pick its set, and append the
// reference to that set's list.
68 temp
.addr
= req
->paddr
>> log_block_size
;
69 int set
= temp
.addr
& setMask
;
70 refInfo
[set
].push_back(temp
);
71 trace
->getNextReq(req
);
74 // Initialize top level of lookup table.
75 lookupTable
.resize(16);
77 // Annotate references with next ref time.
// Walk each set's references backwards so that lookupValue(addr) always
// yields the index of the next (later) reference to that block;
// InfiniteRef presumably marks "no later reference" -- confirm in header.
78 for (int k
= 0; k
< numSets
; ++k
) {
79 for (RefIndex i
= refInfo
[k
].size() - 1; i
>= 0; --i
) {
80 Addr addr
= refInfo
[k
][i
].addr
;
81 initTable(addr
, InfiniteRef
);
82 refInfo
[k
][i
].nextRefTime
= lookupValue(addr
);
87 // Reset the lookup table
// Only sub-tables that were actually allocated (size 1<<16) are cleared
// back to -1 ("block not present").
88 for (int j
= 0; j
< 16; ++j
) {
89 if (lookupTable
[j
].size() == (1<<16)) {
90 for (int k
= 0; k
< (1<<16); ++k
) {
91 if (lookupTable
[j
][k
].size() == (1<<16)) {
92 for (int l
= 0; l
< (1<<16); ++l
) {
93 lookupTable
[j
][k
][l
] = -1;
// Kick off processing at tick 0.
100 tickEvent
.schedule(0);
// Simulate optimal replacement for one cache set.  Phase 1 fills the set
// with the first `assoc` distinct blocks; after a heap is built over the
// filled slots, phase 2 walks the remaining references: a hit updates the
// slot's heap entry, a miss evicts the block at cacheHeap[0].
// NOTE(review): lossy extraction -- the return type, the declaration of
// `i`, the heapify call inside the `start` loop, hit/miss counter updates,
// and various braces are missing from this chunk.
107 OptCPU::processSet(int set
)
110 int blks_in_cache
= 0;
// cacheHeap[slot] holds the index (into refInfo[set]) of the most recent
// reference to the block occupying that cache slot.
113 cacheHeap
.resize(assoc
);
// Phase 1: consume references until `assoc` distinct blocks are resident.
115 while (blks_in_cache
< assoc
) {
116 RefIndex cache_index
= lookupValue(refInfo
[set
][i
].addr
);
117 if (cache_index
== -1) {
118 // First reference to this block
// Allocate the next free slot and record this block's location.
120 cache_index
= blks_in_cache
++;
121 setValue(refInfo
[set
][i
].addr
, cache_index
);
125 // update cache heap to most recent reference
126 cacheHeap
[cache_index
] = i
;
// The set's trace may run out before the set fills.
127 if (++i
>= refInfo
[set
].size()) {
// Build the heap over the filled slots (sift-down from the last parent;
// the heapify call itself is on a missing line).
131 for (int start
= assoc
/2; start
>= 0; --start
) {
// Phase 2: process the remaining references for this set.
136 for (; i
< refInfo
[set
].size(); ++i
) {
137 RefIndex cache_index
= lookupValue(refInfo
[set
][i
].addr
);
138 if (cache_index
== -1) {
// Miss path: evict the heap-root victim and install this block in slot 0.
141 // replace from cacheHeap[0]
142 // mark replaced block as absent
143 setValue(refInfo
[set
][cacheHeap
[0]].addr
, -1);
144 setValue(refInfo
[set
][i
].addr
, 0);
147 // Make sure its in the cache
148 assert(lookupValue(refInfo
[set
][i
].addr
) != -1);
// Hit-path sanity: the slot's heap entry must track this very block and
// must have predicted this reference as its next-reference time.
152 assert(refInfo
[set
][cacheHeap
[cache_index
]].addr
==
153 refInfo
[set
][i
].addr
);
154 assert(refInfo
[set
][cacheHeap
[cache_index
]].nextRefTime
== i
);
// NOTE(review): heapLeft(cache_index) >= assoc appears to assert the hit
// slot is a heap leaf (no children) -- confirm intent against full source.
155 assert(heapLeft(cache_index
) >= assoc
);
// Advance this slot to the current reference and restore heap order.
157 cacheHeap
[cache_index
] = i
;
158 processRankIncrease(set
, cache_index
);
159 assert(lookupValue(refInfo
[set
][i
].addr
) != -1);
// Fragment of the main simulation routine (its signature is not visible in
// this chunk -- presumably OptCPU::tick(); confirm against full source).
// Visible work: handle every non-empty set, total the reference count,
// print summary statistics to stderr, and end the simulation.
169 for (int set
= 0; set
< numSets
; ++set
) {
// Skip sets the trace never touched; the processSet(set) call sits on a
// line missing from this extraction.
170 if (!refInfo
[set
].empty()) {
173 references
+= refInfo
[set
].size();
// Emit stats in the m5 "sys.cpu.*" naming style.  (Note: "acceses" typo
// lives in the output string; a doc-only pass must not alter it.)
176 fprintf(stderr
,"sys.cpu.misses %d #opt cache misses\n",misses
);
177 fprintf(stderr
,"sys.cpu.hits %d #opt cache hits\n", hits
);
178 fprintf(stderr
,"sys.cpu.accesses %d #opt cache acceses\n", references
);
// Whole trace consumed: terminate the event loop.
179 exitSimLoop("end of memory trace reached");
// Lazily allocate the 3-level lookup-table path for a block address.
// Level 1 indexes with addr bits [35:32] (16 entries), level 2 with bits
// [31:16], level 3 with bits [15:0]; a freshly allocated leaf level is
// pre-filled with `index`.
183 OptCPU::initTable(Addr addr
, RefIndex index
)
185 int l1_index
= (addr
>> 32) & 0x0f;
186 int l2_index
= (addr
>> 16) & 0xffff;
// Block addresses wider than 36 bits are unsupported by the 4-bit top level.
187 assert(l1_index
== addr
>> 32);
// Allocate the second level on first touch.
188 if (lookupTable
[l1_index
].size() != (1<<16)) {
189 lookupTable
[l1_index
].resize(1<<16);
// Allocate the third level on first touch, defaulting entries to `index`.
191 if (lookupTable
[l1_index
][l2_index
].size() != (1<<16)) {
192 lookupTable
[l1_index
][l2_index
].resize(1<<16, index
);
// Tick event constructor: registers on the main event queue at CPU-tick
// priority and remembers the owning CPU for the callback.
196 OptCPU::TickEvent::TickEvent(OptCPU
*c
)
197 : Event(&mainEventQueue
, CPU_Tick_Pri
), cpu(c
)
// Event callback (body not visible in this chunk -- presumably forwards to
// the owning CPU's tick routine; confirm against the full source).
202 OptCPU::TickEvent::process()
// Human-readable event name used by event-queue tracing/debug output.
208 OptCPU::TickEvent::description()
210 return "OptCPU tick event";
// m5 SimObject configuration boilerplate: declare the config parameters
// (memory trace, cache size, block size, associativity), bind them to
// their python-visible names, and register the factory that constructs an
// OptCPU from a config instance.
// NOTE(review): lossy extraction -- the `size` and `assoc` Param
// declarations and most CREATE_SIM_OBJECT constructor arguments are
// missing from this chunk.
214 BEGIN_DECLARE_SIM_OBJECT_PARAMS(OptCPU
)
216 SimObjectParam
<MemTraceReader
*> data_trace
;
218 Param
<int> block_size
;
221 END_DECLARE_SIM_OBJECT_PARAMS(OptCPU
)
223 BEGIN_INIT_SIM_OBJECT_PARAMS(OptCPU
)
// data_trace defaults to NULL; the remaining parameters are required.
225 INIT_PARAM_DFLT(data_trace
, "memory trace", NULL
),
226 INIT_PARAM(size
, "cache size"),
227 INIT_PARAM(block_size
, "block size"),
228 INIT_PARAM(assoc
,"associativity")
230 END_INIT_SIM_OBJECT_PARAMS(OptCPU
)
// Factory invoked by the config system to build the named instance.
232 CREATE_SIM_OBJECT(OptCPU
)
234 return new OptCPU(getInstanceName(),
241 REGISTER_SIM_OBJECT("OptCPU", OptCPU
)