2 * Copyright (c) 2004-2005 The Regents of The University of Michigan
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
7 * met: redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer;
9 * redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution;
12 * neither the name of the copyright holders nor the names of its
13 * contributors may be used to endorse or promote products derived from
14 * this software without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
17 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
18 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
19 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
20 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
28 * Authors: Erik Hallnor
33 * Definition of a memory trace CPU object for optimal caches. Uses a memory
34 * trace to access a fully associative cache with optimal replacement.
37 #include <algorithm> // For heap functions.
39 #include "cpu/trace/opt_cpu.hh"
40 #include "cpu/trace/reader/mem_trace_reader.hh"
41 #include "params/OptCPU.hh"
42 #include "sim/sim_events.hh"
// Construct an OptCPU: a trace-driven "CPU" that replays an entire
// memory trace through a set-indexed cache simulated with optimal
// replacement (per the file header comment above).
//
// NOTE(review): this extraction dropped several original lines -- the
// remaining constructor parameters (block size, cache size,
// associativity), the setMask member initializer, and a number of body
// statements and closing braces.  All surviving tokens are preserved
// unchanged below; each gap is flagged inline.
OptCPU::OptCPU(const string &name,
               MemTraceReader *_trace,
               // NOTE(review): the int block_size / cache_size / _assoc
               // parameter lines are missing here -- confirm against the
               // original source.
    : SimObject(name), tickEvent(this), trace(_trace),
      numBlks(cache_size/block_size), assoc(_assoc), numSets(numBlks/assoc),
      // NOTE(review): setMask initializer and opening brace lost here.

    // Compute log2(block_size) so a physical address can be turned
    // into a block number with a single shift.
    int log_block_size = 0;
    int tmp_block_size = block_size;
    while (tmp_block_size > 1) {
        // NOTE(review): the ++log_block_size increment statement is
        // missing from the extraction.
        tmp_block_size = tmp_block_size >> 1;
    // block_size must be an exact power of two.
    assert(1<<log_block_size == block_size);

    // Read the whole trace up front, binning each block address into
    // its set's reference list.
    trace->getNextReq(req);
    refInfo.resize(numSets);
    // NOTE(review): the surrounding loop header and the RefInfo
    // temporary's declaration were lost in extraction.
    temp.addr = req->paddr >> log_block_size;
    int set = temp.addr & setMask;
    refInfo[set].push_back(temp);
    trace->getNextReq(req);

    // Initialize top level of lookup table.
    lookupTable.resize(16);

    // Annotate references with next ref time.
    // Each set's list is walked backwards so that when index i is
    // visited, the lookup table still holds the index of the *next*
    // (later) reference to the same address.
    for (int k = 0; k < numSets; ++k) {
        for (RefIndex i = refInfo[k].size() - 1; i >= 0; --i) {
            Addr addr = refInfo[k][i].addr;
            // First touch (in backwards order) seeds the table entry
            // with InfiniteRef, i.e. "never referenced again".
            initTable(addr, InfiniteRef);
            refInfo[k][i].nextRefTime = lookupValue(addr);
            // NOTE(review): the statement recording index i for this
            // address (and the loops' closing braces) is missing here.

    // Reset the lookup table
    // -1 is the sentinel processSet() later reads as "block not
    // currently resident in the cache".
    for (int j = 0; j < 16; ++j) {
        if (lookupTable[j].size() == (1<<16)) {
            for (int k = 0; k < (1<<16); ++k) {
                if (lookupTable[j][k].size() == (1<<16)) {
                    for (int l = 0; l < (1<<16); ++l) {
                        lookupTable[j][k][l] = -1;
                    // NOTE(review): closing braces for the reset loops
                    // were lost in extraction.

    // Schedule the single simulation tick at time 0.
    tickEvent.schedule(0);
// Simulate one cache set over its full reference stream with optimal
// replacement: on a miss, the victim is taken from cacheHeap[0], the
// root of a heap keyed on each resident block's next-reference time.
//
// NOTE(review): the extraction dropped the return-type line, several
// statements (hit/miss counter updates, loop index declaration,
// heapify calls, the early return, the else-branch openers) and most
// closing braces.  Surviving tokens are preserved unchanged; each gap
// is flagged inline.
OptCPU::processSet(int set)
    // Warm-up phase: fill the set with the first 'assoc' distinct
    // blocks that appear in the reference stream.
    int blks_in_cache = 0;
    // NOTE(review): declaration of the reference index i (presumably
    // RefIndex i = 0) is missing from the extraction.
    cacheHeap.resize(assoc);

    while (blks_in_cache < assoc) {
        // lookupValue() yields -1 when the block is not yet resident.
        RefIndex cache_index = lookupValue(refInfo[set][i].addr);
        if (cache_index == -1) {
            // First reference to this block
            cache_index = blks_in_cache++;
            setValue(refInfo[set][i].addr, cache_index);
        // NOTE(review): else branch (hit accounting) lost in
        // extraction.
        // update cache heap to most recent reference
        cacheHeap[cache_index] = i;
        if (++i >= refInfo[set].size()) {
            // NOTE(review): early-return body (trace exhausted during
            // warm-up) lost in extraction.

    // Establish the heap property over the warmed-up contents.
    // NOTE(review): the per-iteration heapify call is missing.
    for (int start = assoc/2; start >= 0; --start) {

    // Main phase: process the remainder of the set's references.
    for (; i < refInfo[set].size(); ++i) {
        RefIndex cache_index = lookupValue(refInfo[set][i].addr);
        if (cache_index == -1) {
            // Miss path.
            // replace from cacheHeap[0]
            // mark replaced block as absent
            setValue(refInfo[set][cacheHeap[0]].addr, -1);
            setValue(refInfo[set][i].addr, 0);
            // NOTE(review): the cacheHeap[0] = i update and the
            // re-heapify call are missing here.
            // Make sure its in the cache
            assert(lookupValue(refInfo[set][i].addr) != -1);
        // NOTE(review): else (hit path) opener and hit accounting lost
        // in extraction.
            // Hit: the heap slot for this block must point at the
            // current reference and agree with its annotated
            // next-reference time.
            assert(refInfo[set][cacheHeap[cache_index]].addr ==
                   refInfo[set][i].addr);
            assert(refInfo[set][cacheHeap[cache_index]].nextRefTime == i);
            // Presumably checks the touched slot is a heap leaf --
            // confirm against heapLeft()'s definition.
            assert(heapLeft(cache_index) >= assoc);

            // Record the newer reference and restore heap order for
            // this slot's updated next-reference time.
            cacheHeap[cache_index] = i;
            processRankIncrease(set, cache_index);
            assert(lookupValue(refInfo[set][i].addr) != -1);
168 for (int set
= 0; set
< numSets
; ++set
) {
169 if (!refInfo
[set
].empty()) {
172 references
+= refInfo
[set
].size();
175 fprintf(stderr
,"sys.cpu.misses %d #opt cache misses\n",misses
);
176 fprintf(stderr
,"sys.cpu.hits %d #opt cache hits\n", hits
);
177 fprintf(stderr
,"sys.cpu.accesses %d #opt cache acceses\n", references
);
178 exitSimLoop("end of memory trace reached");
182 OptCPU::initTable(Addr addr
, RefIndex index
)
184 int l1_index
= (addr
>> 32) & 0x0f;
185 int l2_index
= (addr
>> 16) & 0xffff;
186 assert(l1_index
== addr
>> 32);
187 if (lookupTable
[l1_index
].size() != (1<<16)) {
188 lookupTable
[l1_index
].resize(1<<16);
190 if (lookupTable
[l1_index
][l2_index
].size() != (1<<16)) {
191 lookupTable
[l1_index
][l2_index
].resize(1<<16, index
);
195 OptCPU::TickEvent::TickEvent(OptCPU
*c
)
196 : Event(&mainEventQueue
, CPU_Tick_Pri
), cpu(c
)
// Event callback invoked by the event queue when this TickEvent fires.
// NOTE(review): the return-type line and function body were lost in
// extraction; presumably it forwards to cpu->tick() -- confirm against
// the original source.
OptCPU::TickEvent::process()
207 OptCPU::TickEvent::description()
209 return "OptCPU tick event";
214 OptCPUParams::create()
216 return new OptCPU(name
, data_trace
, block_size
, size
, assoc
);