Cache: Split invalidateBlk up to separate block vs. tags
[gem5.git] / src / mem / cache / tags / fa_lru.cc
/*
 * Copyright (c) 2003-2005 The Regents of The University of Michigan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met: redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer;
 * redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution;
 * neither the name of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Authors: Erik Hallnor
 */

/**
 * @file
 * Definitions of a fully associative LRU tagstore.
 */

#include <cassert>
#include <sstream>

#include "base/intmath.hh"
#include "base/misc.hh"
#include "mem/cache/tags/fa_lru.hh"

using namespace std;

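// FALRU models a fully associative cache as a single LRU-ordered list of
// blocks. Besides the full cache, it simultaneously tracks hit/miss
// behaviour for every power-of-two cache size from 128K up to the
// configured size: each block's inCache bitmask records which of those
// simulated sizes would still contain it, and cacheBoundaries[i] points to
// the last (least recently used) block that fits in the i-th size.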
FALRU::FALRU(unsigned _blkSize, unsigned _size, unsigned hit_latency)
    : blkSize(_blkSize), size(_size), hitLatency(hit_latency)
{
    if (!isPowerOf2(blkSize))
        fatal("cache block size (in bytes) `%d' must be a power of two",
              blkSize);
    if (!(hitLatency > 0))
        fatal("Access latency in cycles must be at least one cycle");
    if (!isPowerOf2(size))
        fatal("Cache Size must be power of 2 for now");

    // Track all cache sizes from 128K up by powers of 2
    numCaches = floorLog2(size) - 17;
    if (numCaches > 0){
        cacheBoundaries = new FALRUBlk *[numCaches];
        cacheMask = (1 << numCaches) - 1;
    } else {
        cacheMask = 0;
    }

    warmedUp = false;
    warmupBound = size/blkSize;
    numBlocks = size/blkSize;

    blks = new FALRUBlk[numBlocks];
    head = &(blks[0]);
    tail = &(blks[numBlocks-1]);

    head->prev = NULL;
    head->next = &(blks[1]);
    head->inCache = cacheMask;

    tail->prev = &(blks[numBlocks-2]);
    tail->next = NULL;
    tail->inCache = 0;

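    // Link the remaining blocks into the LRU list and record the boundary
    // block of each simulated cache size: index counts blocks up to the
    // next power-of-two boundary (starting at 128K), and once a boundary is
    // crossed the corresponding inCache bit is dropped from the flags given
    // to all later (less recently used) blocks.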
    unsigned index = (1 << 17) / blkSize;
    unsigned j = 0;
    int flags = cacheMask;
    for (unsigned i = 1; i < numBlocks - 1; i++) {
        blks[i].inCache = flags;
        if (i == index - 1){
            cacheBoundaries[j] = &(blks[i]);
            flags &= ~(1 << j);
            ++j;
            index = index << 1;
        }
        blks[i].prev = &(blks[i-1]);
        blks[i].next = &(blks[i+1]);
        blks[i].isTouched = false;
    }
    assert(j == numCaches);
    assert(index == numBlocks);
    //assert(check());
}

FALRU::~FALRU()
{
    if (numCaches)
        delete[] cacheBoundaries;

    delete[] blks;
}

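// Register per-size statistics: entry i of hits/misses corresponds to the
// i-th simulated cache size (128K, 256K, 512K, then 1M, 2M, ...), and the
// final entry corresponds to the full configured cache.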
void
FALRU::regStats(const string &name)
{
    using namespace Stats;
    BaseTags::regStats(name);
    hits
        .init(numCaches+1)
        .name(name + ".falru_hits")
        .desc("The number of hits in each cache size.")
        ;
    misses
        .init(numCaches+1)
        .name(name + ".falru_misses")
        .desc("The number of misses in each cache size.")
        ;
    accesses
        .name(name + ".falru_accesses")
        .desc("The number of accesses to the FA LRU cache.")
        ;

    for (unsigned i = 0; i <= numCaches; ++i) {
        stringstream size_str;
        if (i < 3){
            size_str << (1 << (i+7)) << "K";
        } else {
            size_str << (1 << (i-3)) << "M";
        }

        hits.subname(i, size_str.str());
        hits.subdesc(i, "Hits in a " + size_str.str() + " cache");
        misses.subname(i, size_str.str());
        misses.subdesc(i, "Misses in a " + size_str.str() + " cache");
    }
}

FALRUBlk *
FALRU::hashLookup(Addr addr) const
{
    tagIterator iter = tagHash.find(addr);
    if (iter != tagHash.end()) {
        return (*iter).second;
    }
    return NULL;
}

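// Invalidation only updates the tag bookkeeping here; the block is not
// unlinked from the LRU list or removed from the tag hash.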
void
FALRU::invalidate(FALRU::BlkType *blk)
{
    assert(blk);
    tagsInUse--;
}

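// Look up addr; on a hit, record a hit for every simulated size that still
// holds the block and a miss for those that do not, then move the block to
// the MRU (head) position. If inCache is non-NULL it is filled with the
// block's previous inCache mask (0 on a miss).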
FALRUBlk*
FALRU::accessBlock(Addr addr, int &lat, int context_src, int *inCache)
{
    accesses++;
    int tmp_in_cache = 0;
    Addr blkAddr = blkAlign(addr);
    FALRUBlk* blk = hashLookup(blkAddr);

    if (blk && blk->isValid()) {
        assert(blk->tag == blkAddr);
        tmp_in_cache = blk->inCache;
        for (unsigned i = 0; i < numCaches; i++) {
            if ((1 << i) & blk->inCache) {
                hits[i]++;
            } else {
                misses[i]++;
            }
        }
        hits[numCaches]++;
        if (blk != head){
            moveToHead(blk);
        }
    } else {
        blk = NULL;
        for (unsigned i = 0; i <= numCaches; ++i) {
            misses[i]++;
        }
    }
    if (inCache) {
        *inCache = tmp_in_cache;
    }

    lat = hitLatency;
    //assert(check());
    return blk;
}


FALRUBlk*
FALRU::findBlock(Addr addr) const
{
    Addr blkAddr = blkAlign(addr);
    FALRUBlk* blk = hashLookup(blkAddr);

    if (blk && blk->isValid()) {
        assert(blk->tag == blkAddr);
    } else {
        blk = NULL;
    }
    return blk;
}

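// The victim is always the LRU block at the tail of the list. It is moved
// to the MRU position and re-hashed under the aligned new address; if it
// was previously invalid, the tag-in-use count is bumped and the tagstore
// is flagged as warmed up once enough tags have been touched.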
FALRUBlk*
FALRU::findVictim(Addr addr, PacketList &writebacks)
{
    FALRUBlk * blk = tail;
    assert(blk->inCache == 0);
    moveToHead(blk);
    tagHash.erase(blk->tag);
    tagHash[blkAlign(addr)] = blk;
    if (blk->isValid()) {
        replacements[0]++;
    } else {
        tagsInUse++;
        blk->isTouched = true;
        if (!warmedUp && tagsInUse.value() >= warmupBound) {
            warmedUp = true;
            warmupCycle = curTick();
        }
    }
    //assert(check());
    return blk;
}

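// Nothing to do here: findVictim() has already moved the victim to the MRU
// position and updated the tag hash for the new address.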
void
FALRU::insertBlock(Addr addr, FALRU::BlkType *blk, int context_src)
{
}

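// Move blk to the head (MRU) of the list and update the per-size boundary
// pointers: for every simulated size that did not already contain blk, the
// block sitting on that size's boundary is pushed out of it (its inCache
// bit is cleared) and the boundary retreats one block toward the head; if
// blk itself was a boundary block, the boundary simply retreats.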
void
FALRU::moveToHead(FALRUBlk *blk)
{
    int updateMask = blk->inCache ^ cacheMask;
    for (unsigned i = 0; i < numCaches; i++){
        if ((1 << i) & updateMask) {
            cacheBoundaries[i]->inCache &= ~(1 << i);
            cacheBoundaries[i] = cacheBoundaries[i]->prev;
        } else if (cacheBoundaries[i] == blk) {
            cacheBoundaries[i] = blk->prev;
        }
    }
    blk->inCache = cacheMask;
    if (blk != head) {
        if (blk == tail){
            assert(blk->next == NULL);
            tail = blk->prev;
            tail->next = NULL;
        } else {
            blk->prev->next = blk->next;
            blk->next->prev = blk->prev;
        }
        blk->next = head;
        blk->prev = NULL;
        head->prev = blk;
        head = blk;
    }
}

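// Debug helper: walk the list from head to tail and verify that each
// block's inCache mask and the boundary pointers are consistent with its
// position. Returns false on the first inconsistency found.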
bool
FALRU::check()
{
    FALRUBlk* blk = head;
    int size = 0;
    int boundary = 1 << 17;
    int j = 0;
    int flags = cacheMask;
    while (blk) {
        size += blkSize;
        if (blk->inCache != flags) {
            return false;
        }
        if (size == boundary && blk != tail) {
            if (cacheBoundaries[j] != blk) {
                return false;
            }
            flags &= ~(1 << j);
            boundary = boundary << 1;
            ++j;
        }
        blk = blk->next;
    }
    return true;
}

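// Clear the load-locked flags on every block in the cache.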
void
FALRU::clearLocks()
{
    for (int i = 0; i < numBlocks; i++){
        blks[i].clearLoadLocks();
    }
}