misc: Make many includes explicit.
[gem5.git] / src / cpu / pred / tage_base.cc
1 /*
2 * Copyright (c) 2014 The University of Wisconsin
3 *
4 * Copyright (c) 2006 INRIA (Institut National de Recherche en
5 * Informatique et en Automatique / French National Research Institute
6 * for Computer Science and Applied Mathematics)
7 *
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions are
12 * met: redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer;
14 * redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution;
17 * neither the name of the copyright holders nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /* @file
35 * Implementation of a TAGE branch predictor
36 */
37
38 #include "cpu/pred/tage_base.hh"
39
40 #include "base/intmath.hh"
41 #include "base/logging.hh"
42 #include "debug/Fetch.hh"
43 #include "debug/Tage.hh"
44
// Construct the base TAGE predictor. All table geometry and tuning
// parameters are copied from the Python-side parameter object; the
// actual table storage is allocated lazily in init().
TAGEBase::TAGEBase(const TAGEBaseParams *p)
   : SimObject(p),
     logRatioBiModalHystEntries(p->logRatioBiModalHystEntries),
     nHistoryTables(p->nHistoryTables),
     tagTableCounterBits(p->tagTableCounterBits),
     tagTableUBits(p->tagTableUBits),
     histBufferSize(p->histBufferSize),
     minHist(p->minHist),
     maxHist(p->maxHist),
     pathHistBits(p->pathHistBits),
     tagTableTagWidths(p->tagTableTagWidths),
     logTagTableSizes(p->logTagTableSizes),
     threadHistory(p->numThreads),
     logUResetPeriod(p->logUResetPeriod),
     initialTCounterValue(p->initialTCounterValue),
     numUseAltOnNa(p->numUseAltOnNa),
     useAltOnNaBits(p->useAltOnNaBits),
     maxNumAlloc(p->maxNumAlloc),
     noSkip(p->noSkip),
     speculativeHistUpdate(p->speculativeHistUpdate),
     instShiftAmt(p->instShiftAmt),
     initialized(false)
{
    if (noSkip.empty()) {
        // Set all the table to enabled by default
        noSkip.resize(nHistoryTables + 1, true);
    }
}
73
74 TAGEBase::BranchInfo*
75 TAGEBase::makeBranchInfo() {
76 return new BranchInfo(*this);
77 }
78
void
TAGEBase::init()
{
    // Idempotent: init() may be called more than once (e.g. when the
    // predictor is embedded in a larger component); build tables once.
    if (initialized) {
        return;
    }
    // Current method for periodically resetting the u counter bits only
    // works for 1 or 2 bits
    // Also make sure that it is not 0
    assert(tagTableUBits <= 2 && (tagTableUBits > 0));

    // we use int type for the path history, so it cannot be more than
    // its size
    assert(pathHistBits <= (sizeof(int)*8));

    // initialize the counter to half of the period
    assert(logUResetPeriod != 0);
    tCounter = initialTCounterValue;

    // The rolling history buffer must hold at least two full windows of
    // maxHist outcomes; updateGHist() relies on this when it recycles
    // the buffer.
    assert(histBufferSize > maxHist * 2);

    // One "use alt prediction on newly allocated entry" counter per
    // configured slot (base TAGE uses a single shared counter).
    useAltPredForNewlyAllocated.resize(numUseAltOnNa, 0);

    // Per-thread speculative global- and path-history state.
    for (auto& history : threadHistory) {
        history.pathHist = 0;
        history.globalHistory = new uint8_t[histBufferSize];
        history.gHist = history.globalHistory;
        memset(history.gHist, 0, histBufferSize);
        history.ptGhist = 0;
    }

    histLengths = new int [nHistoryTables+1];

    // Derive the geometric history length for each tagged table.
    calculateParameters();

    assert(tagTableTagWidths.size() == (nHistoryTables+1));
    assert(logTagTableSizes.size() == (nHistoryTables+1));

    // First entry is for the Bimodal table and it is untagged in this
    // implementation
    assert(tagTableTagWidths[0] == 0);

    // Folded-history registers used to hash indices and the two partial
    // tags for each tagged table.
    for (auto& history : threadHistory) {
        history.computeIndices = new FoldedHistory[nHistoryTables+1];
        history.computeTags[0] = new FoldedHistory[nHistoryTables+1];
        history.computeTags[1] = new FoldedHistory[nHistoryTables+1];

        initFoldedHistories(history);
    }

    // Bimodal table: per-entry direction bits plus hysteresis bits
    // shared by 2^logRatioBiModalHystEntries entries.
    const uint64_t bimodalTableSize = ULL(1) << logTagTableSizes[0];
    btablePrediction.resize(bimodalTableSize, false);
    btableHysteresis.resize(bimodalTableSize >> logRatioBiModalHystEntries,
                            true);

    gtable = new TageEntry*[nHistoryTables + 1];
    buildTageTables();

    // Scratch arrays reused on every prediction.
    tableIndices = new int [nHistoryTables+1];
    tableTags = new int [nHistoryTables+1];
    initialized = true;
}
141
// Initialize one thread's folded (compressed) history registers: for
// each tagged table, one folded copy sized for the table index and two
// differently-sized copies used to build the partial tag.
void
TAGEBase::initFoldedHistories(ThreadHistory & history)
{
    for (int i = 1; i <= nHistoryTables; i++) {
        history.computeIndices[i].init(
            histLengths[i], (logTagTableSizes[i]));
        // Both tag hashes fold the same original history length, but to
        // widths w and w-1 so the two contributions de-correlate.
        history.computeTags[0][i].init(
            history.computeIndices[i].origLength, tagTableTagWidths[i]);
        history.computeTags[1][i].init(
            history.computeIndices[i].origLength, tagTableTagWidths[i]-1);
        DPRINTF(Tage, "HistLength:%d, TTSize:%d, TTTWidth:%d\n",
                histLengths[i], logTagTableSizes[i], tagTableTagWidths[i]);
    }
}
156
157 void
158 TAGEBase::buildTageTables()
159 {
160 for (int i = 1; i <= nHistoryTables; i++) {
161 gtable[i] = new TageEntry[1<<(logTagTableSizes[i])];
162 }
163 }
164
// Compute per-table history lengths as a geometric series from minHist
// (table 1) up to maxHist (table nHistoryTables), rounding each
// intermediate length to the nearest integer (+0.5 before truncation).
void
TAGEBase::calculateParameters()
{
    histLengths[1] = minHist;
    histLengths[nHistoryTables] = maxHist;

    for (int i = 2; i <= nHistoryTables; i++) {
        histLengths[i] = (int) (((double) minHist *
                       pow ((double) (maxHist) / (double) minHist,
                           (double) (i - 1) / (double) ((nHistoryTables- 1))))
                       + 0.5);
    }
}
178
// On a BTB miss (when histories are updated speculatively) the most
// recently inserted global-history bit belongs to a branch the BTB did
// not know about: zero that bit and rebuild the folded histories from
// the snapshot saved in the BranchInfo at prediction time.
void
TAGEBase::btbUpdate(ThreadID tid, Addr branch_pc, BranchInfo* &bi)
{
    if (speculativeHistUpdate) {
        ThreadHistory& tHist = threadHistory[tid];
        DPRINTF(Tage, "BTB miss resets prediction: %lx\n", branch_pc);
        assert(tHist.gHist == &tHist.globalHistory[tHist.ptGhist]);
        // Clear the youngest history bit.
        tHist.gHist[0] = 0;
        for (int i = 1; i <= nHistoryTables; i++) {
            // Restore the folded registers captured at prediction time,
            // then re-fold them with the corrected newest bit.
            tHist.computeIndices[i].comp = bi->ci[i];
            tHist.computeTags[0][i].comp = bi->ct0[i];
            tHist.computeTags[1][i].comp = bi->ct1[i];
            tHist.computeIndices[i].update(tHist.gHist);
            tHist.computeTags[0][i].update(tHist.gHist);
            tHist.computeTags[1][i].update(tHist.gHist);
        }
    }
}
197
198 int
199 TAGEBase::bindex(Addr pc_in) const
200 {
201 return ((pc_in >> instShiftAmt) & ((ULL(1) << (logTagTableSizes[0])) - 1));
202 }
203
// F() mixes the (truncated) path history into a table index. The
// rotations by 'bank' positions give each bank a different view of the
// same path history, reducing cross-bank aliasing.
int
TAGEBase::F(int A, int size, int bank) const
{
    int A1, A2;

    // Keep only 'size' bits of the path history.
    A = A & ((ULL(1) << size) - 1);
    // Split into a low part (index width) and the remainder.
    A1 = (A & ((ULL(1) << logTagTableSizes[bank]) - 1));
    A2 = (A >> logTagTableSizes[bank]);
    // Rotate the high part left by 'bank' within the index width.
    A2 = ((A2 << bank) & ((ULL(1) << logTagTableSizes[bank]) - 1))
         + (A2 >> (logTagTableSizes[bank] - bank));
    A = A1 ^ A2;
    // Rotate the combined value the same way before returning it.
    A = ((A << bank) & ((ULL(1) << logTagTableSizes[bank]) - 1))
        + (A >> (logTagTableSizes[bank] - bank));
    return (A);
}
219
// gindex computes a full hash of pc, ghist and pathHist
int
TAGEBase::gindex(ThreadID tid, Addr pc, int bank) const
{
    int index;
    // The path-history contribution is capped at pathHistBits bits.
    int hlen = (histLengths[bank] > pathHistBits) ? pathHistBits :
               histLengths[bank];
    const unsigned int shiftedPc = pc >> instShiftAmt;
    // XOR together: the PC, a bank-dependent secondary shift of the PC,
    // the folded global history for this bank, and the path history
    // mixed via F().
    index =
        shiftedPc ^
        (shiftedPc >> ((int) abs(logTagTableSizes[bank] - bank) + 1)) ^
        threadHistory[tid].computeIndices[bank].comp ^
        F(threadHistory[tid].pathHist, hlen, bank);

    // Mask the hash down to the table's index width.
    return (index & ((ULL(1) << (logTagTableSizes[bank])) - 1));
}
236
237
238 // Tag computation
239 uint16_t
240 TAGEBase::gtag(ThreadID tid, Addr pc, int bank) const
241 {
242 int tag = (pc >> instShiftAmt) ^
243 threadHistory[tid].computeTags[0][bank].comp ^
244 (threadHistory[tid].computeTags[1][bank].comp << 1);
245
246 return (tag & ((ULL(1) << tagTableTagWidths[bank]) - 1));
247 }
248
249
250 // Up-down saturating counter
251 template<typename T>
252 void
253 TAGEBase::ctrUpdate(T & ctr, bool taken, int nbits)
254 {
255 assert(nbits <= sizeof(T) << 3);
256 if (taken) {
257 if (ctr < ((1 << (nbits - 1)) - 1))
258 ctr++;
259 } else {
260 if (ctr > -(1 << (nbits - 1)))
261 ctr--;
262 }
263 }
264
265 // int8_t and int versions of this function may be needed
266 template void TAGEBase::ctrUpdate(int8_t & ctr, bool taken, int nbits);
267 template void TAGEBase::ctrUpdate(int & ctr, bool taken, int nbits);
268
269 // Up-down unsigned saturating counter
270 void
271 TAGEBase::unsignedCtrUpdate(uint8_t & ctr, bool up, unsigned nbits)
272 {
273 assert(nbits <= sizeof(uint8_t) << 3);
274 if (up) {
275 if (ctr < ((1 << nbits) - 1))
276 ctr++;
277 } else {
278 if (ctr)
279 ctr--;
280 }
281 }
282
283 // Bimodal prediction
284 bool
285 TAGEBase::getBimodePred(Addr pc, BranchInfo* bi) const
286 {
287 return btablePrediction[bi->bimodalIndex];
288 }
289
290
// Update the bimodal predictor: a hysteresis bit is shared among N prediction
// bits (N = 2 ^ logRatioBiModalHystEntries)
void
TAGEBase::baseUpdate(Addr pc, bool taken, BranchInfo* bi)
{
    // Reassemble a 2-bit counter from the entry's direction bit (MSB)
    // and its shared hysteresis bit (LSB).
    int inter = (btablePrediction[bi->bimodalIndex] << 1)
        + btableHysteresis[bi->bimodalIndex >> logRatioBiModalHystEntries];
    // Saturating 2-bit update in [0, 3].
    if (taken) {
        if (inter < 3)
            inter++;
    } else if (inter > 0) {
        inter--;
    }
    // Split the counter back into its two bits and write them back;
    // note the hysteresis write also affects the sharing entries.
    const bool pred = inter >> 1;
    const bool hyst = inter & 1;
    btablePrediction[bi->bimodalIndex] = pred;
    btableHysteresis[bi->bimodalIndex >> logRatioBiModalHystEntries] = hyst;
    DPRINTF(Tage, "Updating branch %lx, pred:%d, hyst:%d\n", pc, pred, hyst);
}
310
// shifting the global history: we manage the history in a big table in order
// to reduce simulation time
void
TAGEBase::updateGHist(uint8_t * &h, bool dir, uint8_t * tab, int &pt)
{
    // History grows downward from the end of the buffer; when the head
    // pointer reaches index 0 the buffer must be recycled.
    if (pt == 0) {
        DPRINTF(Tage, "Rolling over the histories\n");
        // Copy beginning of globalHistoryBuffer to end, such that
        // the last maxHist outcomes are still reachable
        // through pt[0 .. maxHist - 1].
        for (int i = 0; i < maxHist; i++)
            tab[histBufferSize - maxHist + i] = tab[i];
        pt = histBufferSize - maxHist;
        h = &tab[pt];
    }
    // Prepend the newest outcome (one byte per history bit).
    pt--;
    h--;
    h[0] = (dir) ? 1 : 0;
}
330
331 void
332 TAGEBase::calculateIndicesAndTags(ThreadID tid, Addr branch_pc,
333 BranchInfo* bi)
334 {
335 // computes the table addresses and the partial tags
336 for (int i = 1; i <= nHistoryTables; i++) {
337 tableIndices[i] = gindex(tid, branch_pc, i);
338 bi->tableIndices[i] = tableIndices[i];
339 tableTags[i] = gtag(tid, branch_pc, i);
340 bi->tableTags[i] = tableTags[i];
341 }
342 }
343
344 unsigned
345 TAGEBase::getUseAltIdx(BranchInfo* bi, Addr branch_pc)
346 {
347 // There is only 1 counter on the base TAGE implementation
348 return 0;
349 }
350
// Make a TAGE prediction for a branch and record in bi everything the
// later update/squash needs. Only conditional branches are actually
// predicted; others default to taken. Returns the predicted direction.
bool
TAGEBase::tagePredict(ThreadID tid, Addr branch_pc,
                      bool cond_branch, BranchInfo* bi)
{
    Addr pc = branch_pc;
    bool pred_taken = true;

    if (cond_branch) {
        // TAGE prediction

        calculateIndicesAndTags(tid, pc, bi);

        bi->bimodalIndex = bindex(pc);

        bi->hitBank = 0;
        bi->altBank = 0;
        //Look for the bank with longest matching history
        for (int i = nHistoryTables; i > 0; i--) {
            if (noSkip[i] &&
                gtable[i][tableIndices[i]].tag == tableTags[i]) {
                bi->hitBank = i;
                bi->hitBankIndex = tableIndices[bi->hitBank];
                break;
            }
        }
        //Look for the alternate bank
        for (int i = bi->hitBank - 1; i > 0; i--) {
            if (noSkip[i] &&
                gtable[i][tableIndices[i]].tag == tableTags[i]) {
                bi->altBank = i;
                bi->altBankIndex = tableIndices[bi->altBank];
                break;
            }
        }
        //computes the prediction and the alternate prediction
        if (bi->hitBank > 0) {
            if (bi->altBank > 0) {
                // Alternate prediction from the next-longest matching
                // tagged entry (sign of its counter).
                bi->altTaken =
                    gtable[bi->altBank][tableIndices[bi->altBank]].ctr >= 0;
                extraAltCalc(bi);
            }else {
                // No tagged alternate: fall back to the bimodal table.
                bi->altTaken = getBimodePred(pc, bi);
            }

            bi->longestMatchPred =
                gtable[bi->hitBank][tableIndices[bi->hitBank]].ctr >= 0;
            // A weak counter (value -1 or 0, i.e. |2*ctr + 1| <= 1)
            // marks a "pseudo newly allocated" entry whose prediction
            // is not yet trustworthy.
            bi->pseudoNewAlloc =
                abs(2 * gtable[bi->hitBank][bi->hitBankIndex].ctr + 1) <= 1;

            //if the entry is recognized as a newly allocated entry and
            //useAltPredForNewlyAllocated is positive use the alternate
            //prediction
            if ((useAltPredForNewlyAllocated[getUseAltIdx(bi, branch_pc)] < 0)
                || ! bi->pseudoNewAlloc) {
                bi->tagePred = bi->longestMatchPred;
                bi->provider = TAGE_LONGEST_MATCH;
            } else {
                bi->tagePred = bi->altTaken;
                bi->provider = bi->altBank ? TAGE_ALT_MATCH
                                           : BIMODAL_ALT_MATCH;
            }
        } else {
            // No tagged table hit at all: the bimodal table provides.
            bi->altTaken = getBimodePred(pc, bi);
            bi->tagePred = bi->altTaken;
            bi->longestMatchPred = bi->altTaken;
            bi->provider = BIMODAL_ONLY;
        }
        //end TAGE prediction

        pred_taken = (bi->tagePred);
        DPRINTF(Tage, "Predict for %lx: taken?:%d, tagePred:%d, altPred:%d\n",
                branch_pc, pred_taken, bi->tagePred, bi->altTaken);
    }
    bi->branchPC = branch_pc;
    bi->condBranch = cond_branch;
    return pred_taken;
}
428
429 void
430 TAGEBase::adjustAlloc(bool & alloc, bool taken, bool pred_taken)
431 {
432 // Nothing for this base class implementation
433 }
434
// After a misprediction, possibly allocate new entries in tables with
// longer histories than the provider, then advance the periodic
// useful-bit reset counter.
void
TAGEBase::handleAllocAndUReset(bool alloc, bool taken, BranchInfo* bi,
                               int nrand)
{
    if (alloc) {
        // is there some "unuseful" entry to allocate
        uint8_t min = 1;
        for (int i = nHistoryTables; i > bi->hitBank; i--) {
            if (gtable[i][bi->tableIndices[i]].u < min) {
                min = gtable[i][bi->tableIndices[i]].u;
            }
        }

        // we allocate an entry with a longer history
        // to avoid ping-pong, we do not choose systematically the next
        // entry, but among the 3 next entries
        int Y = nrand &
            ((ULL(1) << (nHistoryTables - bi->hitBank - 1)) - 1);
        int X = bi->hitBank + 1;
        // Random skip of 0, 1 or 2 tables based on the low bits of Y.
        if (Y & 1) {
            X++;
            if (Y & 2)
                X++;
        }
        // No entry available, forces one to be available
        if (min > 0) {
            gtable[X][bi->tableIndices[X]].u = 0;
        }


        //Allocate entries
        unsigned numAllocated = 0;
        for (int i = X; i <= nHistoryTables; i++) {
            if ((gtable[i][bi->tableIndices[i]].u == 0)) {
                // New entries start with a weak counter biased toward
                // the actual outcome (0 for taken, -1 for not taken).
                gtable[i][bi->tableIndices[i]].tag = bi->tableTags[i];
                gtable[i][bi->tableIndices[i]].ctr = (taken) ? 0 : -1;
                ++numAllocated;
                if (numAllocated == maxNumAlloc) {
                    break;
                }
            }
        }
    }

    // One update processed: advance the reset-period counter.
    tCounter++;

    handleUReset();
}
483
// Periodic aging of the useful counters: every 2^logUResetPeriod
// updates, every u counter in every tagged table is halved (see
// resetUctr) rather than cleared outright.
void
TAGEBase::handleUReset()
{
    //periodic reset of u: reset is not complete but bit by bit
    if ((tCounter & ((ULL(1) << logUResetPeriod) - 1)) == 0) {
        // reset least significant bit
        // most significant bit becomes least significant bit
        for (int i = 1; i <= nHistoryTables; i++) {
            for (int j = 0; j < (ULL(1) << logTagTableSizes[i]); j++) {
                resetUctr(gtable[i][j].u);
            }
        }
    }
}
498
499 void
500 TAGEBase::resetUctr(uint8_t & u)
501 {
502 u >>= 1;
503 }
504
// Update path for a resolved conditional branch: decide whether to
// allocate new entries, train the "use alt on newly allocated" chooser,
// then run allocation/u-reset and the counter updates.
void
TAGEBase::condBranchUpdate(ThreadID tid, Addr branch_pc, bool taken,
    BranchInfo* bi, int nrand, Addr corrTarget, bool pred, bool preAdjustAlloc)
{
    // TAGE UPDATE
    // try to allocate a new entries only if prediction was wrong
    bool alloc = (bi->tagePred != taken) && (bi->hitBank < nHistoryTables);

    // Derived classes may adjust the allocation decision either before
    // or after the pseudo-new-alloc handling below.
    if (preAdjustAlloc) {
        adjustAlloc(alloc, taken, pred);
    }

    if (bi->hitBank > 0) {
        // Manage the selection between longest matching and alternate
        // matching for "pseudo"-newly allocated longest matching entry
        bool PseudoNewAlloc = bi->pseudoNewAlloc;
        // an entry is considered as newly allocated if its prediction
        // counter is weak
        if (PseudoNewAlloc) {
            if (bi->longestMatchPred == taken) {
                alloc = false;
            }
            // if it was delivering the correct prediction, no need to
            // allocate new entry even if the overall prediction was false
            if (bi->longestMatchPred != bi->altTaken) {
                // Train the chooser toward whichever component (longest
                // match vs alternate) was correct this time.
                ctrUpdate(
                    useAltPredForNewlyAllocated[getUseAltIdx(bi, branch_pc)],
                    bi->altTaken == taken, useAltOnNaBits);
            }
        }
    }

    if (!preAdjustAlloc) {
        adjustAlloc(alloc, taken, pred);
    }

    handleAllocAndUReset(alloc, taken, bi, nrand);

    handleTAGEUpdate(branch_pc, taken, bi);
}
545
546 void
547 TAGEBase::handleTAGEUpdate(Addr branch_pc, bool taken, BranchInfo* bi)
548 {
549 if (bi->hitBank > 0) {
550 DPRINTF(Tage, "Updating tag table entry (%d,%d) for branch %lx\n",
551 bi->hitBank, bi->hitBankIndex, branch_pc);
552 ctrUpdate(gtable[bi->hitBank][bi->hitBankIndex].ctr, taken,
553 tagTableCounterBits);
554 // if the provider entry is not certified to be useful also update
555 // the alternate prediction
556 if (gtable[bi->hitBank][bi->hitBankIndex].u == 0) {
557 if (bi->altBank > 0) {
558 ctrUpdate(gtable[bi->altBank][bi->altBankIndex].ctr, taken,
559 tagTableCounterBits);
560 DPRINTF(Tage, "Updating tag table entry (%d,%d) for"
561 " branch %lx\n", bi->hitBank, bi->hitBankIndex,
562 branch_pc);
563 }
564 if (bi->altBank == 0) {
565 baseUpdate(branch_pc, taken, bi);
566 }
567 }
568
569 // update the u counter
570 if (bi->tagePred != bi->altTaken) {
571 unsignedCtrUpdate(gtable[bi->hitBank][bi->hitBankIndex].u,
572 bi->tagePred == taken, tagTableUBits);
573 }
574 } else {
575 baseUpdate(branch_pc, taken, bi);
576 }
577 }
578
// Shift this branch's outcome into the global and path histories and
// refresh the folded registers. Runs either at prediction time or at
// commit, depending on the speculativeHistUpdate configuration; when
// speculating, snapshots are stored in bi so squash() can recover.
void
TAGEBase::updateHistories(ThreadID tid, Addr branch_pc, bool taken,
                          BranchInfo* bi, bool speculative,
                          const StaticInstPtr &inst, Addr target)
{
    // Only act in the phase (speculative vs. commit) this predictor is
    // configured to update histories in.
    if (speculative != speculativeHistUpdate) {
        return;
    }
    ThreadHistory& tHist = threadHistory[tid];
    // UPDATE HISTORIES
    bool pathbit = ((branch_pc >> instShiftAmt) & 1);
    //on a squash, return pointers to this and recompute indices.
    //update user history
    updateGHist(tHist.gHist, taken, tHist.globalHistory, tHist.ptGhist);
    tHist.pathHist = (tHist.pathHist << 1) + pathbit;
    tHist.pathHist = (tHist.pathHist & ((ULL(1) << pathHistBits) - 1));

    if (speculative) {
        // Snapshot history pointers for recovery on a squash.
        bi->ptGhist = tHist.ptGhist;
        bi->pathHist = tHist.pathHist;
    }

    //prepare next index and tag computations for user branchs
    for (int i = 1; i <= nHistoryTables; i++)
    {
        if (speculative) {
            // Snapshot the folded registers before folding in the new
            // history bit, so btbUpdate()/squash() can restore them.
            bi->ci[i] = tHist.computeIndices[i].comp;
            bi->ct0[i] = tHist.computeTags[0][i].comp;
            bi->ct1[i] = tHist.computeTags[1][i].comp;
        }
        tHist.computeIndices[i].update(tHist.gHist);
        tHist.computeTags[0][i].update(tHist.gHist);
        tHist.computeTags[1][i].update(tHist.gHist);
    }
    DPRINTF(Tage, "Updating global histories with branch:%lx; taken?:%d, "
            "path Hist: %x; pointer:%d\n", branch_pc, taken, tHist.pathHist,
            tHist.ptGhist);
    assert(threadHistory[tid].gHist ==
           &threadHistory[tid].globalHistory[threadHistory[tid].ptGhist]);
}
619
// Recover the thread's speculative history state from the snapshot
// taken at prediction time, correct the newest history bit with the
// resolved direction, and re-fold the histories.
void
TAGEBase::squash(ThreadID tid, bool taken, TAGEBase::BranchInfo *bi,
                 Addr target)
{
    if (!speculativeHistUpdate) {
        /* If there are no speculative updates, no actions are needed */
        return;
    }

    ThreadHistory& tHist = threadHistory[tid];
    DPRINTF(Tage, "Restoring branch info: %lx; taken? %d; PathHistory:%x, "
            "pointer:%d\n", bi->branchPC,taken, bi->pathHist, bi->ptGhist);
    tHist.pathHist = bi->pathHist;
    tHist.ptGhist = bi->ptGhist;
    tHist.gHist = &(tHist.globalHistory[tHist.ptGhist]);
    // The youngest history bit now reflects the actual outcome.
    tHist.gHist[0] = (taken ? 1 : 0);
    for (int i = 1; i <= nHistoryTables; i++) {
        tHist.computeIndices[i].comp = bi->ci[i];
        tHist.computeTags[0][i].comp = bi->ct0[i];
        tHist.computeTags[1][i].comp = bi->ct1[i];
        tHist.computeIndices[i].update(tHist.gHist);
        tHist.computeTags[0][i].update(tHist.gHist);
        tHist.computeTags[1][i].update(tHist.gHist);
    }
}
645
646 void
647 TAGEBase::extraAltCalc(BranchInfo* bi)
648 {
649 // do nothing. This is only used in some derived classes
650 return;
651 }
652
// Account provider/correctness statistics for a resolved conditional
// branch, including "would have hit" counters for the component that
// was not chosen and per-bank provider histograms.
void
TAGEBase::updateStats(bool taken, BranchInfo* bi)
{
    if (taken == bi->tagePred) {
        // correct prediction
        switch (bi->provider) {
          case BIMODAL_ONLY: tageBimodalProviderCorrect++; break;
          case TAGE_LONGEST_MATCH: tageLongestMatchProviderCorrect++; break;
          case BIMODAL_ALT_MATCH: bimodalAltMatchProviderCorrect++; break;
          case TAGE_ALT_MATCH: tageAltMatchProviderCorrect++; break;
        }
    } else {
        // wrong prediction
        switch (bi->provider) {
          case BIMODAL_ONLY: tageBimodalProviderWrong++; break;
          case TAGE_LONGEST_MATCH:
            tageLongestMatchProviderWrong++;
            // The alternate would have gotten it right.
            if (bi->altTaken == taken) {
                tageAltMatchProviderWouldHaveHit++;
            }
            break;
          case BIMODAL_ALT_MATCH:
            bimodalAltMatchProviderWrong++;
            break;
          case TAGE_ALT_MATCH:
            tageAltMatchProviderWrong++;
            break;
        }

        // When an alternate provided and missed, check whether the
        // longest match would have gotten it right.
        switch (bi->provider) {
          case BIMODAL_ALT_MATCH:
          case TAGE_ALT_MATCH:
            if (bi->longestMatchPred == taken) {
                tageLongestMatchProviderWouldHaveHit++;
            }
        }
    }

    // Per-bank histograms of which tables provided predictions.
    switch (bi->provider) {
      case TAGE_LONGEST_MATCH:
      case TAGE_ALT_MATCH:
        tageLongestMatchProvider[bi->hitBank]++;
        tageAltMatchProvider[bi->altBank]++;
        break;
    }
}
699
700 unsigned
701 TAGEBase::getGHR(ThreadID tid, BranchInfo *bi) const
702 {
703 unsigned val = 0;
704 for (unsigned i = 0; i < 32; i++) {
705 // Make sure we don't go out of bounds
706 int gh_offset = bi->ptGhist + i;
707 assert(&(threadHistory[tid].globalHistory[gh_offset]) <
708 threadHistory[tid].globalHistory + histBufferSize);
709 val |= ((threadHistory[tid].globalHistory[gh_offset] & 0x1) << i);
710 }
711
712 return val;
713 }
714
// Register all predictor statistics with the gem5 stats framework.
// Names and descriptions are part of the stats output format; do not
// change them without updating consumers of the stats files.
void
TAGEBase::regStats()
{
    tageLongestMatchProviderCorrect
        .name(name() + ".tageLongestMatchProviderCorrect")
        .desc("Number of times TAGE Longest Match is the provider and "
              "the prediction is correct");

    tageAltMatchProviderCorrect
        .name(name() + ".tageAltMatchProviderCorrect")
        .desc("Number of times TAGE Alt Match is the provider and "
              "the prediction is correct");

    bimodalAltMatchProviderCorrect
        .name(name() + ".bimodalAltMatchProviderCorrect")
        .desc("Number of times TAGE Alt Match is the bimodal and it is the "
              "provider and the prediction is correct");

    tageBimodalProviderCorrect
        .name(name() + ".tageBimodalProviderCorrect")
        .desc("Number of times there are no hits on the TAGE tables "
              "and the bimodal prediction is correct");

    tageLongestMatchProviderWrong
        .name(name() + ".tageLongestMatchProviderWrong")
        .desc("Number of times TAGE Longest Match is the provider and "
              "the prediction is wrong");

    tageAltMatchProviderWrong
        .name(name() + ".tageAltMatchProviderWrong")
        .desc("Number of times TAGE Alt Match is the provider and "
              "the prediction is wrong");

    bimodalAltMatchProviderWrong
        .name(name() + ".bimodalAltMatchProviderWrong")
        .desc("Number of times TAGE Alt Match is the bimodal and it is the "
              "provider and the prediction is wrong");

    tageBimodalProviderWrong
        .name(name() + ".tageBimodalProviderWrong")
        .desc("Number of times there are no hits on the TAGE tables "
              "and the bimodal prediction is wrong");

    tageAltMatchProviderWouldHaveHit
        .name(name() + ".tageAltMatchProviderWouldHaveHit")
        .desc("Number of times TAGE Longest Match is the provider, "
              "the prediction is wrong and Alt Match prediction was correct");

    tageLongestMatchProviderWouldHaveHit
        .name(name() + ".tageLongestMatchProviderWouldHaveHit")
        .desc("Number of times TAGE Alt Match is the provider, the "
              "prediction is wrong and Longest Match prediction was correct");

    // Vector stats: one bucket per table (0 = bimodal).
    tageLongestMatchProvider
        .init(nHistoryTables + 1)
        .name(name() + ".tageLongestMatchProvider")
        .desc("TAGE provider for longest match");

    tageAltMatchProvider
        .init(nHistoryTables + 1)
        .name(name() + ".tageAltMatchProvider")
        .desc("TAGE provider for alt match");
}
778
779 int8_t
780 TAGEBase::getCtr(int hitBank, int hitBankIndex) const
781 {
782 return gtable[hitBank][hitBankIndex].ctr;
783 }
784
785 unsigned
786 TAGEBase::getTageCtrBits() const
787 {
788 return tagTableCounterBits;
789 }
790
791 int
792 TAGEBase::getPathHist(ThreadID tid) const
793 {
794 return threadHistory[tid].pathHist;
795 }
796
797 bool
798 TAGEBase::isSpeculativeUpdateEnabled() const
799 {
800 return speculativeHistUpdate;
801 }
802
// Total storage budget of the predictor in bits: tagged tables (counter
// + useful + tag bits per entry), the use-alt chooser counters, bimodal
// direction and shared hysteresis bits, and the history registers.
size_t
TAGEBase::getSizeInBits() const {
    size_t bits = 0;
    for (int i = 1; i <= nHistoryTables; i++) {
        bits += (1 << logTagTableSizes[i]) *
            (tagTableCounterBits + tagTableUBits + tagTableTagWidths[i]);
    }
    uint64_t bimodalTableSize = ULL(1) << logTagTableSizes[0];
    bits += numUseAltOnNa * useAltOnNaBits;
    // One direction bit per bimodal entry...
    bits += bimodalTableSize;
    // ...plus one shared hysteresis bit per group of entries.
    bits += (bimodalTableSize >> logRatioBiModalHystEntries);
    // Longest global history register, path history and reset counter.
    bits += histLengths[nHistoryTables];
    bits += pathHistBits;
    bits += logUResetPeriod;
    return bits;
}
819
820 TAGEBase*
821 TAGEBaseParams::create()
822 {
823 return new TAGEBase(this);
824 }