cpu: convert tage_base to new style stats
[gem5.git] / src / cpu / pred / tage_base.cc
1 /*
2 * Copyright (c) 2014 The University of Wisconsin
3 *
4 * Copyright (c) 2006 INRIA (Institut National de Recherche en
5 * Informatique et en Automatique / French National Research Institute
6 * for Computer Science and Applied Mathematics)
7 *
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions are
12 * met: redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer;
14 * redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution;
17 * neither the name of the copyright holders nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /* @file
35 * Implementation of a TAGE branch predictor
36 */
37
38 #include "cpu/pred/tage_base.hh"
39
40 #include "base/intmath.hh"
41 #include "base/logging.hh"
42 #include "debug/Fetch.hh"
43 #include "debug/Tage.hh"
44
// Construct the base TAGE predictor from its parameter struct. Only
// parameter capture happens here; table/history allocation is deferred
// to init().
TAGEBase::TAGEBase(const TAGEBaseParams *p)
   : SimObject(p),
     logRatioBiModalHystEntries(p->logRatioBiModalHystEntries),
     nHistoryTables(p->nHistoryTables),
     tagTableCounterBits(p->tagTableCounterBits),
     tagTableUBits(p->tagTableUBits),
     histBufferSize(p->histBufferSize),
     minHist(p->minHist),
     maxHist(p->maxHist),
     pathHistBits(p->pathHistBits),
     tagTableTagWidths(p->tagTableTagWidths),
     logTagTableSizes(p->logTagTableSizes),
     threadHistory(p->numThreads),
     logUResetPeriod(p->logUResetPeriod),
     initialTCounterValue(p->initialTCounterValue),
     numUseAltOnNa(p->numUseAltOnNa),
     useAltOnNaBits(p->useAltOnNaBits),
     maxNumAlloc(p->maxNumAlloc),
     noSkip(p->noSkip),
     speculativeHistUpdate(p->speculativeHistUpdate),
     instShiftAmt(p->instShiftAmt),
     initialized(false),
     stats(this, nHistoryTables)
{
    if (noSkip.empty()) {
        // Set all the table to enabled by default
        noSkip.resize(nHistoryTables + 1, true);
    }
}
74
75 TAGEBase::BranchInfo*
76 TAGEBase::makeBranchInfo() {
77 return new BranchInfo(*this);
78 }
79
// Allocate and initialize all predictor state (history buffers, folded
// histories, bimodal and tagged tables). Idempotent: a second call is a
// no-op.
void
TAGEBase::init()
{
    if (initialized) {
        return;
    }

    // Current method for periodically resetting the u counter bits only
    // works for 1 or 2 bits
    // Also make sure that it is not 0
    assert(tagTableUBits <= 2 && (tagTableUBits > 0));

    // we use int type for the path history, so it cannot be more than
    // its size
    assert(pathHistBits <= (sizeof(int)*8));

    // initialize the counter to half of the period
    assert(logUResetPeriod != 0);
    tCounter = initialTCounterValue;

    // The buffer must hold more than two full histories so the rollover
    // in updateGHist always has maxHist valid older entries to copy.
    assert(histBufferSize > maxHist * 2);

    useAltPredForNewlyAllocated.resize(numUseAltOnNa, 0);

    // Per-thread global history starts all-zero (not-taken) with the
    // current-history pointer at offset 0.
    for (auto& history : threadHistory) {
        history.pathHist = 0;
        history.globalHistory = new uint8_t[histBufferSize];
        history.gHist = history.globalHistory;
        memset(history.gHist, 0, histBufferSize);
        history.ptGhist = 0;
    }

    // Index 0 of the per-table arrays is the bimodal table; tagged
    // tables occupy indices 1..nHistoryTables.
    histLengths = new int [nHistoryTables+1];

    calculateParameters();

    assert(tagTableTagWidths.size() == (nHistoryTables+1));
    assert(logTagTableSizes.size() == (nHistoryTables+1));

    // First entry is for the Bimodal table and it is untagged in this
    // implementation
    assert(tagTableTagWidths[0] == 0);

    for (auto& history : threadHistory) {
        history.computeIndices = new FoldedHistory[nHistoryTables+1];
        history.computeTags[0] = new FoldedHistory[nHistoryTables+1];
        history.computeTags[1] = new FoldedHistory[nHistoryTables+1];

        initFoldedHistories(history);
    }

    // Bimodal storage is split: one direction bit per entry plus one
    // hysteresis bit shared by 2^logRatioBiModalHystEntries entries.
    const uint64_t bimodalTableSize = ULL(1) << logTagTableSizes[0];
    btablePrediction.resize(bimodalTableSize, false);
    btableHysteresis.resize(bimodalTableSize >> logRatioBiModalHystEntries,
                            true);

    gtable = new TageEntry*[nHistoryTables + 1];
    buildTageTables();

    // Scratch arrays reused by every prediction to hold per-table
    // indices and tags.
    tableIndices = new int [nHistoryTables+1];
    tableTags = new int [nHistoryTables+1];
    initialized = true;
}
143
// Set up the folded (compressed) history registers for one thread: each
// tagged table folds its geometric history length down to the widths it
// actually consumes (index width and the two tag components).
void
TAGEBase::initFoldedHistories(ThreadHistory & history)
{
    for (int i = 1; i <= nHistoryTables; i++) {
        history.computeIndices[i].init(
            histLengths[i], (logTagTableSizes[i]));
        history.computeTags[0][i].init(
            history.computeIndices[i].origLength, tagTableTagWidths[i]);
        // The second tag component is one bit narrower so the two
        // foldings of the same history differ.
        history.computeTags[1][i].init(
            history.computeIndices[i].origLength, tagTableTagWidths[i]-1);
        DPRINTF(Tage, "HistLength:%d, TTSize:%d, TTTWidth:%d\n",
                histLengths[i], logTagTableSizes[i], tagTableTagWidths[i]);
    }
}
158
159 void
160 TAGEBase::buildTageTables()
161 {
162 for (int i = 1; i <= nHistoryTables; i++) {
163 gtable[i] = new TageEntry[1<<(logTagTableSizes[i])];
164 }
165 }
166
167 void
168 TAGEBase::calculateParameters()
169 {
170 histLengths[1] = minHist;
171 histLengths[nHistoryTables] = maxHist;
172
173 for (int i = 2; i <= nHistoryTables; i++) {
174 histLengths[i] = (int) (((double) minHist *
175 pow ((double) (maxHist) / (double) minHist,
176 (double) (i - 1) / (double) ((nHistoryTables- 1))))
177 + 0.5);
178 }
179 }
180
// Called on a BTB miss when histories are updated speculatively: the
// history bit speculatively inserted for this branch is overwritten
// with not-taken, and the folded histories are rebuilt from the values
// checkpointed in bi before re-folding the corrected bit.
void
TAGEBase::btbUpdate(ThreadID tid, Addr branch_pc, BranchInfo* &bi)
{
    if (speculativeHistUpdate) {
        ThreadHistory& tHist = threadHistory[tid];
        DPRINTF(Tage, "BTB miss resets prediction: %lx\n", branch_pc);
        assert(tHist.gHist == &tHist.globalHistory[tHist.ptGhist]);
        tHist.gHist[0] = 0;
        for (int i = 1; i <= nHistoryTables; i++) {
            // Restore checkpointed folded state, then fold in the
            // (now corrected) newest history bit.
            tHist.computeIndices[i].comp = bi->ci[i];
            tHist.computeTags[0][i].comp = bi->ct0[i];
            tHist.computeTags[1][i].comp = bi->ct1[i];
            tHist.computeIndices[i].update(tHist.gHist);
            tHist.computeTags[0][i].update(tHist.gHist);
            tHist.computeTags[1][i].update(tHist.gHist);
        }
    }
}
199
200 int
201 TAGEBase::bindex(Addr pc_in) const
202 {
203 return ((pc_in >> instShiftAmt) & ((ULL(1) << (logTagTableSizes[0])) - 1));
204 }
205
206 int
207 TAGEBase::F(int A, int size, int bank) const
208 {
209 int A1, A2;
210
211 A = A & ((ULL(1) << size) - 1);
212 A1 = (A & ((ULL(1) << logTagTableSizes[bank]) - 1));
213 A2 = (A >> logTagTableSizes[bank]);
214 A2 = ((A2 << bank) & ((ULL(1) << logTagTableSizes[bank]) - 1))
215 + (A2 >> (logTagTableSizes[bank] - bank));
216 A = A1 ^ A2;
217 A = ((A << bank) & ((ULL(1) << logTagTableSizes[bank]) - 1))
218 + (A >> (logTagTableSizes[bank] - bank));
219 return (A);
220 }
221
// gindex computes a full hash of pc, ghist and pathHist
int
TAGEBase::gindex(ThreadID tid, Addr pc, int bank) const
{
    int index;
    // The path history contribution is capped at pathHistBits.
    int hlen = (histLengths[bank] > pathHistBits) ? pathHistBits :
               histLengths[bank];
    const unsigned int shiftedPc = pc >> instShiftAmt;
    // XOR together the PC, a bank-dependent shift of the PC, the folded
    // global history for this bank, and a hash (F) of the path history.
    index =
        shiftedPc ^
        (shiftedPc >> ((int) abs(logTagTableSizes[bank] - bank) + 1)) ^
        threadHistory[tid].computeIndices[bank].comp ^
        F(threadHistory[tid].pathHist, hlen, bank);

    return (index & ((ULL(1) << (logTagTableSizes[bank])) - 1));
}
238
239
240 // Tag computation
241 uint16_t
242 TAGEBase::gtag(ThreadID tid, Addr pc, int bank) const
243 {
244 int tag = (pc >> instShiftAmt) ^
245 threadHistory[tid].computeTags[0][bank].comp ^
246 (threadHistory[tid].computeTags[1][bank].comp << 1);
247
248 return (tag & ((ULL(1) << tagTableTagWidths[bank]) - 1));
249 }
250
251
252 // Up-down saturating counter
253 template<typename T>
254 void
255 TAGEBase::ctrUpdate(T & ctr, bool taken, int nbits)
256 {
257 assert(nbits <= sizeof(T) << 3);
258 if (taken) {
259 if (ctr < ((1 << (nbits - 1)) - 1))
260 ctr++;
261 } else {
262 if (ctr > -(1 << (nbits - 1)))
263 ctr--;
264 }
265 }
266
267 // int8_t and int versions of this function may be needed
268 template void TAGEBase::ctrUpdate(int8_t & ctr, bool taken, int nbits);
269 template void TAGEBase::ctrUpdate(int & ctr, bool taken, int nbits);
270
271 // Up-down unsigned saturating counter
272 void
273 TAGEBase::unsignedCtrUpdate(uint8_t & ctr, bool up, unsigned nbits)
274 {
275 assert(nbits <= sizeof(uint8_t) << 3);
276 if (up) {
277 if (ctr < ((1 << nbits) - 1))
278 ctr++;
279 } else {
280 if (ctr)
281 ctr--;
282 }
283 }
284
285 // Bimodal prediction
286 bool
287 TAGEBase::getBimodePred(Addr pc, BranchInfo* bi) const
288 {
289 return btablePrediction[bi->bimodalIndex];
290 }
291
292
293 // Update the bimodal predictor: a hysteresis bit is shared among N prediction
294 // bits (N = 2 ^ logRatioBiModalHystEntries)
295 void
296 TAGEBase::baseUpdate(Addr pc, bool taken, BranchInfo* bi)
297 {
298 int inter = (btablePrediction[bi->bimodalIndex] << 1)
299 + btableHysteresis[bi->bimodalIndex >> logRatioBiModalHystEntries];
300 if (taken) {
301 if (inter < 3)
302 inter++;
303 } else if (inter > 0) {
304 inter--;
305 }
306 const bool pred = inter >> 1;
307 const bool hyst = inter & 1;
308 btablePrediction[bi->bimodalIndex] = pred;
309 btableHysteresis[bi->bimodalIndex >> logRatioBiModalHystEntries] = hyst;
310 DPRINTF(Tage, "Updating branch %lx, pred:%d, hyst:%d\n", pc, pred, hyst);
311 }
312
// shifting the global history: we manage the history in a big table in order
// to reduce simulation time
void
TAGEBase::updateGHist(uint8_t * &h, bool dir, uint8_t * tab, int &pt)
{
    if (pt == 0) {
        DPRINTF(Tage, "Rolling over the histories\n");
        // Copy beginning of globalHistoryBuffer to end, such that
        // the last maxHist outcomes are still reachable
        // through pt[0 .. maxHist - 1].
        for (int i = 0; i < maxHist; i++)
            tab[histBufferSize - maxHist + i] = tab[i];
        pt = histBufferSize - maxHist;
        h = &tab[pt];
    }
    // The history grows downwards: the newest outcome is written at the
    // front (h[0]) and both the pointer and index step back by one.
    pt--;
    h--;
    h[0] = (dir) ? 1 : 0;
}
332
333 void
334 TAGEBase::calculateIndicesAndTags(ThreadID tid, Addr branch_pc,
335 BranchInfo* bi)
336 {
337 // computes the table addresses and the partial tags
338 for (int i = 1; i <= nHistoryTables; i++) {
339 tableIndices[i] = gindex(tid, branch_pc, i);
340 bi->tableIndices[i] = tableIndices[i];
341 tableTags[i] = gtag(tid, branch_pc, i);
342 bi->tableTags[i] = tableTags[i];
343 }
344 }
345
346 unsigned
347 TAGEBase::getUseAltIdx(BranchInfo* bi, Addr branch_pc)
348 {
349 // There is only 1 counter on the base TAGE implementation
350 return 0;
351 }
352
// Compute the TAGE prediction for a branch. For conditional branches
// the tagged tables are searched for the longest and second-longest
// ("alternate") matching history; unconditional branches are simply
// predicted taken. Everything needed at update/squash time is recorded
// in bi.
bool
TAGEBase::tagePredict(ThreadID tid, Addr branch_pc,
              bool cond_branch, BranchInfo* bi)
{
    Addr pc = branch_pc;
    bool pred_taken = true;

    if (cond_branch) {
        // TAGE prediction

        calculateIndicesAndTags(tid, pc, bi);

        bi->bimodalIndex = bindex(pc);

        bi->hitBank = 0;
        bi->altBank = 0;
        //Look for the bank with longest matching history
        for (int i = nHistoryTables; i > 0; i--) {
            if (noSkip[i] &&
                gtable[i][tableIndices[i]].tag == tableTags[i]) {
                bi->hitBank = i;
                bi->hitBankIndex = tableIndices[bi->hitBank];
                break;
            }
        }
        //Look for the alternate bank
        for (int i = bi->hitBank - 1; i > 0; i--) {
            if (noSkip[i] &&
                gtable[i][tableIndices[i]].tag == tableTags[i]) {
                bi->altBank = i;
                bi->altBankIndex = tableIndices[bi->altBank];
                break;
            }
        }
        //computes the prediction and the alternate prediction
        if (bi->hitBank > 0) {
            if (bi->altBank > 0) {
                bi->altTaken =
                    gtable[bi->altBank][tableIndices[bi->altBank]].ctr >= 0;
                extraAltCalc(bi);
            }else {
                // No second tagged hit: the bimodal table provides the
                // alternate prediction.
                bi->altTaken = getBimodePred(pc, bi);
            }

            bi->longestMatchPred =
                gtable[bi->hitBank][tableIndices[bi->hitBank]].ctr >= 0;
            // A weak counter (ctr is -1 or 0) marks a "pseudo newly
            // allocated" entry whose prediction is not yet trusted.
            bi->pseudoNewAlloc =
                abs(2 * gtable[bi->hitBank][bi->hitBankIndex].ctr + 1) <= 1;

            //if the entry is recognized as a newly allocated entry and
            //useAltPredForNewlyAllocated is positive use the alternate
            //prediction
            if ((useAltPredForNewlyAllocated[getUseAltIdx(bi, branch_pc)] < 0)
                || ! bi->pseudoNewAlloc) {
                bi->tagePred = bi->longestMatchPred;
                bi->provider = TAGE_LONGEST_MATCH;
            } else {
                bi->tagePred = bi->altTaken;
                bi->provider = bi->altBank ? TAGE_ALT_MATCH
                                           : BIMODAL_ALT_MATCH;
            }
        } else {
            // No tagged table hit at all: bimodal only.
            bi->altTaken = getBimodePred(pc, bi);
            bi->tagePred = bi->altTaken;
            bi->longestMatchPred = bi->altTaken;
            bi->provider = BIMODAL_ONLY;
        }
        //end TAGE prediction

        pred_taken = (bi->tagePred);
        DPRINTF(Tage, "Predict for %lx: taken?:%d, tagePred:%d, altPred:%d\n",
                branch_pc, pred_taken, bi->tagePred, bi->altTaken);
    }
    bi->branchPC = branch_pc;
    bi->condBranch = cond_branch;
    return pred_taken;
}
430
// Hook that lets derived predictors veto or force allocation of new
// tagged entries; the base implementation leaves `alloc` untouched.
void
TAGEBase::adjustAlloc(bool & alloc, bool taken, bool pred_taken)
{
    // Nothing for this base class implementation
}
436
// On a misprediction, try to allocate up to maxNumAlloc entries in
// tables with longer histories than the provider; then advance the
// counter that drives the periodic usefulness reset.
void
TAGEBase::handleAllocAndUReset(bool alloc, bool taken, BranchInfo* bi,
                               int nrand)
{
    if (alloc) {
        // is there some "unuseful" entry to allocate
        uint8_t min = 1;
        for (int i = nHistoryTables; i > bi->hitBank; i--) {
            if (gtable[i][bi->tableIndices[i]].u < min) {
                min = gtable[i][bi->tableIndices[i]].u;
            }
        }

        // we allocate an entry with a longer history
        // to avoid ping-pong, we do not choose systematically the next
        // entry, but among the 3 next entries
        int Y = nrand &
            ((ULL(1) << (nHistoryTables - bi->hitBank - 1)) - 1);
        // Two random bits pick the starting bank: +1, +2 or +3.
        int X = bi->hitBank + 1;
        if (Y & 1) {
            X++;
            if (Y & 2)
                X++;
        }
        // No entry available, forces one to be available
        if (min > 0) {
            gtable[X][bi->tableIndices[X]].u = 0;
        }


        //Allocate entries
        unsigned numAllocated = 0;
        for (int i = X; i <= nHistoryTables; i++) {
            if ((gtable[i][bi->tableIndices[i]].u == 0)) {
                // Initialize the counter weakly in the resolved
                // direction (0 = weakly taken, -1 = weakly not-taken).
                gtable[i][bi->tableIndices[i]].tag = bi->tableTags[i];
                gtable[i][bi->tableIndices[i]].ctr = (taken) ? 0 : -1;
                ++numAllocated;
                if (numAllocated == maxNumAlloc) {
                    break;
                }
            }
        }
    }

    // One more update handled: tick the periodic u-reset clock.
    tCounter++;

    handleUReset();
}
485
// Periodically age the usefulness (u) counters of all tagged entries so
// that stale entries eventually become eligible for reallocation.
void
TAGEBase::handleUReset()
{
    //periodic reset of u: reset is not complete but bit by bit
    if ((tCounter & ((ULL(1) << logUResetPeriod) - 1)) == 0) {
        // reset least significant bit
        // most significant bit becomes least significant bit
        for (int i = 1; i <= nHistoryTables; i++) {
            for (int j = 0; j < (ULL(1) << logTagTableSizes[i]); j++) {
                resetUctr(gtable[i][j].u);
            }
        }
    }
}
500
501 void
502 TAGEBase::resetUctr(uint8_t & u)
503 {
504 u >>= 1;
505 }
506
// Full update for a resolved conditional branch: decide whether to
// allocate new tagged entries, train the useAltPredForNewlyAllocated
// chooser, then run allocation/u-reset and the counter updates.
void
TAGEBase::condBranchUpdate(ThreadID tid, Addr branch_pc, bool taken,
    BranchInfo* bi, int nrand, Addr corrTarget, bool pred, bool preAdjustAlloc)
{
    // TAGE UPDATE
    // try to allocate a new entries only if prediction was wrong
    bool alloc = (bi->tagePred != taken) && (bi->hitBank < nHistoryTables);

    // Derived classes may adjust the allocation decision either before
    // or after the pseudo-new-allocation filtering below.
    if (preAdjustAlloc) {
        adjustAlloc(alloc, taken, pred);
    }

    if (bi->hitBank > 0) {
        // Manage the selection between longest matching and alternate
        // matching for "pseudo"-newly allocated longest matching entry
        bool PseudoNewAlloc = bi->pseudoNewAlloc;
        // an entry is considered as newly allocated if its prediction
        // counter is weak
        if (PseudoNewAlloc) {
            if (bi->longestMatchPred == taken) {
                alloc = false;
            }
            // if it was delivering the correct prediction, no need to
            // allocate new entry even if the overall prediction was false
            if (bi->longestMatchPred != bi->altTaken) {
                // Train the chooser only when the two predictions
                // actually disagreed.
                ctrUpdate(
                    useAltPredForNewlyAllocated[getUseAltIdx(bi, branch_pc)],
                    bi->altTaken == taken, useAltOnNaBits);
            }
        }
    }

    if (!preAdjustAlloc) {
        adjustAlloc(alloc, taken, pred);
    }

    handleAllocAndUReset(alloc, taken, bi, nrand);

    handleTAGEUpdate(branch_pc, taken, bi);
}
547
548 void
549 TAGEBase::handleTAGEUpdate(Addr branch_pc, bool taken, BranchInfo* bi)
550 {
551 if (bi->hitBank > 0) {
552 DPRINTF(Tage, "Updating tag table entry (%d,%d) for branch %lx\n",
553 bi->hitBank, bi->hitBankIndex, branch_pc);
554 ctrUpdate(gtable[bi->hitBank][bi->hitBankIndex].ctr, taken,
555 tagTableCounterBits);
556 // if the provider entry is not certified to be useful also update
557 // the alternate prediction
558 if (gtable[bi->hitBank][bi->hitBankIndex].u == 0) {
559 if (bi->altBank > 0) {
560 ctrUpdate(gtable[bi->altBank][bi->altBankIndex].ctr, taken,
561 tagTableCounterBits);
562 DPRINTF(Tage, "Updating tag table entry (%d,%d) for"
563 " branch %lx\n", bi->hitBank, bi->hitBankIndex,
564 branch_pc);
565 }
566 if (bi->altBank == 0) {
567 baseUpdate(branch_pc, taken, bi);
568 }
569 }
570
571 // update the u counter
572 if (bi->tagePred != bi->altTaken) {
573 unsignedCtrUpdate(gtable[bi->hitBank][bi->hitBankIndex].u,
574 bi->tagePred == taken, tagTableUBits);
575 }
576 } else {
577 baseUpdate(branch_pc, taken, bi);
578 }
579 }
580
// Shift the branch outcome into the global and path histories and keep
// the folded histories in sync. Runs either at prediction time
// (speculative) or at update time, matching the speculativeHistUpdate
// configuration; calls in the other mode are ignored. In speculative
// mode the pre-update folded state is checkpointed in bi for squash().
void
TAGEBase::updateHistories(ThreadID tid, Addr branch_pc, bool taken,
                          BranchInfo* bi, bool speculative,
                          const StaticInstPtr &inst, Addr target)
{
    if (speculative != speculativeHistUpdate) {
        return;
    }
    ThreadHistory& tHist = threadHistory[tid];
    // UPDATE HISTORIES
    bool pathbit = ((branch_pc >> instShiftAmt) & 1);
    //on a squash, return pointers to this and recompute indices.
    //update user history
    updateGHist(tHist.gHist, taken, tHist.globalHistory, tHist.ptGhist);
    tHist.pathHist = (tHist.pathHist << 1) + pathbit;
    tHist.pathHist = (tHist.pathHist & ((ULL(1) << pathHistBits) - 1));

    if (speculative) {
        bi->ptGhist = tHist.ptGhist;
        bi->pathHist = tHist.pathHist;
    }

    //prepare next index and tag computations for user branchs
    for (int i = 1; i <= nHistoryTables; i++)
    {
        if (speculative) {
            // Checkpoint folded state so squash() can restore it.
            bi->ci[i] = tHist.computeIndices[i].comp;
            bi->ct0[i] = tHist.computeTags[0][i].comp;
            bi->ct1[i] = tHist.computeTags[1][i].comp;
        }
        tHist.computeIndices[i].update(tHist.gHist);
        tHist.computeTags[0][i].update(tHist.gHist);
        tHist.computeTags[1][i].update(tHist.gHist);
    }
    DPRINTF(Tage, "Updating global histories with branch:%lx; taken?:%d, "
            "path Hist: %x; pointer:%d\n", branch_pc, taken, tHist.pathHist,
            tHist.ptGhist);
    assert(threadHistory[tid].gHist ==
           &threadHistory[tid].globalHistory[threadHistory[tid].ptGhist]);
}
621
// Recover from a mispredicted branch: restore the path history, global
// history pointer and folded histories checkpointed in bi, then
// re-insert the branch's resolved outcome. Only needed when histories
// are updated speculatively.
void
TAGEBase::squash(ThreadID tid, bool taken, TAGEBase::BranchInfo *bi,
                 Addr target)
{
    if (!speculativeHistUpdate) {
        /* If there are no speculative updates, no actions are needed */
        return;
    }

    ThreadHistory& tHist = threadHistory[tid];
    DPRINTF(Tage, "Restoring branch info: %lx; taken? %d; PathHistory:%x, "
            "pointer:%d\n", bi->branchPC,taken, bi->pathHist, bi->ptGhist);
    tHist.pathHist = bi->pathHist;
    tHist.ptGhist = bi->ptGhist;
    tHist.gHist = &(tHist.globalHistory[tHist.ptGhist]);
    // Overwrite the speculative outcome bit with the resolved direction.
    tHist.gHist[0] = (taken ? 1 : 0);
    for (int i = 1; i <= nHistoryTables; i++) {
        // Restore checkpointed folded state, then fold the corrected
        // newest bit back in.
        tHist.computeIndices[i].comp = bi->ci[i];
        tHist.computeTags[0][i].comp = bi->ct0[i];
        tHist.computeTags[1][i].comp = bi->ct1[i];
        tHist.computeIndices[i].update(tHist.gHist);
        tHist.computeTags[0][i].update(tHist.gHist);
        tHist.computeTags[1][i].update(tHist.gHist);
    }
}
647
// Hook invoked after the alternate prediction is read in tagePredict;
// the base implementation does nothing.
void
TAGEBase::extraAltCalc(BranchInfo* bi)
{
    // do nothing. This is only used in some derived classes
    return;
}
654
// Account per-provider prediction statistics for a resolved branch.
void
TAGEBase::updateStats(bool taken, BranchInfo* bi)
{
    if (taken == bi->tagePred) {
        // correct prediction
        switch (bi->provider) {
          case BIMODAL_ONLY: stats.bimodalProviderCorrect++; break;
          case TAGE_LONGEST_MATCH: stats.longestMatchProviderCorrect++; break;
          case BIMODAL_ALT_MATCH:
            stats.bimodalAltMatchProviderCorrect++;
            break;
          case TAGE_ALT_MATCH: stats.altMatchProviderCorrect++; break;
        }
    } else {
        // wrong prediction
        switch (bi->provider) {
          case BIMODAL_ONLY: stats.bimodalProviderWrong++; break;
          case TAGE_LONGEST_MATCH:
            stats.longestMatchProviderWrong++;
            if (bi->altTaken == taken) {
                stats.altMatchProviderWouldHaveHit++;
            }
            break;
          case BIMODAL_ALT_MATCH:
            stats.bimodalAltMatchProviderWrong++;
            break;
          case TAGE_ALT_MATCH:
            stats.altMatchProviderWrong++;
            break;
        }

        // For either kind of alternate provider, check whether the
        // longest match would have been correct (deliberate fallthrough
        // so both cases share the body).
        switch (bi->provider) {
          case BIMODAL_ALT_MATCH:
          case TAGE_ALT_MATCH:
            if (bi->longestMatchPred == taken) {
                stats.longestMatchProviderWouldHaveHit++;
            }
        }
    }

    // Per-bank provider histograms, counted whenever a tagged table
    // supplied the prediction (both cases fall through to one body).
    switch (bi->provider) {
      case TAGE_LONGEST_MATCH:
      case TAGE_ALT_MATCH:
        stats.longestMatchProvider[bi->hitBank]++;
        stats.altMatchProvider[bi->altBank]++;
        break;
    }
}
703
704 unsigned
705 TAGEBase::getGHR(ThreadID tid, BranchInfo *bi) const
706 {
707 unsigned val = 0;
708 for (unsigned i = 0; i < 32; i++) {
709 // Make sure we don't go out of bounds
710 int gh_offset = bi->ptGhist + i;
711 assert(&(threadHistory[tid].globalHistory[gh_offset]) <
712 threadHistory[tid].globalHistory + histBufferSize);
713 val |= ((threadHistory[tid].globalHistory[gh_offset] & 0x1) << i);
714 }
715
716 return val;
717 }
718
// Register all TAGE statistics with the stats framework. The two vector
// stats are sized nHistoryTables + 1 so they can be indexed directly by
// bank number (0 is the bimodal table).
TAGEBase::TAGEBaseStats::TAGEBaseStats(
    Stats::Group *parent, unsigned nHistoryTables)
    : Stats::Group(parent),
      ADD_STAT(longestMatchProviderCorrect, "Number of times TAGE Longest"
               " Match is the provider and the prediction is correct"),
      ADD_STAT(altMatchProviderCorrect, "Number of times TAGE Alt Match"
               " is the provider and the prediction is correct"),
      ADD_STAT(bimodalAltMatchProviderCorrect, "Number of times TAGE Alt"
               " Match is the bimodal and it is the provider and the prediction"
               " is correct"),
      ADD_STAT(bimodalProviderCorrect, "Number of times there are no"
               " hits on the TAGE tables and the bimodal prediction is correct"),
      ADD_STAT(longestMatchProviderWrong, "Number of times TAGE Longest"
               " Match is the provider and the prediction is wrong"),
      ADD_STAT(altMatchProviderWrong, "Number of times TAGE Alt Match is"
               " the provider and the prediction is wrong"),
      ADD_STAT(bimodalAltMatchProviderWrong, "Number of times TAGE Alt Match"
               " is the bimodal and it is the provider and the prediction is"
               " wrong"),
      ADD_STAT(bimodalProviderWrong, "Number of times there are no hits"
               " on the TAGE tables and the bimodal prediction is wrong"),
      ADD_STAT(altMatchProviderWouldHaveHit, "Number of times TAGE"
               " Longest Match is the provider, the prediction is wrong and"
               " Alt Match prediction was correct"),
      ADD_STAT(longestMatchProviderWouldHaveHit, "Number of times"
               " TAGE Alt Match is the provider, the prediction is wrong and"
               " Longest Match prediction was correct"),
      ADD_STAT(longestMatchProvider, "TAGE provider for longest match"),
      ADD_STAT(altMatchProvider, "TAGE provider for alt match")
{
    longestMatchProvider.init(nHistoryTables + 1);
    altMatchProvider.init(nHistoryTables + 1);
}
752
753 int8_t
754 TAGEBase::getCtr(int hitBank, int hitBankIndex) const
755 {
756 return gtable[hitBank][hitBankIndex].ctr;
757 }
758
759 unsigned
760 TAGEBase::getTageCtrBits() const
761 {
762 return tagTableCounterBits;
763 }
764
765 int
766 TAGEBase::getPathHist(ThreadID tid) const
767 {
768 return threadHistory[tid].pathHist;
769 }
770
771 bool
772 TAGEBase::isSpeculativeUpdateEnabled() const
773 {
774 return speculativeHistUpdate;
775 }
776
777 size_t
778 TAGEBase::getSizeInBits() const {
779 size_t bits = 0;
780 for (int i = 1; i <= nHistoryTables; i++) {
781 bits += (1 << logTagTableSizes[i]) *
782 (tagTableCounterBits + tagTableUBits + tagTableTagWidths[i]);
783 }
784 uint64_t bimodalTableSize = ULL(1) << logTagTableSizes[0];
785 bits += numUseAltOnNa * useAltOnNaBits;
786 bits += bimodalTableSize;
787 bits += (bimodalTableSize >> logRatioBiModalHystEntries);
788 bits += histLengths[nHistoryTables];
789 bits += pathHistBits;
790 bits += logUResetPeriod;
791 return bits;
792 }
793
794 TAGEBase*
795 TAGEBaseParams::create()
796 {
797 return new TAGEBase(this);
798 }